source: mainline/uspace/srv/fs/fat/fat_ops.c@ 4637c72

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 4637c72 was dba4a23, checked in by Jakub Jermar <jakub@…>, 15 years ago

Speed up sequential I/O by caching the "current" cluster in fat_block_get().
This improves sequential read of a 5 MB file (using the cat command) by 45 seconds
(73s → 28s).

  • Property mode set to 100644
File size: 37.0 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/ipc.h>
45#include <ipc/services.h>
46#include <ipc/devmap.h>
47#include <macros.h>
48#include <async.h>
49#include <errno.h>
50#include <str.h>
51#include <byteorder.h>
52#include <adt/hash_table.h>
53#include <adt/list.h>
54#include <assert.h>
55#include <fibril_synch.h>
56#include <sys/mman.h>
57#include <align.h>
58
59#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
60#define FS_NODE(node) ((node) ? (node)->bp : NULL)
61
62#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
63#define BPC(bs) (BPS((bs)) * SPC((bs)))
64
65/** Mutex protecting the list of cached free FAT nodes. */
66static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
67
68/** List of cached free FAT nodes. */
69static LIST_INITIALIZE(ffn_head);
70
71/*
72 * Forward declarations of FAT libfs operations.
73 */
74static int fat_root_get(fs_node_t **, dev_handle_t);
75static int fat_match(fs_node_t **, fs_node_t *, const char *);
76static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
77static int fat_node_open(fs_node_t *);
78static int fat_node_put(fs_node_t *);
79static int fat_create_node(fs_node_t **, dev_handle_t, int);
80static int fat_destroy_node(fs_node_t *);
81static int fat_link(fs_node_t *, fs_node_t *, const char *);
82static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
83static int fat_has_children(bool *, fs_node_t *);
84static fs_index_t fat_index_get(fs_node_t *);
85static aoff64_t fat_size_get(fs_node_t *);
86static unsigned fat_lnkcnt_get(fs_node_t *);
87static char fat_plb_get_char(unsigned);
88static bool fat_is_directory(fs_node_t *);
89static bool fat_is_file(fs_node_t *node);
90static dev_handle_t fat_device_get(fs_node_t *node);
91
92/*
93 * Helper functions.
94 */
95static void fat_node_initialize(fat_node_t *node)
96{
97 fibril_mutex_initialize(&node->lock);
98 node->bp = NULL;
99 node->idx = NULL;
100 node->type = 0;
101 link_initialize(&node->ffn_link);
102 node->size = 0;
103 node->lnkcnt = 0;
104 node->refcnt = 0;
105 node->dirty = false;
106 node->lastc_cached_valid = false;
107 node->lastc_cached_value = FAT_CLST_LAST1;
108 node->currc_cached_valid = false;
109 node->currc_cached_bn = 0;
110 node->currc_cached_value = FAT_CLST_LAST1;
111}
112
/** Write the changed fields of a dirty in-core node back to its dentry.
 *
 * Only the first-cluster number, the file size (for files) and the
 * sub-directory attribute (for directories) are written; time fields are
 * not updated yet (see TODO below). The caller must have marked the node
 * dirty.
 *
 * @param node	Dirty in-core FAT node to synchronize.
 *
 * @return	EOK on success or an error code from the block layer.
 */
static int fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	int rc;

	assert(node->dirty);

	bs = block_bb_get(node->idx->dev_handle);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
	    NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
	    BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	/* Locate the dentry of this node within the block. */
	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));

	/* On-disk dentry fields are little-endian. */
	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}

	/* TODO: update other fields? (e.g time fields) */

	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	return rc;
}
146
/** Clean up cached free nodes belonging to a file system instance.
 *
 * Called from fat_unmounted(). At that point no node of this instance has
 * a non-zero refcount anymore, so walking the free-node cache is
 * sufficient. Dirty nodes are synced before being freed.
 *
 * @param dev_handle	Device of the instance being unmounted.
 *
 * @return	EOK on success or an error code from fat_node_sync().
 */
static int fat_node_fini_by_dev_handle(dev_handle_t dev_handle)
{
	link_t *lnk;
	fat_node_t *nodep;
	int rc;

	/*
	 * We are called from fat_unmounted() and assume that there are already
	 * no nodes belonging to this instance with non-zero refcount. Therefore
	 * it is sufficient to clean up only the FAT free node list.
	 */

restart:
	fibril_mutex_lock(&ffn_mutex);
	for (lnk = ffn_head.next; lnk != &ffn_head; lnk = lnk->next) {
		nodep = list_get_instance(lnk, fat_node_t, ffn_link);
		/*
		 * Only trylock here: the lock ordering is node -> ffn_mutex
		 * elsewhere, so blocking while holding ffn_mutex could
		 * deadlock. On contention, drop everything and restart.
		 */
		if (!fibril_mutex_trylock(&nodep->lock)) {
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (!fibril_mutex_trylock(&nodep->idx->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (nodep->idx->dev_handle != dev_handle) {
			/* Node belongs to a different instance; skip it. */
			fibril_mutex_unlock(&nodep->idx->lock);
			fibril_mutex_unlock(&nodep->lock);
			continue;
		}

		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);

		/*
		 * We can unlock the node and its index structure because we are
		 * the last player on this playground and VFS is preventing new
		 * players from entering.
		 */
		fibril_mutex_unlock(&nodep->idx->lock);
		fibril_mutex_unlock(&nodep->lock);

		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			if (rc != EOK)
				return rc;
		}
		nodep->idx->nodep = NULL;
		free(nodep->bp);
		free(nodep);

		/* Need to restart because we changed the ffn_head list. */
		goto restart;
	}
	fibril_mutex_unlock(&ffn_mutex);

	return EOK;
}
205
/** Obtain a fresh in-core node structure.
 *
 * First tries to recycle an unused node from the free-node cache; if the
 * cache is empty or contended, a new fs_node/fat_node pair is allocated
 * from the heap. The returned node is reinitialized to a pristine state
 * and linked to its fs_node counterpart.
 *
 * @param nodepp	Output: the fresh node.
 *
 * @return	EOK on success, ENOMEM on allocation failure, or an error
 *		code from syncing an evicted dirty node.
 */
static int fat_node_get_new(fat_node_t **nodepp)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		/* Trylock only: on contention fall back to heap allocation. */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			/* Write back the evicted node before reusing it. */
			rc = fat_node_sync(nodep);
			if (rc != EOK) {
				idxp_tmp->nodep = NULL;
				fibril_mutex_unlock(&nodep->lock);
				fibril_mutex_unlock(&idxp_tmp->lock);
				free(nodep->bp);
				free(nodep);
				return rc;
			}
		}
		/* Detach the recycled node from its old index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return ENOMEM;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return ENOMEM;
		}
	}
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;

	*nodepp = nodep;
	return EOK;
}
262
/** Internal version of fat_node_get().
 *
 * If the node is already instantiated, only its reference count is bumped
 * (removing it from the free-node cache on the first reference).
 * Otherwise the node is read in from its on-disk dentry.
 *
 * @param nodepp	Output: the instantiated node.
 * @param idxp		Locked index structure.
 *
 * @return	EOK on success or an error code.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			/* First reference: remove from the free-node cache. */
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->dev_handle);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc, NULL,
	    (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint16_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
		    uint16_t_le2host(d->firstc));
		if (rc != EOK) {
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = BPS(bs) * SPC(bs) * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}
353
354/*
355 * FAT libfs operations.
356 */
357
/** Return the root node of a FAT instance.
 *
 * The root directory is, by convention, the node with index 0
 * (see the assertion on ridxp->index in fat_mounted()).
 */
int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
{
	return fat_node_get(rfn, dev_handle, 0);
}
362
/** Look up a directory entry by name.
 *
 * Scans the parent directory block by block, comparing each valid dentry
 * name against @a component. On a hit, the child node is instantiated;
 * on a miss, *rfn is set to NULL and EOK is returned.
 *
 * @param rfn		Output: child's fs_node on hit, NULL on miss.
 * @param pfn		Parent directory node.
 * @param component	Name to look up.
 *
 * @return	EOK on success (hit or miss) or an error code.
 */
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned blocks;
	fat_dentry_t *d;
	block_t *b;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	blocks = parentp->size / BPS(bs);
	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* miss */
				rc = block_put(b);
				fibril_mutex_unlock(&parentp->idx->lock);
				*rfn = NULL;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				fat_node_t *nodep;
				/*
				 * Assume tree hierarchy for locking. We
				 * already have the parent and now we are going
				 * to lock the child. Never lock in the oposite
				 * order.
				 */
				fat_idx_t *idx = fat_idx_get_by_pos(
				    parentp->idx->dev_handle, parentp->firstc,
				    i * DPS(bs) + j);
				fibril_mutex_unlock(&parentp->idx->lock);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					rc = block_put(b);
					return (rc == EOK) ? ENOMEM : rc;
				}
				/* fat_idx_get_by_pos() returned idx locked. */
				rc = fat_node_get_core(&nodep, idx);
				fibril_mutex_unlock(&idx->lock);
				if (rc != EOK) {
					(void) block_put(b);
					return rc;
				}
				*rfn = FS_NODE(nodep);
				rc = block_put(b);
				if (rc != EOK)
					(void) fat_node_put(*rfn);
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
	}

	/* Exhausted the directory without finding the component. */
	fibril_mutex_unlock(&parentp->idx->lock);
	*rfn = NULL;
	return EOK;
}
445
446/** Instantiate a FAT in-core node. */
447int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
448{
449 fat_node_t *nodep;
450 fat_idx_t *idxp;
451 int rc;
452
453 idxp = fat_idx_get_by_index(dev_handle, index);
454 if (!idxp) {
455 *rfn = NULL;
456 return EOK;
457 }
458 /* idxp->lock held */
459 rc = fat_node_get_core(&nodep, idxp);
460 fibril_mutex_unlock(&idxp->lock);
461 if (rc == EOK)
462 *rfn = FS_NODE(nodep);
463 return rc;
464}
465
/** Open a node.
 *
 * @return	Always EOK.
 */
int fat_node_open(fs_node_t *fn)
{
	/*
	 * Opening a file is stateless, nothing
	 * to be done here.
	 */
	return EOK;
}
474
/** Drop a reference to an in-core node.
 *
 * When the last reference is dropped, the node is either appended to the
 * free-node cache (normal case) or freed outright if it never acquired an
 * index structure.
 *
 * @param fn	fs_node whose FAT node is being released.
 *
 * @return	Always EOK.
 */
int fat_node_put(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	bool destroy = false;

	fibril_mutex_lock(&nodep->lock);
	if (!--nodep->refcnt) {
		if (nodep->idx) {
			/* Keep the node cached for possible reuse. */
			fibril_mutex_lock(&ffn_mutex);
			list_append(&nodep->ffn_link, &ffn_head);
			fibril_mutex_unlock(&ffn_mutex);
		} else {
			/*
			 * The node does not have any index structure associated
			 * with itself. This can only mean that we are releasing
			 * the node after a failed attempt to allocate the index
			 * structure for it.
			 */
			destroy = true;
		}
	}
	fibril_mutex_unlock(&nodep->lock);
	if (destroy) {
		/* Free outside the node lock; nobody else can see it now. */
		free(nodep->bp);
		free(nodep);
	}
	return EOK;
}
503
504int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
505{
506 fat_idx_t *idxp;
507 fat_node_t *nodep;
508 fat_bs_t *bs;
509 fat_cluster_t mcl, lcl;
510 int rc;
511
512 bs = block_bb_get(dev_handle);
513 if (flags & L_DIRECTORY) {
514 /* allocate a cluster */
515 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
516 if (rc != EOK)
517 return rc;
518 /* populate the new cluster with unused dentries */
519 rc = fat_zero_cluster(bs, dev_handle, mcl);
520 if (rc != EOK) {
521 (void) fat_free_clusters(bs, dev_handle, mcl);
522 return rc;
523 }
524 }
525
526 rc = fat_node_get_new(&nodep);
527 if (rc != EOK) {
528 (void) fat_free_clusters(bs, dev_handle, mcl);
529 return rc;
530 }
531 rc = fat_idx_get_new(&idxp, dev_handle);
532 if (rc != EOK) {
533 (void) fat_free_clusters(bs, dev_handle, mcl);
534 (void) fat_node_put(FS_NODE(nodep));
535 return rc;
536 }
537 /* idxp->lock held */
538 if (flags & L_DIRECTORY) {
539 nodep->type = FAT_DIRECTORY;
540 nodep->firstc = mcl;
541 nodep->size = BPS(bs) * SPC(bs);
542 } else {
543 nodep->type = FAT_FILE;
544 nodep->firstc = FAT_CLST_RES0;
545 nodep->size = 0;
546 }
547 nodep->lnkcnt = 0; /* not linked anywhere */
548 nodep->refcnt = 1;
549 nodep->dirty = true;
550
551 nodep->idx = idxp;
552 idxp->nodep = nodep;
553
554 fibril_mutex_unlock(&idxp->lock);
555 *rfn = FS_NODE(nodep);
556 return EOK;
557}
558
/** Destroy an unlinked, childless node and free its clusters.
 *
 * @param fn	fs_node of the node to destroy. Must have lnkcnt == 0 and
 *		no children.
 *
 * @return	EOK on success or an error code.
 */
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	bool has_children;
	int rc;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	rc = fat_has_children(&has_children, fn);
	if (rc != EOK)
		return rc;
	assert(!has_children);

	bs = block_bb_get(nodep->idx->dev_handle);
	if (nodep->firstc != FAT_CLST_RES0) {
		/* A file with no clusters has FAT_CLST_RES0 as firstc. */
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->dev_handle,
		    nodep->firstc);
	}

	/* If no clusters were freed, rc still holds EOK from above. */
	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}
595
/** Link a child node into a parent directory under a given name.
 *
 * Finds (or creates, by growing the parent) an unused dentry, writes the
 * name into it, and for directories also creates the "." and ".." entries
 * on a best-effort basis. Finally the child's index structure is filled
 * in and hashed into the position hash.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node; must not be linked yet (lnkcnt == 0).
 * @param name	Name for the new link; must be a valid FAT 8.3 name.
 *
 * @return	EOK on success, EMLINK if the child is already linked,
 *		ENOTSUP for an unrepresentable name, ENOSPC if the root
 *		directory is full, or another error code.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	unsigned i, j;
	unsigned blocks;
	fat_cluster_t mcl, lcl;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get us an unused parent node's dentry or grow the parent and allocate
	 * a new one.
	 */

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);

	blocks = parentp->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot */
				goto hit;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
	}
	/* No free slot found: the new dentry will be slot 0 of a new block. */
	j = 0;

	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	if (parentp->firstc == FAT_CLST_ROOT) {
		/* Can't grow the root directory. */
		fibril_mutex_unlock(&parentp->idx->lock);
		return ENOSPC;
	}
	rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_append_clusters(bs, parentp, mcl, lcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	parentp->size += BPS(bs) * SPC(bs);
	parentp->dirty = true;		/* need to sync node */
	/* i == blocks here, i.e. the first block of the appended cluster. */
	rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	d = (fat_dentry_t *)b->data;

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except of the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	fibril_mutex_unlock(&parentp->idx->lock);
	if (rc != EOK)
		return rc;

	fibril_mutex_lock(&childp->idx->lock);

	if (childp->type == FAT_DIRECTORY) {
		/*
		 * If possible, create the Sub-directory Identifier Entry and
		 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
		 * These entries are not mandatory according to Standard
		 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
		 * rather a sign of our good will.
		 */
		rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			/*
			 * Rather than returning an error, simply skip the
			 * creation of these two entries.
			 */
			goto skip_dots;
		}
		d = (fat_dentry_t *) b->data;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (str_cmp((char *) d->name, FAT_NAME_DOT)) == 0) {
			memset(d, 0, sizeof(fat_dentry_t));
			str_cpy((char *) d->name, 8, FAT_NAME_DOT);
			str_cpy((char *) d->ext, 3, FAT_EXT_PAD);
			d->attr = FAT_ATTR_SUBDIR;
			d->firstc = host2uint16_t_le(childp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		d++;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (str_cmp((char *) d->name, FAT_NAME_DOT_DOT) == 0)) {
			memset(d, 0, sizeof(fat_dentry_t));
			str_cpy((char *) d->name, 8, FAT_NAME_DOT_DOT);
			str_cpy((char *) d->ext, 3, FAT_EXT_PAD);
			d->attr = FAT_ATTR_SUBDIR;
			/* ".." in a root child points at cluster 0. */
			d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
			    host2uint16_t_le(FAT_CLST_RES0) :
			    host2uint16_t_le(parentp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		b->dirty = true;		/* need to sync block */
		/*
		 * Ignore the return value as we would have fallen through on error
		 * anyway.
		 */
		(void) block_put(b);
	}
skip_dots:

	/* Record the child's position within the parent directory. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * DPS(bs) + j;
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
777
778int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
779{
780 fat_node_t *parentp = FAT_NODE(pfn);
781 fat_node_t *childp = FAT_NODE(cfn);
782 fat_bs_t *bs;
783 fat_dentry_t *d;
784 block_t *b;
785 bool has_children;
786 int rc;
787
788 if (!parentp)
789 return EBUSY;
790
791 rc = fat_has_children(&has_children, cfn);
792 if (rc != EOK)
793 return rc;
794 if (has_children)
795 return ENOTEMPTY;
796
797 fibril_mutex_lock(&parentp->lock);
798 fibril_mutex_lock(&childp->lock);
799 assert(childp->lnkcnt == 1);
800 fibril_mutex_lock(&childp->idx->lock);
801 bs = block_bb_get(childp->idx->dev_handle);
802
803 rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
804 NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
805 BLOCK_FLAGS_NONE);
806 if (rc != EOK)
807 goto error;
808 d = (fat_dentry_t *)b->data +
809 (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
810 /* mark the dentry as not-currently-used */
811 d->name[0] = FAT_DENTRY_ERASED;
812 b->dirty = true; /* need to sync block */
813 rc = block_put(b);
814 if (rc != EOK)
815 goto error;
816
817 /* remove the index structure from the position hash */
818 fat_idx_hashout(childp->idx);
819 /* clear position information */
820 childp->idx->pfc = FAT_CLST_RES0;
821 childp->idx->pdi = 0;
822 fibril_mutex_unlock(&childp->idx->lock);
823 childp->lnkcnt = 0;
824 childp->dirty = true;
825 fibril_mutex_unlock(&childp->lock);
826 fibril_mutex_unlock(&parentp->lock);
827
828 return EOK;
829
830error:
831 fibril_mutex_unlock(&parentp->idx->lock);
832 fibril_mutex_unlock(&childp->lock);
833 fibril_mutex_unlock(&childp->idx->lock);
834 return rc;
835}
836
/** Determine whether a directory node contains any dentries.
 *
 * Files trivially have no children. Directories are scanned dentry by
 * dentry until a valid entry (has children), the last-entry marker or
 * the end of the directory (no children) is found.
 *
 * @param has_children	Output: true iff the node has at least one child.
 * @param fn		Node to examine.
 *
 * @return	EOK on success or a block-layer error code.
 */
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}

	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->dev_handle);

	blocks = nodep->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* No further entries can follow. */
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}
894
895
896fs_index_t fat_index_get(fs_node_t *fn)
897{
898 return FAT_NODE(fn)->idx->index;
899}
900
901aoff64_t fat_size_get(fs_node_t *fn)
902{
903 return FAT_NODE(fn)->size;
904}
905
906unsigned fat_lnkcnt_get(fs_node_t *fn)
907{
908 return FAT_NODE(fn)->lnkcnt;
909}
910
911char fat_plb_get_char(unsigned pos)
912{
913 return fat_reg.plb_ro[pos % PLB_SIZE];
914}
915
916bool fat_is_directory(fs_node_t *fn)
917{
918 return FAT_NODE(fn)->type == FAT_DIRECTORY;
919}
920
921bool fat_is_file(fs_node_t *fn)
922{
923 return FAT_NODE(fn)->type == FAT_FILE;
924}
925
/** Return the device handle associated with a node.
 *
 * Not implemented for FAT; always returns 0.
 */
dev_handle_t fat_device_get(fs_node_t *node)
{
	return 0;
}
930
/** libfs operations dispatch table registered for the FAT server. */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_open = fat_node_open,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file,
	.device_get = fat_device_get
};
951
952/*
953 * VFS operations.
954 */
955
/** Handle the VFS_OUT mounted request: bring up a new FAT instance.
 *
 * Accepts the mount options, initializes libblock and the block cache,
 * sanity-checks the file system and instantiates the root node. Answers
 * the request with EOK and the root's index/size/lnkcnt on success, or
 * with an error code after unwinding all partial initialization.
 */
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
	enum cache_mode cmode;
	fat_bs_t *bs;

	/* Accept the mount options */
	char *opts;
	int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);

	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}

	/* Check for option enabling write through. */
	if (str_cmp(opts, "wtcache") == 0)
		cmode = CACHE_MODE_WT;
	else
		cmode = CACHE_MODE_WB;

	free(opts);

	/* initialize libblock */
	rc = block_init(dev_handle, BS_SIZE);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}

	/* prepare the boot block */
	rc = block_bb_read(dev_handle, BS_BLOCK);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	/* get the buffer with the boot sector */
	bs = block_bb_get(dev_handle);

	/* Only a sector size equal to BS_SIZE is supported. */
	if (BPS(bs) != BS_SIZE) {
		block_fini(dev_handle);
		ipc_answer_0(rid, ENOTSUP);
		return;
	}

	/* Initialize the block cache */
	rc = block_cache_init(dev_handle, BPS(bs), 0 /* XXX */, cmode);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	/* Do some simple sanity checks on the file system. */
	rc = fat_sanity_check(bs, dev_handle);
	if (rc != EOK) {
		(void) block_cache_fini(dev_handle);
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	rc = fat_idx_init_by_dev_handle(dev_handle);
	if (rc != EOK) {
		(void) block_cache_fini(dev_handle);
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	/* Initialize the root node. */
	fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
	if (!rfn) {
		(void) block_cache_fini(dev_handle);
		block_fini(dev_handle);
		fat_idx_fini_by_dev_handle(dev_handle);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	fs_node_initialize(rfn);
	fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
	if (!rootp) {
		free(rfn);
		(void) block_cache_fini(dev_handle);
		block_fini(dev_handle);
		fat_idx_fini_by_dev_handle(dev_handle);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	fat_node_initialize(rootp);

	fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
	if (!ridxp) {
		free(rfn);
		free(rootp);
		(void) block_cache_fini(dev_handle);
		block_fini(dev_handle);
		fat_idx_fini_by_dev_handle(dev_handle);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	assert(ridxp->index == 0);
	/* ridxp->lock held */

	/* The fixed-size root directory gets special-cased values. */
	rootp->type = FAT_DIRECTORY;
	rootp->firstc = FAT_CLST_ROOT;
	rootp->refcnt = 1;
	rootp->lnkcnt = 0;	/* FS root is not linked */
	rootp->size = RDE(bs) * sizeof(fat_dentry_t);
	rootp->idx = ridxp;
	ridxp->nodep = rootp;
	rootp->bp = rfn;
	rfn->data = rootp;

	fibril_mutex_unlock(&ridxp->lock);

	ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}
1076
/** Handle the VFS_OUT mount request by delegating to libfs. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1081
/** Handle the VFS_OUT unmounted request: tear down a FAT instance.
 *
 * Refuses with EBUSY while any node other than the root is referenced.
 * Otherwise drops the root, writes back dirty state and shuts down the
 * block cache and libblock for this device.
 */
void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	rc = fat_root_get(&fn, dev_handle);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}
	nodep = FAT_NODE(fn);

	/*
	 * We expect exactly two references on the root node. One for the
	 * fat_root_get() above and one created in fat_mounted().
	 */
	if (nodep->refcnt != 2) {
		(void) fat_node_put(fn);
		ipc_answer_0(rid, EBUSY);
		return;
	}

	/*
	 * Put the root node and force it to the FAT free node list.
	 */
	(void) fat_node_put(fn);
	(void) fat_node_put(fn);

	/*
	 * Perform cleanup of the node structures, index structures and
	 * associated data. Write back this file system's dirty blocks and
	 * stop using libblock for this instance.
	 */
	(void) fat_node_fini_by_dev_handle(dev_handle);
	fat_idx_fini_by_dev_handle(dev_handle);
	(void) block_cache_fini(dev_handle);
	block_fini(dev_handle);

	ipc_answer_0(rid, EOK);
}
1124
/** Handle the VFS_OUT unmount request by delegating to libfs. */
void fat_unmount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_unmount(&fat_libfs_ops, rid, request);
}
1129
/** Handle the VFS_OUT lookup request by delegating to libfs. */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1134
/** Handle the VFS_OUT_READ request.
 *
 * For regular files, at most one block worth of data is returned per call and
 * the client is expected to cope with short reads. For directories, the
 * request acts as a readdir(): the position is an index into the array of
 * directory entries and a single entry name is sent back per call.
 *
 * @param rid		Request ID to answer.
 * @param request	The incoming IPC request carrying the device handle,
 *			file index and 64-bit position split into two args.
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
	/* The 64-bit position is split across two IPC arguments. */
	aoff64_t pos =
	    (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, dev_handle, index);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	/* Receive the data-read handshake from the client. */
	ipc_callid_t callid;
	size_t len;
	if (!async_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) async_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the block boundary and to the file size. */
			bytes = min(len, BPS(bs) - pos % BPS(bs));
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
			    BLOCK_FLAGS_NONE);
			if (rc != EOK) {
				fat_node_put(fn);
				ipc_answer_0(callid, rc);
				ipc_answer_0(rid, rc);
				return;
			}
			(void) async_data_read_finalize(callid,
			    b->data + pos % BPS(bs), bytes);
			rc = block_put(b);
			if (rc != EOK) {
				fat_node_put(fn);
				ipc_answer_0(rid, rc);
				return;
			}
		}
	} else {
		unsigned bnum;
		aoff64_t spos = pos;	/* starting position, for byte count */
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % BPS(bs) == 0);
		assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs);
		while (bnum < nodep->size / BPS(bs)) {
			aoff64_t o;

			rc = fat_block_get(&b, bs, nodep, bnum,
			    BLOCK_FLAGS_NONE);
			if (rc != EOK)
				goto err;
			/* Scan the dentries within the current block. */
			for (o = pos % (BPS(bs) / sizeof(fat_dentry_t));
			    o < BPS(bs) / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
				case FAT_DENTRY_FREE:
					continue;
				case FAT_DENTRY_LAST:
					/* No more entries follow. */
					rc = block_put(b);
					if (rc != EOK)
						goto err;
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					/* Found an entry; extract its name. */
					fat_dentry_name_get(d, name);
					rc = block_put(b);
					if (rc != EOK)
						goto err;
					goto hit;
				}
			}
			rc = block_put(b);
			if (rc != EOK)
				goto err;
			bnum++;
		}
miss:
		/* No (more) entries: answer ENOENT (or the put error). */
		rc = fat_node_put(fn);
		ipc_answer_0(callid, rc != EOK ? rc : ENOENT);
		ipc_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
		return;

err:
		(void) fat_node_put(fn);
		ipc_answer_0(callid, rc);
		ipc_answer_0(rid, rc);
		return;

hit:
		/* Send the name and report how many dentries were consumed. */
		(void) async_data_read_finalize(callid, name, str_size(name) + 1);
		bytes = (pos - spos) + 1;
	}

	rc = fat_node_put(fn);
	ipc_answer_1(rid, rc, (ipcarg_t)bytes);
}
1271
1272void fat_write(ipc_callid_t rid, ipc_call_t *request)
1273{
1274 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
1275 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1276 aoff64_t pos =
1277 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
1278 fs_node_t *fn;
1279 fat_node_t *nodep;
1280 fat_bs_t *bs;
1281 size_t bytes, size;
1282 block_t *b;
1283 aoff64_t boundary;
1284 int flags = BLOCK_FLAGS_NONE;
1285 int rc;
1286
1287 rc = fat_node_get(&fn, dev_handle, index);
1288 if (rc != EOK) {
1289 ipc_answer_0(rid, rc);
1290 return;
1291 }
1292 if (!fn) {
1293 ipc_answer_0(rid, ENOENT);
1294 return;
1295 }
1296 nodep = FAT_NODE(fn);
1297
1298 ipc_callid_t callid;
1299 size_t len;
1300 if (!async_data_write_receive(&callid, &len)) {
1301 (void) fat_node_put(fn);
1302 ipc_answer_0(callid, EINVAL);
1303 ipc_answer_0(rid, EINVAL);
1304 return;
1305 }
1306
1307 bs = block_bb_get(dev_handle);
1308
1309 /*
1310 * In all scenarios, we will attempt to write out only one block worth
1311 * of data at maximum. There might be some more efficient approaches,
1312 * but this one greatly simplifies fat_write(). Note that we can afford
1313 * to do this because the client must be ready to handle the return
1314 * value signalizing a smaller number of bytes written.
1315 */
1316 bytes = min(len, BPS(bs) - pos % BPS(bs));
1317 if (bytes == BPS(bs))
1318 flags |= BLOCK_FLAGS_NOREAD;
1319
1320 boundary = ROUND_UP(nodep->size, BPC(bs));
1321 if (pos < boundary) {
1322 /*
1323 * This is the easier case - we are either overwriting already
1324 * existing contents or writing behind the EOF, but still within
1325 * the limits of the last cluster. The node size may grow to the
1326 * next block size boundary.
1327 */
1328 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1329 if (rc != EOK) {
1330 (void) fat_node_put(fn);
1331 ipc_answer_0(callid, rc);
1332 ipc_answer_0(rid, rc);
1333 return;
1334 }
1335 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
1336 if (rc != EOK) {
1337 (void) fat_node_put(fn);
1338 ipc_answer_0(callid, rc);
1339 ipc_answer_0(rid, rc);
1340 return;
1341 }
1342 (void) async_data_write_finalize(callid,
1343 b->data + pos % BPS(bs), bytes);
1344 b->dirty = true; /* need to sync block */
1345 rc = block_put(b);
1346 if (rc != EOK) {
1347 (void) fat_node_put(fn);
1348 ipc_answer_0(rid, rc);
1349 return;
1350 }
1351 if (pos + bytes > nodep->size) {
1352 nodep->size = pos + bytes;
1353 nodep->dirty = true; /* need to sync node */
1354 }
1355 size = nodep->size;
1356 rc = fat_node_put(fn);
1357 ipc_answer_2(rid, rc, bytes, nodep->size);
1358 return;
1359 } else {
1360 /*
1361 * This is the more difficult case. We must allocate new
1362 * clusters for the node and zero them out.
1363 */
1364 unsigned nclsts;
1365 fat_cluster_t mcl, lcl;
1366
1367 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
1368 /* create an independent chain of nclsts clusters in all FATs */
1369 rc = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
1370 if (rc != EOK) {
1371 /* could not allocate a chain of nclsts clusters */
1372 (void) fat_node_put(fn);
1373 ipc_answer_0(callid, rc);
1374 ipc_answer_0(rid, rc);
1375 return;
1376 }
1377 /* zero fill any gaps */
1378 rc = fat_fill_gap(bs, nodep, mcl, pos);
1379 if (rc != EOK) {
1380 (void) fat_free_clusters(bs, dev_handle, mcl);
1381 (void) fat_node_put(fn);
1382 ipc_answer_0(callid, rc);
1383 ipc_answer_0(rid, rc);
1384 return;
1385 }
1386 rc = _fat_block_get(&b, bs, dev_handle, lcl, NULL,
1387 (pos / BPS(bs)) % SPC(bs), flags);
1388 if (rc != EOK) {
1389 (void) fat_free_clusters(bs, dev_handle, mcl);
1390 (void) fat_node_put(fn);
1391 ipc_answer_0(callid, rc);
1392 ipc_answer_0(rid, rc);
1393 return;
1394 }
1395 (void) async_data_write_finalize(callid,
1396 b->data + pos % BPS(bs), bytes);
1397 b->dirty = true; /* need to sync block */
1398 rc = block_put(b);
1399 if (rc != EOK) {
1400 (void) fat_free_clusters(bs, dev_handle, mcl);
1401 (void) fat_node_put(fn);
1402 ipc_answer_0(rid, rc);
1403 return;
1404 }
1405 /*
1406 * Append the cluster chain starting in mcl to the end of the
1407 * node's cluster chain.
1408 */
1409 rc = fat_append_clusters(bs, nodep, mcl, lcl);
1410 if (rc != EOK) {
1411 (void) fat_free_clusters(bs, dev_handle, mcl);
1412 (void) fat_node_put(fn);
1413 ipc_answer_0(rid, rc);
1414 return;
1415 }
1416 nodep->size = size = pos + bytes;
1417 nodep->dirty = true; /* need to sync node */
1418 rc = fat_node_put(fn);
1419 ipc_answer_2(rid, rc, bytes, size);
1420 return;
1421 }
1422}
1423
1424void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1425{
1426 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
1427 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1428 aoff64_t size =
1429 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
1430 fs_node_t *fn;
1431 fat_node_t *nodep;
1432 fat_bs_t *bs;
1433 int rc;
1434
1435 rc = fat_node_get(&fn, dev_handle, index);
1436 if (rc != EOK) {
1437 ipc_answer_0(rid, rc);
1438 return;
1439 }
1440 if (!fn) {
1441 ipc_answer_0(rid, ENOENT);
1442 return;
1443 }
1444 nodep = FAT_NODE(fn);
1445
1446 bs = block_bb_get(dev_handle);
1447
1448 if (nodep->size == size) {
1449 rc = EOK;
1450 } else if (nodep->size < size) {
1451 /*
1452 * The standard says we have the freedom to grow the node.
1453 * For now, we simply return an error.
1454 */
1455 rc = EINVAL;
1456 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
1457 /*
1458 * The node will be shrunk, but no clusters will be deallocated.
1459 */
1460 nodep->size = size;
1461 nodep->dirty = true; /* need to sync node */
1462 rc = EOK;
1463 } else {
1464 /*
1465 * The node will be shrunk, clusters will be deallocated.
1466 */
1467 if (size == 0) {
1468 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1469 if (rc != EOK)
1470 goto out;
1471 } else {
1472 fat_cluster_t lastc;
1473 rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
1474 &lastc, NULL, (size - 1) / BPC(bs));
1475 if (rc != EOK)
1476 goto out;
1477 rc = fat_chop_clusters(bs, nodep, lastc);
1478 if (rc != EOK)
1479 goto out;
1480 }
1481 nodep->size = size;
1482 nodep->dirty = true; /* need to sync node */
1483 rc = EOK;
1484 }
1485out:
1486 fat_node_put(fn);
1487 ipc_answer_0(rid, rc);
1488 return;
1489}
1490
/** Handle the VFS_OUT_CLOSE request.
 *
 * FAT keeps no per-open state, so closing is a no-op answered with EOK.
 */
void fat_close(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_answer_0(rid, EOK);
}
1495
1496void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1497{
1498 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1499 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1500 fs_node_t *fn;
1501 int rc;
1502
1503 rc = fat_node_get(&fn, dev_handle, index);
1504 if (rc != EOK) {
1505 ipc_answer_0(rid, rc);
1506 return;
1507 }
1508 if (!fn) {
1509 ipc_answer_0(rid, ENOENT);
1510 return;
1511 }
1512
1513 rc = fat_destroy_node(fn);
1514 ipc_answer_0(rid, rc);
1515}
1516
/** Handle the VFS_OUT_OPEN_NODE request by delegating to libfs. */
void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1521
/** Handle the VFS_OUT_STAT request by delegating to libfs. */
void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1526
/** Handle the VFS_OUT_SYNC request.
 *
 * No explicit write-back is performed here; the request is simply answered
 * with EOK.
 */
void fat_sync(ipc_callid_t rid, ipc_call_t *request)
{
	/* Dummy implementation */
	ipc_answer_0(rid, EOK);
}
1532
1533/**
1534 * @}
1535 */
Note: See TracBrowser for help on using the repository browser.