source: mainline/uspace/srv/fs/fat/fat_ops.c@ 87d4422

Branches containing this file: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 87d4422 was 5ca5eaa7, checked in by Jakub Jermar <jakub@…>, 14 years ago

Create an extra reference to an unlinked FAT node. This keeps the node
resident in memory until the node is eventually destroyed. The extra
reference especially prevents the unlinked node from being put onto the
node free list in the meantime. Otherwise the node structure can be
recycled for instantiating some unrelated node.

  • Property mode set to 100644
File size: 37.7 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/services.h>
45#include <ipc/devmap.h>
46#include <macros.h>
47#include <async.h>
48#include <errno.h>
49#include <str.h>
50#include <byteorder.h>
51#include <adt/hash_table.h>
52#include <adt/list.h>
53#include <assert.h>
54#include <fibril_synch.h>
55#include <sys/mman.h>
56#include <align.h>
57
58#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
59#define FS_NODE(node) ((node) ? (node)->bp : NULL)
60
61#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
62#define BPC(bs) (BPS((bs)) * SPC((bs)))
63
64/** Mutex protecting the list of cached free FAT nodes. */
65static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
66
67/** List of cached free FAT nodes. */
68static LIST_INITIALIZE(ffn_head);
69
70/*
71 * Forward declarations of FAT libfs operations.
72 */
73static int fat_root_get(fs_node_t **, devmap_handle_t);
74static int fat_match(fs_node_t **, fs_node_t *, const char *);
75static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);
76static int fat_node_open(fs_node_t *);
77static int fat_node_put(fs_node_t *);
78static int fat_create_node(fs_node_t **, devmap_handle_t, int);
79static int fat_destroy_node(fs_node_t *);
80static int fat_link(fs_node_t *, fs_node_t *, const char *);
81static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
82static int fat_has_children(bool *, fs_node_t *);
83static fs_index_t fat_index_get(fs_node_t *);
84static aoff64_t fat_size_get(fs_node_t *);
85static unsigned fat_lnkcnt_get(fs_node_t *);
86static char fat_plb_get_char(unsigned);
87static bool fat_is_directory(fs_node_t *);
88static bool fat_is_file(fs_node_t *node);
89static devmap_handle_t fat_device_get(fs_node_t *node);
90
91/*
92 * Helper functions.
93 */
94static void fat_node_initialize(fat_node_t *node)
95{
96 fibril_mutex_initialize(&node->lock);
97 node->bp = NULL;
98 node->idx = NULL;
99 node->type = 0;
100 link_initialize(&node->ffn_link);
101 node->size = 0;
102 node->lnkcnt = 0;
103 node->refcnt = 0;
104 node->dirty = false;
105 node->lastc_cached_valid = false;
106 node->lastc_cached_value = FAT_CLST_LAST1;
107 node->currc_cached_valid = false;
108 node->currc_cached_bn = 0;
109 node->currc_cached_value = FAT_CLST_LAST1;
110}
111
112static int fat_node_sync(fat_node_t *node)
113{
114 block_t *b;
115 fat_bs_t *bs;
116 fat_dentry_t *d;
117 int rc;
118
119 assert(node->dirty);
120
121 bs = block_bb_get(node->idx->devmap_handle);
122
123 /* Read the block that contains the dentry of interest. */
124 rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
125 NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
126 BLOCK_FLAGS_NONE);
127 if (rc != EOK)
128 return rc;
129
130 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));
131
132 d->firstc = host2uint16_t_le(node->firstc);
133 if (node->type == FAT_FILE) {
134 d->size = host2uint32_t_le(node->size);
135 } else if (node->type == FAT_DIRECTORY) {
136 d->attr = FAT_ATTR_SUBDIR;
137 }
138
139 /* TODO: update other fields? (e.g time fields) */
140
141 b->dirty = true; /* need to sync block */
142 rc = block_put(b);
143 return rc;
144}
145
/** Dispose of cached free-list nodes belonging to one file system instance.
 *
 * Walks the global free node list, syncs any dirty node of the given
 * instance to disk and frees its memory.
 *
 * @param devmap_handle	Instance whose cached nodes are to be finalized.
 *
 * @return	EOK on success or an error code from fat_node_sync().
 */
static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle)
{
	link_t *lnk;
	fat_node_t *nodep;
	int rc;

	/*
	 * We are called from fat_unmounted() and assume that there are already
	 * no nodes belonging to this instance with non-zero refcount. Therefore
	 * it is sufficient to clean up only the FAT free node list.
	 */

restart:
	fibril_mutex_lock(&ffn_mutex);
	for (lnk = ffn_head.next; lnk != &ffn_head; lnk = lnk->next) {
		nodep = list_get_instance(lnk, fat_node_t, ffn_link);
		/*
		 * Trylock is used to avoid lock-ordering deadlocks; on any
		 * contention we drop everything and rescan from the start.
		 */
		if (!fibril_mutex_trylock(&nodep->lock)) {
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (!fibril_mutex_trylock(&nodep->idx->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (nodep->idx->devmap_handle != devmap_handle) {
			/* Node belongs to some other instance; skip it. */
			fibril_mutex_unlock(&nodep->idx->lock);
			fibril_mutex_unlock(&nodep->lock);
			continue;
		}

		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);

		/*
		 * We can unlock the node and its index structure because we are
		 * the last player on this playground and VFS is preventing new
		 * players from entering.
		 */
		fibril_mutex_unlock(&nodep->idx->lock);
		fibril_mutex_unlock(&nodep->lock);

		if (nodep->dirty) {
			/* Flush the dentry back to disk before freeing. */
			rc = fat_node_sync(nodep);
			if (rc != EOK)
				return rc;
		}
		nodep->idx->nodep = NULL;
		free(nodep->bp);
		free(nodep);

		/* Need to restart because we changed the ffn_head list. */
		goto restart;
	}
	fibril_mutex_unlock(&ffn_mutex);

	return EOK;
}
204
/** Obtain a fresh fat_node_t / fs_node_t pair.
 *
 * Preferably recycles a structure from the cached free node list (syncing
 * it to disk first if dirty); falls back to heap allocation otherwise.
 *
 * @param nodepp	On success, set to the initialized node.
 *
 * @return	EOK, ENOMEM, or an error code from fat_node_sync().
 */
static int fat_node_get_new(fat_node_t **nodepp)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		/* On lock contention, give up on the cache entirely. */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			/* Recycled node must reach the disk first. */
			rc = fat_node_sync(nodep);
			if (rc != EOK) {
				idxp_tmp->nodep = NULL;
				fibril_mutex_unlock(&nodep->lock);
				fibril_mutex_unlock(&idxp_tmp->lock);
				free(nodep->bp);
				free(nodep);
				return rc;
			}
		}
		/* Detach the node from its former index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return ENOMEM;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return ENOMEM;
		}
	}
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;

	*nodepp = nodep;
	return EOK;
}
261
/** Internal version of fat_node_get().
 *
 * Returns the in-core node for the given index structure, instantiating it
 * from the on-disk dentry if necessary. A node freshly pulled back from the
 * free list is removed from it.
 *
 * @param nodepp	On success, set to the referenced node.
 * @param idxp		Locked index structure.
 *
 * @return	EOK on success or an error code.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			/* First new reference: take it off the free list. */
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->devmap_handle);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL,
	    (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint16_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle,
		    uint16_t_le2host(d->firstc));
		if (rc != EOK) {
			(void) block_put(b);
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = BPS(bs) * SPC(bs) * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}
353
354/*
355 * FAT libfs operations.
356 */
357
/** Return the root node of a FAT instance; the root always has index 0. */
int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)
{
	return fat_node_get(rfn, devmap_handle, 0);
}
362
/** Look up a directory entry by name.
 *
 * Scans the parent directory's dentries for a component match.
 *
 * @param rfn		On success, the matching node, or NULL on a miss.
 * @param pfn		Parent directory node.
 * @param component	Name to look for.
 *
 * @return	EOK (including the not-found case) or an error code.
 */
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned blocks;
	fat_dentry_t *d;
	devmap_handle_t devmap_handle;
	block_t *b;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	devmap_handle = parentp->idx->devmap_handle;
	fibril_mutex_unlock(&parentp->idx->lock);

	bs = block_bb_get(devmap_handle);
	blocks = parentp->size / BPS(bs);
	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK)
			return rc;
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* miss */
				rc = block_put(b);
				*rfn = NULL;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				fat_node_t *nodep;
				fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,
				    parentp->firstc, i * DPS(bs) + j);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					rc = block_put(b);
					return (rc == EOK) ? ENOMEM : rc;
				}
				rc = fat_node_get_core(&nodep, idx);
				fibril_mutex_unlock(&idx->lock);
				if (rc != EOK) {
					(void) block_put(b);
					return rc;
				}
				*rfn = FS_NODE(nodep);
				rc = block_put(b);
				if (rc != EOK)
					(void) fat_node_put(*rfn);
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK)
			return rc;
	}

	/* Scanned the whole directory without a match. */
	*rfn = NULL;
	return EOK;
}
435
436/** Instantiate a FAT in-core node. */
437int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)
438{
439 fat_node_t *nodep;
440 fat_idx_t *idxp;
441 int rc;
442
443 idxp = fat_idx_get_by_index(devmap_handle, index);
444 if (!idxp) {
445 *rfn = NULL;
446 return EOK;
447 }
448 /* idxp->lock held */
449 rc = fat_node_get_core(&nodep, idxp);
450 fibril_mutex_unlock(&idxp->lock);
451 if (rc == EOK)
452 *rfn = FS_NODE(nodep);
453 return rc;
454}
455
/** Open a node — a no-op for FAT. */
int fat_node_open(fs_node_t *fn)
{
	/*
	 * Opening a file is stateless, nothing
	 * to be done here.
	 */
	return EOK;
}
464
465int fat_node_put(fs_node_t *fn)
466{
467 fat_node_t *nodep = FAT_NODE(fn);
468 bool destroy = false;
469
470 fibril_mutex_lock(&nodep->lock);
471 if (!--nodep->refcnt) {
472 if (nodep->idx) {
473 fibril_mutex_lock(&ffn_mutex);
474 list_append(&nodep->ffn_link, &ffn_head);
475 fibril_mutex_unlock(&ffn_mutex);
476 } else {
477 /*
478 * The node does not have any index structure associated
479 * with itself. This can only mean that we are releasing
480 * the node after a failed attempt to allocate the index
481 * structure for it.
482 */
483 destroy = true;
484 }
485 }
486 fibril_mutex_unlock(&nodep->lock);
487 if (destroy) {
488 free(nodep->bp);
489 free(nodep);
490 }
491 return EOK;
492}
493
494int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags)
495{
496 fat_idx_t *idxp;
497 fat_node_t *nodep;
498 fat_bs_t *bs;
499 fat_cluster_t mcl, lcl;
500 int rc;
501
502 bs = block_bb_get(devmap_handle);
503 if (flags & L_DIRECTORY) {
504 /* allocate a cluster */
505 rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl);
506 if (rc != EOK)
507 return rc;
508 /* populate the new cluster with unused dentries */
509 rc = fat_zero_cluster(bs, devmap_handle, mcl);
510 if (rc != EOK) {
511 (void) fat_free_clusters(bs, devmap_handle, mcl);
512 return rc;
513 }
514 }
515
516 rc = fat_node_get_new(&nodep);
517 if (rc != EOK) {
518 (void) fat_free_clusters(bs, devmap_handle, mcl);
519 return rc;
520 }
521 rc = fat_idx_get_new(&idxp, devmap_handle);
522 if (rc != EOK) {
523 (void) fat_free_clusters(bs, devmap_handle, mcl);
524 (void) fat_node_put(FS_NODE(nodep));
525 return rc;
526 }
527 /* idxp->lock held */
528 if (flags & L_DIRECTORY) {
529 nodep->type = FAT_DIRECTORY;
530 nodep->firstc = mcl;
531 nodep->size = BPS(bs) * SPC(bs);
532 } else {
533 nodep->type = FAT_FILE;
534 nodep->firstc = FAT_CLST_RES0;
535 nodep->size = 0;
536 }
537 nodep->lnkcnt = 0; /* not linked anywhere */
538 nodep->refcnt = 1;
539 nodep->dirty = true;
540
541 nodep->idx = idxp;
542 idxp->nodep = nodep;
543
544 fibril_mutex_unlock(&idxp->lock);
545 *rfn = FS_NODE(nodep);
546 return EOK;
547}
548
/** Destroy an unlinked, childless node and release its clusters.
 *
 * @param fn	Node with lnkcnt == 0, unreachable from the namespace.
 *
 * @return	EOK or an error code from fat_free_clusters().
 */
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	bool has_children;
	int rc;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	rc = fat_has_children(&has_children, fn);
	if (rc != EOK)
		return rc;
	assert(!has_children);

	bs = block_bb_get(nodep->idx->devmap_handle);
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->devmap_handle,
		    nodep->firstc);
	}
	/* Note: rc is still EOK here if the node had no clusters. */

	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}
585
/** Link a child node into a parent directory under the given name.
 *
 * Finds (or creates, by growing the parent) a free dentry, writes the name
 * into it, and for directories also attempts to create the "." and ".."
 * entries. Finally publishes the child's position in the index hash.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node with lnkcnt == 0.
 * @param name	Component name; must be a valid 8.3 FAT name.
 *
 * @return	EOK, EMLINK, ENOTSUP, ENOSPC, or a block-layer error.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	unsigned i, j;
	unsigned blocks;
	fat_cluster_t mcl, lcl;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get us an unused parent node's dentry or grow the parent and allocate
	 * a new one.
	 */

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->devmap_handle);

	blocks = parentp->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot */
				goto hit;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
	}
	/* The new dentry will be the first one in the appended cluster. */
	j = 0;

	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	if (parentp->firstc == FAT_CLST_ROOT) {
		/* Can't grow the root directory. */
		fibril_mutex_unlock(&parentp->idx->lock);
		return ENOSPC;
	}
	rc = fat_alloc_clusters(bs, parentp->idx->devmap_handle, 1, &mcl, &lcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_zero_cluster(bs, parentp->idx->devmap_handle, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_append_clusters(bs, parentp, mcl, lcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	parentp->size += BPS(bs) * SPC(bs);
	parentp->dirty = true;		/* need to sync node */
	/* i == blocks here, i.e. the first block of the new cluster. */
	rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	d = (fat_dentry_t *)b->data;

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except of the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	fibril_mutex_unlock(&parentp->idx->lock);
	if (rc != EOK)
		return rc;

	fibril_mutex_lock(&childp->idx->lock);

	if (childp->type == FAT_DIRECTORY) {
		/*
		 * If possible, create the Sub-directory Identifier Entry and
		 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
		 * These entries are not mandatory according to Standard
		 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
		 * rather a sign of our good will.
		 */
		rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			/*
			 * Rather than returning an error, simply skip the
			 * creation of these two entries.
			 */
			goto skip_dots;
		}
		d = (fat_dentry_t *) b->data;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (str_cmp((char *) d->name, FAT_NAME_DOT)) == 0) {
			memset(d, 0, sizeof(fat_dentry_t));
			str_cpy((char *) d->name, 8, FAT_NAME_DOT);
			str_cpy((char *) d->ext, 3, FAT_EXT_PAD);
			d->attr = FAT_ATTR_SUBDIR;
			d->firstc = host2uint16_t_le(childp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		d++;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (str_cmp((char *) d->name, FAT_NAME_DOT_DOT) == 0)) {
			memset(d, 0, sizeof(fat_dentry_t));
			str_cpy((char *) d->name, 8, FAT_NAME_DOT_DOT);
			str_cpy((char *) d->ext, 3, FAT_EXT_PAD);
			d->attr = FAT_ATTR_SUBDIR;
			/* ".." in a child of the root points to cluster 0. */
			d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
			    host2uint16_t_le(FAT_CLST_RES0) :
			    host2uint16_t_le(parentp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		b->dirty = true;		/* need to sync block */
		/*
		 * Ignore the return value as we would have fallen through on error
		 * anyway.
		 */
		(void) block_put(b);
	}
skip_dots:

	/* Record the child's position within the parent. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * DPS(bs) + j;
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
767
768int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
769{
770 fat_node_t *parentp = FAT_NODE(pfn);
771 fat_node_t *childp = FAT_NODE(cfn);
772 fat_bs_t *bs;
773 fat_dentry_t *d;
774 block_t *b;
775 bool has_children;
776 int rc;
777
778 if (!parentp)
779 return EBUSY;
780
781 rc = fat_has_children(&has_children, cfn);
782 if (rc != EOK)
783 return rc;
784 if (has_children)
785 return ENOTEMPTY;
786
787 fibril_mutex_lock(&parentp->lock);
788 fibril_mutex_lock(&childp->lock);
789 assert(childp->lnkcnt == 1);
790 fibril_mutex_lock(&childp->idx->lock);
791 bs = block_bb_get(childp->idx->devmap_handle);
792
793 rc = _fat_block_get(&b, bs, childp->idx->devmap_handle, childp->idx->pfc,
794 NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
795 BLOCK_FLAGS_NONE);
796 if (rc != EOK)
797 goto error;
798 d = (fat_dentry_t *)b->data +
799 (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
800 /* mark the dentry as not-currently-used */
801 d->name[0] = FAT_DENTRY_ERASED;
802 b->dirty = true; /* need to sync block */
803 rc = block_put(b);
804 if (rc != EOK)
805 goto error;
806
807 /* remove the index structure from the position hash */
808 fat_idx_hashout(childp->idx);
809 /* clear position information */
810 childp->idx->pfc = FAT_CLST_RES0;
811 childp->idx->pdi = 0;
812 fibril_mutex_unlock(&childp->idx->lock);
813 childp->lnkcnt = 0;
814 childp->refcnt++; /* keep the node in memory until destroyed */
815 childp->dirty = true;
816 fibril_mutex_unlock(&childp->lock);
817 fibril_mutex_unlock(&parentp->lock);
818
819 return EOK;
820
821error:
822 fibril_mutex_unlock(&parentp->idx->lock);
823 fibril_mutex_unlock(&childp->lock);
824 fibril_mutex_unlock(&childp->idx->lock);
825 return rc;
826}
827
/** Determine whether a node has any children.
 *
 * Regular files trivially have none; directories are scanned until the
 * first valid dentry or the end-of-directory marker.
 *
 * @param has_children	Output flag.
 * @param fn		Node to examine.
 *
 * @return	EOK or a block-layer error.
 */
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}

	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->devmap_handle);

	blocks = nodep->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* End-of-directory marker: no children. */
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				/* Found a live entry. */
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}
885
886
887fs_index_t fat_index_get(fs_node_t *fn)
888{
889 return FAT_NODE(fn)->idx->index;
890}
891
892aoff64_t fat_size_get(fs_node_t *fn)
893{
894 return FAT_NODE(fn)->size;
895}
896
897unsigned fat_lnkcnt_get(fs_node_t *fn)
898{
899 return FAT_NODE(fn)->lnkcnt;
900}
901
/** Read one character from the read-only PLB (circular, PLB_SIZE bytes). */
char fat_plb_get_char(unsigned pos)
{
	return fat_reg.plb_ro[pos % PLB_SIZE];
}
906
907bool fat_is_directory(fs_node_t *fn)
908{
909 return FAT_NODE(fn)->type == FAT_DIRECTORY;
910}
911
912bool fat_is_file(fs_node_t *fn)
913{
914 return FAT_NODE(fn)->type == FAT_FILE;
915}
916
/** Device-node handle lookup — FAT has no device special files; always 0. */
devmap_handle_t fat_device_get(fs_node_t *node)
{
	return 0;
}
921
/** Dispatch table of FAT libfs operations, handed to libfs helpers. */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_open = fat_node_open,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file,
	.device_get = fat_device_get
};
942
943/*
944 * VFS operations.
945 */
946
/** VFS_OUT_MOUNTED handler: bring up a FAT instance on a device.
 *
 * Accepts mount options, initializes libblock and the block cache, sanity
 * checks the file system and instantiates the root node. Answers the IPC
 * request with the root index, size and link count, or an error code.
 */
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
	devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
	enum cache_mode cmode;
	fat_bs_t *bs;

	/* Accept the mount options */
	char *opts;
	int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);

	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* Check for option enabling write through. */
	if (str_cmp(opts, "wtcache") == 0)
		cmode = CACHE_MODE_WT;
	else
		cmode = CACHE_MODE_WB;

	free(opts);

	/* initialize libblock */
	rc = block_init(devmap_handle, BS_SIZE);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* prepare the boot block */
	rc = block_bb_read(devmap_handle, BS_BLOCK);
	if (rc != EOK) {
		block_fini(devmap_handle);
		async_answer_0(rid, rc);
		return;
	}

	/* get the buffer with the boot sector */
	bs = block_bb_get(devmap_handle);

	/* Only 512-byte sectors are supported. */
	if (BPS(bs) != BS_SIZE) {
		block_fini(devmap_handle);
		async_answer_0(rid, ENOTSUP);
		return;
	}

	/* Initialize the block cache */
	rc = block_cache_init(devmap_handle, BPS(bs), 0 /* XXX */, cmode);
	if (rc != EOK) {
		block_fini(devmap_handle);
		async_answer_0(rid, rc);
		return;
	}

	/* Do some simple sanity checks on the file system. */
	rc = fat_sanity_check(bs, devmap_handle);
	if (rc != EOK) {
		(void) block_cache_fini(devmap_handle);
		block_fini(devmap_handle);
		async_answer_0(rid, rc);
		return;
	}

	rc = fat_idx_init_by_devmap_handle(devmap_handle);
	if (rc != EOK) {
		(void) block_cache_fini(devmap_handle);
		block_fini(devmap_handle);
		async_answer_0(rid, rc);
		return;
	}

	/* Initialize the root node. */
	fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
	if (!rfn) {
		(void) block_cache_fini(devmap_handle);
		block_fini(devmap_handle);
		fat_idx_fini_by_devmap_handle(devmap_handle);
		async_answer_0(rid, ENOMEM);
		return;
	}
	fs_node_initialize(rfn);
	fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
	if (!rootp) {
		free(rfn);
		(void) block_cache_fini(devmap_handle);
		block_fini(devmap_handle);
		fat_idx_fini_by_devmap_handle(devmap_handle);
		async_answer_0(rid, ENOMEM);
		return;
	}
	fat_node_initialize(rootp);

	fat_idx_t *ridxp = fat_idx_get_by_pos(devmap_handle, FAT_CLST_ROOTPAR, 0);
	if (!ridxp) {
		free(rfn);
		free(rootp);
		(void) block_cache_fini(devmap_handle);
		block_fini(devmap_handle);
		fat_idx_fini_by_devmap_handle(devmap_handle);
		async_answer_0(rid, ENOMEM);
		return;
	}
	assert(ridxp->index == 0);
	/* ridxp->lock held */

	/* The root directory occupies the fixed root dentry area. */
	rootp->type = FAT_DIRECTORY;
	rootp->firstc = FAT_CLST_ROOT;
	rootp->refcnt = 1;
	rootp->lnkcnt = 0;	/* FS root is not linked */
	rootp->size = RDE(bs) * sizeof(fat_dentry_t);
	rootp->idx = ridxp;
	ridxp->nodep = rootp;
	rootp->bp = rfn;
	rfn->data = rootp;

	fibril_mutex_unlock(&ridxp->lock);

	async_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}
1067
/** VFS_OUT_MOUNT handler: delegate to the generic libfs implementation. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1072
/** VFS_OUT_UNMOUNTED handler: tear down a FAT instance.
 *
 * Refuses with EBUSY while any node other than the root is referenced,
 * then flushes and frees all cached state and shuts down libblock.
 */
void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
{
	devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	rc = fat_root_get(&fn, devmap_handle);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}
	nodep = FAT_NODE(fn);

	/*
	 * We expect exactly two references on the root node. One for the
	 * fat_root_get() above and one created in fat_mounted().
	 */
	if (nodep->refcnt != 2) {
		(void) fat_node_put(fn);
		async_answer_0(rid, EBUSY);
		return;
	}

	/*
	 * Put the root node and force it to the FAT free node list.
	 */
	(void) fat_node_put(fn);
	(void) fat_node_put(fn);

	/*
	 * Perform cleanup of the node structures, index structures and
	 * associated data. Write back this file system's dirty blocks and
	 * stop using libblock for this instance.
	 */
	(void) fat_node_fini_by_devmap_handle(devmap_handle);
	fat_idx_fini_by_devmap_handle(devmap_handle);
	(void) block_cache_fini(devmap_handle);
	block_fini(devmap_handle);

	async_answer_0(rid, EOK);
}
1115
/** VFS_OUT_UNMOUNT handler: delegate to the generic libfs implementation. */
void fat_unmount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_unmount(&fat_libfs_ops, rid, request);
}
1120
/** VFS_OUT_LOOKUP handler: delegate to the generic libfs implementation. */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1125
/** Read data from a file or list entries of a directory (VFS_OUT_READ).
 *
 * For regular files, at most one block worth of data is returned per call;
 * the client must cope with short reads. For directories, the position is
 * interpreted as an index into the array of directory entries and the name
 * of the next valid entry is returned.
 *
 * @param rid		Hash of the IPC request awaiting the answer.
 * @param request	IPC request carrying the device handle (ARG1), the
 *			node index (ARG2) and the 64-bit position (ARG3/ARG4).
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
	aoff64_t pos =
	    (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, devmap_handle, index);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		async_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	/* Wait for the client to initiate the data transfer. */
	ipc_callid_t callid;
	size_t len;
	if (!async_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(devmap_handle);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) async_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the block boundary and to the file size. */
			bytes = min(len, BPS(bs) - pos % BPS(bs));
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
			    BLOCK_FLAGS_NONE);
			if (rc != EOK) {
				fat_node_put(fn);
				async_answer_0(callid, rc);
				async_answer_0(rid, rc);
				return;
			}
			(void) async_data_read_finalize(callid,
			    b->data + pos % BPS(bs), bytes);
			rc = block_put(b);
			if (rc != EOK) {
				fat_node_put(fn);
				async_answer_0(rid, rc);
				return;
			}
		}
	} else {
		unsigned bnum;
		aoff64_t spos = pos;	/* starting position, for bytes calc */
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % BPS(bs) == 0);
		assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs);
		while (bnum < nodep->size / BPS(bs)) {
			aoff64_t o;

			rc = fat_block_get(&b, bs, nodep, bnum,
			    BLOCK_FLAGS_NONE);
			if (rc != EOK)
				goto err;
			for (o = pos % (BPS(bs) / sizeof(fat_dentry_t));
			    o < BPS(bs) / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
				case FAT_DENTRY_FREE:
					continue;
				case FAT_DENTRY_LAST:
					/* No more entries follow. */
					rc = block_put(b);
					if (rc != EOK)
						goto err;
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					fat_dentry_name_get(d, name);
					rc = block_put(b);
					if (rc != EOK)
						goto err;
					goto hit;
				}
			}
			rc = block_put(b);
			if (rc != EOK)
				goto err;
			bnum++;
		}
miss:
		/* No valid entry found at or after the requested position. */
		rc = fat_node_put(fn);
		async_answer_0(callid, rc != EOK ? rc : ENOENT);
		async_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
		return;

err:
		(void) fat_node_put(fn);
		async_answer_0(callid, rc);
		async_answer_0(rid, rc);
		return;

hit:
		/* Return the entry name including the terminating NUL. */
		(void) async_data_read_finalize(callid, name, str_size(name) + 1);
		bytes = (pos - spos) + 1;
	}

	rc = fat_node_put(fn);
	async_answer_1(rid, rc, (sysarg_t)bytes);
}
1262
1263void fat_write(ipc_callid_t rid, ipc_call_t *request)
1264{
1265 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
1266 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1267 aoff64_t pos =
1268 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
1269 fs_node_t *fn;
1270 fat_node_t *nodep;
1271 fat_bs_t *bs;
1272 size_t bytes, size;
1273 block_t *b;
1274 aoff64_t boundary;
1275 int flags = BLOCK_FLAGS_NONE;
1276 int rc;
1277
1278 rc = fat_node_get(&fn, devmap_handle, index);
1279 if (rc != EOK) {
1280 async_answer_0(rid, rc);
1281 return;
1282 }
1283 if (!fn) {
1284 async_answer_0(rid, ENOENT);
1285 return;
1286 }
1287 nodep = FAT_NODE(fn);
1288
1289 ipc_callid_t callid;
1290 size_t len;
1291 if (!async_data_write_receive(&callid, &len)) {
1292 (void) fat_node_put(fn);
1293 async_answer_0(callid, EINVAL);
1294 async_answer_0(rid, EINVAL);
1295 return;
1296 }
1297
1298 bs = block_bb_get(devmap_handle);
1299
1300 /*
1301 * In all scenarios, we will attempt to write out only one block worth
1302 * of data at maximum. There might be some more efficient approaches,
1303 * but this one greatly simplifies fat_write(). Note that we can afford
1304 * to do this because the client must be ready to handle the return
1305 * value signalizing a smaller number of bytes written.
1306 */
1307 bytes = min(len, BPS(bs) - pos % BPS(bs));
1308 if (bytes == BPS(bs))
1309 flags |= BLOCK_FLAGS_NOREAD;
1310
1311 boundary = ROUND_UP(nodep->size, BPC(bs));
1312 if (pos < boundary) {
1313 /*
1314 * This is the easier case - we are either overwriting already
1315 * existing contents or writing behind the EOF, but still within
1316 * the limits of the last cluster. The node size may grow to the
1317 * next block size boundary.
1318 */
1319 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1320 if (rc != EOK) {
1321 (void) fat_node_put(fn);
1322 async_answer_0(callid, rc);
1323 async_answer_0(rid, rc);
1324 return;
1325 }
1326 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
1327 if (rc != EOK) {
1328 (void) fat_node_put(fn);
1329 async_answer_0(callid, rc);
1330 async_answer_0(rid, rc);
1331 return;
1332 }
1333 (void) async_data_write_finalize(callid,
1334 b->data + pos % BPS(bs), bytes);
1335 b->dirty = true; /* need to sync block */
1336 rc = block_put(b);
1337 if (rc != EOK) {
1338 (void) fat_node_put(fn);
1339 async_answer_0(rid, rc);
1340 return;
1341 }
1342 if (pos + bytes > nodep->size) {
1343 nodep->size = pos + bytes;
1344 nodep->dirty = true; /* need to sync node */
1345 }
1346 size = nodep->size;
1347 rc = fat_node_put(fn);
1348 async_answer_2(rid, rc, bytes, nodep->size);
1349 return;
1350 } else {
1351 /*
1352 * This is the more difficult case. We must allocate new
1353 * clusters for the node and zero them out.
1354 */
1355 unsigned nclsts;
1356 fat_cluster_t mcl, lcl;
1357
1358 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
1359 /* create an independent chain of nclsts clusters in all FATs */
1360 rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);
1361 if (rc != EOK) {
1362 /* could not allocate a chain of nclsts clusters */
1363 (void) fat_node_put(fn);
1364 async_answer_0(callid, rc);
1365 async_answer_0(rid, rc);
1366 return;
1367 }
1368 /* zero fill any gaps */
1369 rc = fat_fill_gap(bs, nodep, mcl, pos);
1370 if (rc != EOK) {
1371 (void) fat_free_clusters(bs, devmap_handle, mcl);
1372 (void) fat_node_put(fn);
1373 async_answer_0(callid, rc);
1374 async_answer_0(rid, rc);
1375 return;
1376 }
1377 rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL,
1378 (pos / BPS(bs)) % SPC(bs), flags);
1379 if (rc != EOK) {
1380 (void) fat_free_clusters(bs, devmap_handle, mcl);
1381 (void) fat_node_put(fn);
1382 async_answer_0(callid, rc);
1383 async_answer_0(rid, rc);
1384 return;
1385 }
1386 (void) async_data_write_finalize(callid,
1387 b->data + pos % BPS(bs), bytes);
1388 b->dirty = true; /* need to sync block */
1389 rc = block_put(b);
1390 if (rc != EOK) {
1391 (void) fat_free_clusters(bs, devmap_handle, mcl);
1392 (void) fat_node_put(fn);
1393 async_answer_0(rid, rc);
1394 return;
1395 }
1396 /*
1397 * Append the cluster chain starting in mcl to the end of the
1398 * node's cluster chain.
1399 */
1400 rc = fat_append_clusters(bs, nodep, mcl, lcl);
1401 if (rc != EOK) {
1402 (void) fat_free_clusters(bs, devmap_handle, mcl);
1403 (void) fat_node_put(fn);
1404 async_answer_0(rid, rc);
1405 return;
1406 }
1407 nodep->size = size = pos + bytes;
1408 nodep->dirty = true; /* need to sync node */
1409 rc = fat_node_put(fn);
1410 async_answer_2(rid, rc, bytes, size);
1411 return;
1412 }
1413}
1414
1415void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1416{
1417 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
1418 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1419 aoff64_t size =
1420 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
1421 fs_node_t *fn;
1422 fat_node_t *nodep;
1423 fat_bs_t *bs;
1424 int rc;
1425
1426 rc = fat_node_get(&fn, devmap_handle, index);
1427 if (rc != EOK) {
1428 async_answer_0(rid, rc);
1429 return;
1430 }
1431 if (!fn) {
1432 async_answer_0(rid, ENOENT);
1433 return;
1434 }
1435 nodep = FAT_NODE(fn);
1436
1437 bs = block_bb_get(devmap_handle);
1438
1439 if (nodep->size == size) {
1440 rc = EOK;
1441 } else if (nodep->size < size) {
1442 /*
1443 * The standard says we have the freedom to grow the node.
1444 * For now, we simply return an error.
1445 */
1446 rc = EINVAL;
1447 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
1448 /*
1449 * The node will be shrunk, but no clusters will be deallocated.
1450 */
1451 nodep->size = size;
1452 nodep->dirty = true; /* need to sync node */
1453 rc = EOK;
1454 } else {
1455 /*
1456 * The node will be shrunk, clusters will be deallocated.
1457 */
1458 if (size == 0) {
1459 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1460 if (rc != EOK)
1461 goto out;
1462 } else {
1463 fat_cluster_t lastc;
1464 rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
1465 &lastc, NULL, (size - 1) / BPC(bs));
1466 if (rc != EOK)
1467 goto out;
1468 rc = fat_chop_clusters(bs, nodep, lastc);
1469 if (rc != EOK)
1470 goto out;
1471 }
1472 nodep->size = size;
1473 nodep->dirty = true; /* need to sync node */
1474 rc = EOK;
1475 }
1476out:
1477 fat_node_put(fn);
1478 async_answer_0(rid, rc);
1479 return;
1480}
1481
/** Handle the VFS_OUT_CLOSE request.
 *
 * FAT keeps no per-open state, so there is nothing to do but acknowledge.
 */
void fat_close(ipc_callid_t rid, ipc_call_t *request)
{
	async_answer_0(rid, EOK);
}
1486
1487void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1488{
1489 devmap_handle_t devmap_handle = (devmap_handle_t)IPC_GET_ARG1(*request);
1490 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1491 fs_node_t *fn;
1492 fat_node_t *nodep;
1493 int rc;
1494
1495 rc = fat_node_get(&fn, devmap_handle, index);
1496 if (rc != EOK) {
1497 async_answer_0(rid, rc);
1498 return;
1499 }
1500 if (!fn) {
1501 async_answer_0(rid, ENOENT);
1502 return;
1503 }
1504
1505 nodep = FAT_NODE(fn);
1506 /*
1507 * We should have exactly two references. One for the above
1508 * call to fat_node_get() and one from fat_unlink().
1509 */
1510 assert(nodep->refcnt == 2);
1511
1512 rc = fat_destroy_node(fn);
1513 async_answer_0(rid, rc);
1514}
1515
/** Handle the VFS_OUT_OPEN_NODE request by delegating to libfs. */
void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1520
/** Handle the VFS_OUT_STAT request by delegating to libfs. */
void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1525
1526void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1527{
1528 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
1529 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1530
1531 fs_node_t *fn;
1532 int rc = fat_node_get(&fn, devmap_handle, index);
1533 if (rc != EOK) {
1534 async_answer_0(rid, rc);
1535 return;
1536 }
1537 if (!fn) {
1538 async_answer_0(rid, ENOENT);
1539 return;
1540 }
1541
1542 fat_node_t *nodep = FAT_NODE(fn);
1543
1544 nodep->dirty = true;
1545 rc = fat_node_sync(nodep);
1546
1547 fat_node_put(fn);
1548 async_answer_0(rid, rc);
1549}
1550
1551/**
1552 * @}
1553 */
Note: See TracBrowser for help on using the repository browser.