source: mainline/uspace/srv/fs/fat/fat_ops.c@ 73b1218

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 73b1218 was 5a9a1aaf, checked in by Oleg Romanenko <romanenko.oleg@…>, 14 years ago

FAT: use uint32_t for clusters count instead of uint16_t

  • Property mode set to 100644
File size: 35.4 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * Copyright (c) 2011 Oleg Romanenko
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup fs
31 * @{
32 */
33
34/**
35 * @file fat_ops.c
36 * @brief Implementation of VFS operations for the FAT file system server.
37 */
38
39#include "fat.h"
40#include "fat_dentry.h"
41#include "fat_fat.h"
42#include "fat_directory.h"
43#include "../../vfs/vfs.h"
44#include <libfs.h>
45#include <libblock.h>
46#include <ipc/services.h>
47#include <ipc/devmap.h>
48#include <macros.h>
49#include <async.h>
50#include <errno.h>
51#include <str.h>
52#include <byteorder.h>
53#include <adt/hash_table.h>
54#include <adt/list.h>
55#include <assert.h>
56#include <fibril_synch.h>
57#include <sys/mman.h>
58#include <align.h>
59#include <malloc.h>
60#include <str.h>
61
/** Extract the FAT-specific node from a generic VFS node (NULL-safe). */
#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
/** Get the generic VFS node wrapping a FAT node (NULL-safe). */
#define FS_NODE(node) ((node) ? (node)->bp : NULL)

/** Directory entries per sector. */
#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
/** Bytes per cluster. */
#define BPC(bs) (BPS((bs)) * SPC((bs)))
67
68/** Mutex protecting the list of cached free FAT nodes. */
69static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
70
71/** List of cached free FAT nodes. */
72static LIST_INITIALIZE(ffn_head);
73
74/*
75 * Forward declarations of FAT libfs operations.
76 */
77static int fat_root_get(fs_node_t **, devmap_handle_t);
78static int fat_match(fs_node_t **, fs_node_t *, const char *);
79static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);
80static int fat_node_open(fs_node_t *);
81static int fat_node_put(fs_node_t *);
82static int fat_create_node(fs_node_t **, devmap_handle_t, int);
83static int fat_destroy_node(fs_node_t *);
84static int fat_link(fs_node_t *, fs_node_t *, const char *);
85static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
86static int fat_has_children(bool *, fs_node_t *);
87static fs_index_t fat_index_get(fs_node_t *);
88static aoff64_t fat_size_get(fs_node_t *);
89static unsigned fat_lnkcnt_get(fs_node_t *);
90static char fat_plb_get_char(unsigned);
91static bool fat_is_directory(fs_node_t *);
92static bool fat_is_file(fs_node_t *node);
93static devmap_handle_t fat_device_get(fs_node_t *node);
94
95/*
96 * Helper functions.
97 */
98static void fat_node_initialize(fat_node_t *node)
99{
100 fibril_mutex_initialize(&node->lock);
101 node->bp = NULL;
102 node->idx = NULL;
103 node->type = 0;
104 link_initialize(&node->ffn_link);
105 node->size = 0;
106 node->lnkcnt = 0;
107 node->refcnt = 0;
108 node->dirty = false;
109 node->lastc_cached_valid = false;
110 node->lastc_cached_value = 0;
111 node->currc_cached_valid = false;
112 node->currc_cached_bn = 0;
113 node->currc_cached_value = 0;
114}
115
/** Write a dirty node's metadata back to its on-disk directory entry.
 *
 * The dentry is located through the node's index structure (parent first
 * cluster @c pfc and dentry index @c pdi). The first-cluster, size (files)
 * and attribute (directories) fields are refreshed and the containing
 * block is marked dirty so the block cache writes it out.
 *
 * @param node	Node to synchronize; caller guarantees node->dirty is set.
 * @return	EOK on success, an error code from block I/O otherwise.
 */
static int fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	int rc;

	assert(node->dirty);

	bs = block_bb_get(node->idx->devmap_handle);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
	    NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
	    BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));

	/*
	 * NOTE(review): only the low 16 bits of firstc are stored here;
	 * the FAT32 high word (firstc_hi) does not appear to be updated —
	 * verify whether FAT32 volumes are handled elsewhere.
	 */
	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}

	/* TODO: update other fields? (e.g time fields) */

	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	return rc;
}
149
/** Clean up all cached free nodes belonging to one file system instance.
 *
 * Walks the global free node list, syncs any dirty node that belongs to
 * @a devmap_handle and frees it. Uses trylock on the node and index locks
 * to respect the established lock ordering; on any contention or list
 * modification the scan restarts from the beginning.
 *
 * @param devmap_handle	Instance whose cached nodes should be destroyed.
 * @return		EOK on success or an error from fat_node_sync().
 */
static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle)
{
	link_t *lnk;
	fat_node_t *nodep;
	int rc;

	/*
	 * We are called from fat_unmounted() and assume that there are already
	 * no nodes belonging to this instance with non-zero refcount. Therefore
	 * it is sufficient to clean up only the FAT free node list.
	 */

restart:
	fibril_mutex_lock(&ffn_mutex);
	for (lnk = ffn_head.next; lnk != &ffn_head; lnk = lnk->next) {
		nodep = list_get_instance(lnk, fat_node_t, ffn_link);
		/* Trylock to avoid deadlock; restart the scan on contention. */
		if (!fibril_mutex_trylock(&nodep->lock)) {
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (!fibril_mutex_trylock(&nodep->idx->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		/* Skip nodes belonging to other instances. */
		if (nodep->idx->devmap_handle != devmap_handle) {
			fibril_mutex_unlock(&nodep->idx->lock);
			fibril_mutex_unlock(&nodep->lock);
			continue;
		}

		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);

		/*
		 * We can unlock the node and its index structure because we are
		 * the last player on this playground and VFS is preventing new
		 * players from entering.
		 */
		fibril_mutex_unlock(&nodep->idx->lock);
		fibril_mutex_unlock(&nodep->lock);

		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			/*
			 * NOTE(review): on sync failure the node has already
			 * been unlinked from the free list but is not freed
			 * and idx->nodep is left pointing at it — confirm
			 * this leak is acceptable on the unmount error path.
			 */
			if (rc != EOK)
				return rc;
		}
		nodep->idx->nodep = NULL;
		free(nodep->bp);
		free(nodep);

		/* Need to restart because we changed the ffn_head list. */
		goto restart;
	}
	fibril_mutex_unlock(&ffn_mutex);

	return EOK;
}
208
/** Obtain a fresh node structure, recycling a cached free node if possible.
 *
 * First tries to reclaim a node from the free node list (syncing it first
 * if dirty); if the list is empty or its head is contended, falls back to
 * allocating a new fs_node_t/fat_node_t pair. The returned node is
 * initialized and wired to its VFS counterpart, but has no index structure.
 *
 * @param nodepp	Output: the initialized node.
 * @return		EOK, ENOMEM, or an error from fat_node_sync().
 */
static int fat_node_get_new(fat_node_t **nodepp)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		/* Contended locks mean someone else is using it; allocate. */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			/* Flush the recycled node's metadata before reuse. */
			rc = fat_node_sync(nodep);
			if (rc != EOK) {
				idxp_tmp->nodep = NULL;
				fibril_mutex_unlock(&nodep->lock);
				fibril_mutex_unlock(&idxp_tmp->lock);
				free(nodep->bp);
				free(nodep);
				return rc;
			}
		}
		/* Detach the recycled node from its old index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return ENOMEM;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return ENOMEM;
		}
	}
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;

	*nodepp = nodep;
	return EOK;
}
265
/** Internal version of fat_node_get().
 *
 * Returns the in-core node for @a idxp, instantiating it from the on-disk
 * dentry if it is not resident. For directories the size is computed by
 * walking the FAT, since the dentry size field is undefined for them.
 *
 * @param nodepp	Output: the resident node with one extra reference.
 * @param idxp		Locked index structure (remains locked on return).
 * @return		EOK on success or an error code.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		/* First new reference pulls the node off the free list. */
		if (!idxp->nodep->refcnt++) {
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->devmap_handle);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL,
	    (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
	/* FAT32 splits the first cluster number across two 16-bit fields. */
	if (FAT_IS_FAT32(bs)) {
		nodep->firstc = uint16_t_le2host(d->firstc_lo) |
		    (uint16_t_le2host(d->firstc_hi) << 16);
	}
	else
		nodep->firstc = uint16_t_le2host(d->firstc);

	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;

		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint32_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle, nodep->firstc);
		if (rc != EOK) {
			(void) block_put(b);
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = BPS(bs) * SPC(bs) * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}

	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}
364
365/*
366 * FAT libfs operations.
367 */
368
/** Get the root node of the instance; the root always has index 0. */
int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)
{
	return fat_node_get(rfn, devmap_handle, 0);
}
373
/** Look up a directory component by name.
 *
 * Iterates over the parent directory's entries comparing each (long) name
 * against @a component. On a hit, the matching node is instantiated and
 * returned; on a miss, *rfn is set to NULL and EOK is returned.
 *
 * @param rfn		Output: matching VFS node, or NULL if not found.
 * @param pfn		Parent directory node.
 * @param component	Name to look up.
 * @return		EOK (hit or miss) or an error code.
 */
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_LFN_NAME_SIZE];
	fat_dentry_t *d;
	devmap_handle_t devmap_handle;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	devmap_handle = parentp->idx->devmap_handle;
	fibril_mutex_unlock(&parentp->idx->lock);

	fat_directory_t di;
	rc = fat_directory_open(parentp, &di);
	if (rc != EOK)
		return rc;

	while (fat_directory_read(&di, name, &d) == EOK) {
		if (fat_dentry_namecmp(name, component) == 0) {
			/* hit */
			fat_node_t *nodep;
			/* Offset of the dentry within its block. */
			aoff64_t o = di.pos % (BPS(di.bs) / sizeof(fat_dentry_t));
			fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,
			    parentp->firstc, di.bnum * DPS(di.bs) + o);
			if (!idx) {
				/*
				 * Can happen if memory is low or if we
				 * run out of 32-bit indices.
				 */
				rc = fat_directory_close(&di);
				return (rc == EOK) ? ENOMEM : rc;
			}
			rc = fat_node_get_core(&nodep, idx);
			fibril_mutex_unlock(&idx->lock);
			if (rc != EOK) {
				(void) fat_directory_close(&di);
				return rc;
			}
			*rfn = FS_NODE(nodep);
			rc = fat_directory_close(&di);
			if (rc != EOK)
				(void) fat_node_put(*rfn);
			return rc;
		} else {
			rc = fat_directory_next(&di);
			if (rc != EOK)
				break;
		}
	}
	/* Miss: report success with no node. */
	(void) fat_directory_close(&di);
	*rfn = NULL;
	return EOK;
}
427
/** Instantiate a FAT in-core node.
 *
 * Resolves @a index to its index structure and delegates to
 * fat_node_get_core(). If no index structure exists, *rfn is set to NULL
 * and EOK is returned (absence is not an error at this layer).
 */
int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)
{
	fat_node_t *nodep;
	fat_idx_t *idxp;
	int rc;

	idxp = fat_idx_get_by_index(devmap_handle, index);
	if (!idxp) {
		*rfn = NULL;
		return EOK;
	}
	/* idxp->lock held */
	rc = fat_node_get_core(&nodep, idxp);
	fibril_mutex_unlock(&idxp->lock);
	if (rc == EOK)
		*rfn = FS_NODE(nodep);
	return rc;
}
447
448int fat_node_open(fs_node_t *fn)
449{
450 /*
451 * Opening a file is stateless, nothing
452 * to be done here.
453 */
454 return EOK;
455}
456
/** Drop one reference to a node.
 *
 * When the last reference is dropped, the node is either parked on the
 * free node list (normal case) or destroyed outright if it never got an
 * index structure (failed creation path).
 *
 * @param fn	Node to release.
 * @return	Always EOK.
 */
int fat_node_put(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	bool destroy = false;

	fibril_mutex_lock(&nodep->lock);
	if (!--nodep->refcnt) {
		if (nodep->idx) {
			fibril_mutex_lock(&ffn_mutex);
			list_append(&nodep->ffn_link, &ffn_head);
			fibril_mutex_unlock(&ffn_mutex);
		} else {
			/*
			 * The node does not have any index structure associated
			 * with itself. This can only mean that we are releasing
			 * the node after a failed attempt to allocate the index
			 * structure for it.
			 */
			destroy = true;
		}
	}
	fibril_mutex_unlock(&nodep->lock);
	/* Free outside the node lock; nobody else can reach the node now. */
	if (destroy) {
		free(nodep->bp);
		free(nodep);
	}
	return EOK;
}
485
486int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags)
487{
488 fat_idx_t *idxp;
489 fat_node_t *nodep;
490 fat_bs_t *bs;
491 fat_cluster_t mcl, lcl;
492 int rc;
493
494 bs = block_bb_get(devmap_handle);
495 if (flags & L_DIRECTORY) {
496 /* allocate a cluster */
497 rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl);
498 if (rc != EOK)
499 return rc;
500 /* populate the new cluster with unused dentries */
501 rc = fat_zero_cluster(bs, devmap_handle, mcl);
502 if (rc != EOK) {
503 (void) fat_free_clusters(bs, devmap_handle, mcl);
504 return rc;
505 }
506 }
507
508 rc = fat_node_get_new(&nodep);
509 if (rc != EOK) {
510 (void) fat_free_clusters(bs, devmap_handle, mcl);
511 return rc;
512 }
513 rc = fat_idx_get_new(&idxp, devmap_handle);
514 if (rc != EOK) {
515 (void) fat_free_clusters(bs, devmap_handle, mcl);
516 (void) fat_node_put(FS_NODE(nodep));
517 return rc;
518 }
519 /* idxp->lock held */
520 if (flags & L_DIRECTORY) {
521 nodep->type = FAT_DIRECTORY;
522 nodep->firstc = mcl;
523 nodep->size = BPS(bs) * SPC(bs);
524 } else {
525 nodep->type = FAT_FILE;
526 nodep->firstc = FAT_CLST_RES0;
527 nodep->size = 0;
528 }
529 nodep->lnkcnt = 0; /* not linked anywhere */
530 nodep->refcnt = 1;
531 nodep->dirty = true;
532
533 nodep->idx = idxp;
534 idxp->nodep = nodep;
535
536 fibril_mutex_unlock(&idxp->lock);
537 *rfn = FS_NODE(nodep);
538 return EOK;
539}
540
/** Destroy an unlinked, childless node and release its on-disk clusters.
 *
 * @param fn	Node to destroy; must have lnkcnt == 0 and no children.
 * @return	EOK or an error from fat_has_children()/fat_free_clusters().
 */
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	bool has_children;
	int rc;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	rc = fat_has_children(&has_children, fn);
	if (rc != EOK)
		return rc;
	assert(!has_children);

	bs = block_bb_get(nodep->idx->devmap_handle);
	/* rc is EOK here; it stays EOK if there are no clusters to free. */
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->devmap_handle,
		    nodep->firstc);
	}

	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}
577
578int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
579{
580 fat_node_t *parentp = FAT_NODE(pfn);
581 fat_node_t *childp = FAT_NODE(cfn);
582 fat_dentry_t *d;
583 fat_bs_t *bs;
584 block_t *b;
585 fat_directory_t di;
586 fat_dentry_t de;
587 int rc;
588
589 fibril_mutex_lock(&childp->lock);
590 if (childp->lnkcnt == 1) {
591 /*
592 * On FAT, we don't support multiple hard links.
593 */
594 fibril_mutex_unlock(&childp->lock);
595 return EMLINK;
596 }
597 assert(childp->lnkcnt == 0);
598 fibril_mutex_unlock(&childp->lock);
599
600 if (!fat_valid_name(name))
601 return ENOTSUP;
602
603 fibril_mutex_lock(&parentp->idx->lock);
604 bs = block_bb_get(parentp->idx->devmap_handle);
605 rc = fat_directory_open(parentp, &di);
606 if (rc != EOK)
607 return rc;
608
609 /*
610 * At this point we only establish the link between the parent and the
611 * child. The dentry, except of the name and the extension, will remain
612 * uninitialized until the corresponding node is synced. Thus the valid
613 * dentry data is kept in the child node structure.
614 */
615 memset(&de, 0, sizeof(fat_dentry_t));
616
617 rc = fat_directory_write(&di, name, &de);
618 if (rc!=EOK)
619 return rc;
620 rc = fat_directory_close(&di);
621 if (rc!=EOK)
622 return rc;
623
624 fibril_mutex_unlock(&parentp->idx->lock);
625 if (rc != EOK)
626 return rc;
627
628 fibril_mutex_lock(&childp->idx->lock);
629
630 if (childp->type == FAT_DIRECTORY) {
631 /*
632 * If possible, create the Sub-directory Identifier Entry and
633 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
634 * These entries are not mandatory according to Standard
635 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
636 * rather a sign of our good will.
637 */
638 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
639 if (rc != EOK) {
640 /*
641 * Rather than returning an error, simply skip the
642 * creation of these two entries.
643 */
644 goto skip_dots;
645 }
646 d = (fat_dentry_t *) b->data;
647 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
648 (bcmp(d->name, FAT_NAME_DOT, FAT_NAME_LEN)) == 0) {
649 memset(d, 0, sizeof(fat_dentry_t));
650 memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
651 memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
652 d->attr = FAT_ATTR_SUBDIR;
653 d->firstc = host2uint16_t_le(childp->firstc);
654 /* TODO: initialize also the date/time members. */
655 }
656 d++;
657 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
658 (bcmp(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN) == 0)) {
659 memset(d, 0, sizeof(fat_dentry_t));
660 memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
661 memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
662 d->attr = FAT_ATTR_SUBDIR;
663 d->firstc = (parentp->firstc == FAT_ROOT_CLST(bs)) ?
664 host2uint16_t_le(FAT_CLST_ROOTPAR) :
665 host2uint16_t_le(parentp->firstc);
666 /* TODO: initialize also the date/time members. */
667 }
668 b->dirty = true; /* need to sync block */
669 /*
670 * Ignore the return value as we would have fallen through on error
671 * anyway.
672 */
673 (void) block_put(b);
674 }
675skip_dots:
676
677 childp->idx->pfc = parentp->firstc;
678 childp->idx->pdi = di.pos; /* di.pos holds absolute position of SFN entry */
679 fibril_mutex_unlock(&childp->idx->lock);
680
681 fibril_mutex_lock(&childp->lock);
682 childp->lnkcnt = 1;
683 childp->dirty = true; /* need to sync node */
684 fibril_mutex_unlock(&childp->lock);
685
686 /*
687 * Hash in the index structure into the position hash.
688 */
689 fat_idx_hashin(childp->idx);
690
691 return EOK;
692}
693
/** Remove a child's name from its parent directory.
 *
 * Erases the child's dentry, unhashes its index structure and clears its
 * link count. An extra reference is taken on the child so it stays in
 * memory until fat_destroy_node() is called.
 *
 * Lock order: parent node -> child node -> child index.
 *
 * @param pfn	Parent directory (must be non-NULL).
 * @param cfn	Child to unlink (must have no children).
 * @param nm	Name being removed (unused here; position comes from idx).
 * @return	EOK, EBUSY, ENOTEMPTY or an I/O error code.
 */
int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	bool has_children;
	int rc;

	if (!parentp)
		return EBUSY;

	rc = fat_has_children(&has_children, cfn);
	if (rc != EOK)
		return rc;
	if (has_children)
		return ENOTEMPTY;

	fibril_mutex_lock(&parentp->lock);
	fibril_mutex_lock(&childp->lock);
	assert(childp->lnkcnt == 1);
	fibril_mutex_lock(&childp->idx->lock);

	fat_directory_t di;
	rc = fat_directory_open(parentp,&di);
	if (rc != EOK)
		goto error;
	/* Seek to the child's dentry and wipe it (incl. LFN entries). */
	rc = fat_directory_seek(&di, childp->idx->pdi);
	if (rc != EOK)
		goto error;
	rc = fat_directory_erase(&di);
	if (rc != EOK)
		goto error;
	rc = fat_directory_close(&di);
	if (rc != EOK)
		goto error;

	/* remove the index structure from the position hash */
	fat_idx_hashout(childp->idx);
	/* clear position information */
	childp->idx->pfc = FAT_CLST_RES0;
	childp->idx->pdi = 0;
	fibril_mutex_unlock(&childp->idx->lock);
	childp->lnkcnt = 0;
	childp->refcnt++;	/* keep the node in memory until destroyed */
	childp->dirty = true;
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);

	return EOK;

error:
	/* Unwind in reverse lock order, closing the directory first. */
	(void) fat_directory_close(&di);
	fibril_mutex_unlock(&childp->idx->lock);
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);
	return rc;
}
750
/** Determine whether a directory contains any valid entries.
 *
 * Scans the directory's dentries block by block; stops early at the first
 * valid entry (has children) or the LAST marker (empty). Non-directories
 * trivially have no children.
 *
 * @param has_children	Output flag.
 * @param fn		Node to inspect.
 * @return		EOK or a block I/O error code.
 */
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}

	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->devmap_handle);

	blocks = nodep->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* No more entries can follow the LAST marker. */
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}
808
809
/** Return the VFS index of a node. */
fs_index_t fat_index_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->idx->index;
}
814
/** Return the size of a node in bytes. */
aoff64_t fat_size_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->size;
}
819
/** Return the link count of a node (0 or 1 on FAT). */
unsigned fat_lnkcnt_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->lnkcnt;
}
824
/** Read one character from the (circular) path lookup buffer. */
char fat_plb_get_char(unsigned pos)
{
	return fat_reg.plb_ro[pos % PLB_SIZE];
}
829
/** Return true if the node is a directory. */
bool fat_is_directory(fs_node_t *fn)
{
	return FAT_NODE(fn)->type == FAT_DIRECTORY;
}
834
/** Return true if the node is a regular file. */
bool fat_is_file(fs_node_t *fn)
{
	return FAT_NODE(fn)->type == FAT_FILE;
}
839
/** Return the device a node resides on.
 *
 * Stub: FAT nodes do not represent devices, so this always returns 0.
 */
devmap_handle_t fat_device_get(fs_node_t *node)
{
	return 0;
}
844
/** libfs operations table wiring the FAT implementations into libfs. */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_open = fat_node_open,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file,
	.device_get = fat_device_get
};
865
866/*
867 * VFS operations.
868 */
869
/** Handle the VFS_OUT_MOUNTED request: bring up a FAT instance.
 *
 * Accepts the mount options ("wtcache" selects write-through caching),
 * initializes libblock and the block cache, sanity-checks the boot sector,
 * sets up the per-instance index structures and instantiates the root
 * node. Answers the request with the root index, size and link count.
 */
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
	devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
	enum cache_mode cmode;
	fat_bs_t *bs;

	/* Accept the mount options */
	char *opts;
	int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);

	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* Check for option enabling write through. */
	if (str_cmp(opts, "wtcache") == 0)
		cmode = CACHE_MODE_WT;
	else
		cmode = CACHE_MODE_WB;

	free(opts);

	/* initialize libblock */
	rc = block_init(devmap_handle, BS_SIZE);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* prepare the boot block */
	rc = block_bb_read(devmap_handle, BS_BLOCK);
	if (rc != EOK) {
		block_fini(devmap_handle);
		async_answer_0(rid, rc);
		return;
	}

	/* get the buffer with the boot sector */
	bs = block_bb_get(devmap_handle);

	/* Only 512-byte sectors are supported. */
	if (BPS(bs) != BS_SIZE) {
		block_fini(devmap_handle);
		async_answer_0(rid, ENOTSUP);
		return;
	}

	/* Initialize the block cache */
	rc = block_cache_init(devmap_handle, BPS(bs), 0 /* XXX */, cmode);
	if (rc != EOK) {
		block_fini(devmap_handle);
		async_answer_0(rid, rc);
		return;
	}

	/* Do some simple sanity checks on the file system. */
	rc = fat_sanity_check(bs, devmap_handle);
	if (rc != EOK) {
		(void) block_cache_fini(devmap_handle);
		block_fini(devmap_handle);
		async_answer_0(rid, rc);
		return;
	}

	rc = fat_idx_init_by_devmap_handle(devmap_handle);
	if (rc != EOK) {
		(void) block_cache_fini(devmap_handle);
		block_fini(devmap_handle);
		async_answer_0(rid, rc);
		return;
	}

	/* Initialize the root node. */
	fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
	if (!rfn) {
		(void) block_cache_fini(devmap_handle);
		block_fini(devmap_handle);
		fat_idx_fini_by_devmap_handle(devmap_handle);
		async_answer_0(rid, ENOMEM);
		return;
	}

	fs_node_initialize(rfn);
	fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
	if (!rootp) {
		free(rfn);
		(void) block_cache_fini(devmap_handle);
		block_fini(devmap_handle);
		fat_idx_fini_by_devmap_handle(devmap_handle);
		async_answer_0(rid, ENOMEM);
		return;
	}
	fat_node_initialize(rootp);

	fat_idx_t *ridxp = fat_idx_get_by_pos(devmap_handle, FAT_CLST_ROOTPAR, 0);
	if (!ridxp) {
		free(rfn);
		free(rootp);
		(void) block_cache_fini(devmap_handle);
		block_fini(devmap_handle);
		fat_idx_fini_by_devmap_handle(devmap_handle);
		async_answer_0(rid, ENOMEM);
		return;
	}
	assert(ridxp->index == 0);
	/* ridxp->lock held */

	rootp->type = FAT_DIRECTORY;
	rootp->firstc = FAT_ROOT_CLST(bs);
	rootp->refcnt = 1;
	rootp->lnkcnt = 0;	/* FS root is not linked */

	if (FAT_IS_FAT32(bs)) {
		/* FAT32: the root is a normal cluster chain; walk it for size. */
		uint32_t clusters;
		rc = fat_clusters_get(&clusters, bs, devmap_handle, rootp->firstc);
		if (rc != EOK) {
			free(rfn);
			free(rootp);
			/*
			 * NOTE(review): freeing ridxp directly bypasses the
			 * index-structure teardown and may double-free with
			 * fat_idx_fini_by_devmap_handle() below — verify.
			 */
			free(ridxp); /* TODO: Is it right way to free ridxp? */
			(void) block_cache_fini(devmap_handle);
			block_fini(devmap_handle);
			fat_idx_fini_by_devmap_handle(devmap_handle);
			/* NOTE(review): answers ENOTSUP instead of rc — verify. */
			async_answer_0(rid, ENOTSUP);
			return;
		}
		rootp->size = BPS(bs) * SPC(bs) * clusters;
	} else
		rootp->size = RDE(bs) * sizeof(fat_dentry_t);

	rootp->idx = ridxp;
	ridxp->nodep = rootp;
	rootp->bp = rfn;
	rfn->data = rootp;

	fibril_mutex_unlock(&ridxp->lock);

	async_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}
1008
/** Handle the VFS_OUT_MOUNT request by delegating to libfs. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1013
/** Handle the VFS_OUT_UNMOUNTED request: tear down a FAT instance.
 *
 * Refuses with EBUSY unless the root node holds exactly the two expected
 * references; otherwise flushes and frees all cached nodes, index
 * structures and the block cache, and shuts down libblock.
 */
void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
{
	devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	rc = fat_root_get(&fn, devmap_handle);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}
	nodep = FAT_NODE(fn);

	/*
	 * We expect exactly two references on the root node. One for the
	 * fat_root_get() above and one created in fat_mounted().
	 */
	if (nodep->refcnt != 2) {
		(void) fat_node_put(fn);
		async_answer_0(rid, EBUSY);
		return;
	}

	/*
	 * Put the root node and force it to the FAT free node list.
	 */
	(void) fat_node_put(fn);
	(void) fat_node_put(fn);

	/*
	 * Perform cleanup of the node structures, index structures and
	 * associated data. Write back this file system's dirty blocks and
	 * stop using libblock for this instance.
	 */
	(void) fat_node_fini_by_devmap_handle(devmap_handle);
	fat_idx_fini_by_devmap_handle(devmap_handle);
	(void) block_cache_fini(devmap_handle);
	block_fini(devmap_handle);

	async_answer_0(rid, EOK);
}
1056
/** Handle the VFS_OUT_UNMOUNT request by delegating to libfs. */
void fat_unmount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_unmount(&fat_libfs_ops, rid, request);
}
1061
/** Handle the VFS_OUT_LOOKUP request by delegating to libfs. */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1066
/** Handle the VFS_OUT_READ request.
 *
 * Regular files: reads at most one block and returns possibly fewer bytes
 * than requested (the client must cope). Directories: returns the name of
 * the entry at @a pos (one entry per call); the reported byte count is the
 * number of dentry slots consumed, so the client can advance its position.
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
	aoff64_t pos =
	    (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, devmap_handle, index);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		async_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	ipc_callid_t callid;
	size_t len;
	if (!async_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(devmap_handle);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) async_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the block boundary and the file size. */
			bytes = min(len, BPS(bs) - pos % BPS(bs));
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
			    BLOCK_FLAGS_NONE);
			if (rc != EOK) {
				fat_node_put(fn);
				async_answer_0(callid, rc);
				async_answer_0(rid, rc);
				return;
			}
			(void) async_data_read_finalize(callid,
			    b->data + pos % BPS(bs), bytes);
			rc = block_put(b);
			if (rc != EOK) {
				fat_node_put(fn);
				async_answer_0(rid, rc);
				return;
			}
		}
	} else {
		aoff64_t spos = pos;
		char name[FAT_LFN_NAME_SIZE];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % BPS(bs) == 0);
		assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

		fat_directory_t di;
		rc = fat_directory_open(nodep, &di);
		if (rc != EOK) goto err;
		rc = fat_directory_seek(&di, pos);
		if (rc != EOK) {
			(void) fat_directory_close(&di);
			goto err;
		}

		/* EOK -> entry found; ENOENT -> end of directory. */
		rc = fat_directory_read(&di, name, &d);
		if (rc == EOK) goto hit;
		if (rc == ENOENT) goto miss;
		/* Any other error falls through into err below. */

err:
		(void) fat_node_put(fn);
		async_answer_0(callid, rc);
		async_answer_0(rid, rc);
		return;

miss:
		rc = fat_directory_close(&di);
		if (rc!=EOK)
			goto err;
		rc = fat_node_put(fn);
		async_answer_0(callid, rc != EOK ? rc : ENOENT);
		async_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
		return;

hit:
		pos = di.pos;
		rc = fat_directory_close(&di);
		if (rc!=EOK)
			goto err;
		(void) async_data_read_finalize(callid, name, str_size(name) + 1);
		/* Number of dentry slots consumed by this entry (incl. LFN). */
		bytes = (pos - spos)+1;
	}

	rc = fat_node_put(fn);
	async_answer_1(rid, rc, (sysarg_t)bytes);
}
1181
1182void fat_write(ipc_callid_t rid, ipc_call_t *request)
1183{
1184 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
1185 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1186 aoff64_t pos =
1187 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
1188 fs_node_t *fn;
1189 fat_node_t *nodep;
1190 fat_bs_t *bs;
1191 size_t bytes, size;
1192 block_t *b;
1193 aoff64_t boundary;
1194 int flags = BLOCK_FLAGS_NONE;
1195 int rc;
1196
1197 rc = fat_node_get(&fn, devmap_handle, index);
1198 if (rc != EOK) {
1199 async_answer_0(rid, rc);
1200 return;
1201 }
1202 if (!fn) {
1203 async_answer_0(rid, ENOENT);
1204 return;
1205 }
1206 nodep = FAT_NODE(fn);
1207
1208 ipc_callid_t callid;
1209 size_t len;
1210 if (!async_data_write_receive(&callid, &len)) {
1211 (void) fat_node_put(fn);
1212 async_answer_0(callid, EINVAL);
1213 async_answer_0(rid, EINVAL);
1214 return;
1215 }
1216
1217 bs = block_bb_get(devmap_handle);
1218
1219 /*
1220 * In all scenarios, we will attempt to write out only one block worth
1221 * of data at maximum. There might be some more efficient approaches,
1222 * but this one greatly simplifies fat_write(). Note that we can afford
1223 * to do this because the client must be ready to handle the return
1224 * value signalizing a smaller number of bytes written.
1225 */
1226 bytes = min(len, BPS(bs) - pos % BPS(bs));
1227 if (bytes == BPS(bs))
1228 flags |= BLOCK_FLAGS_NOREAD;
1229
1230 boundary = ROUND_UP(nodep->size, BPC(bs));
1231 if (pos < boundary) {
1232 /*
1233 * This is the easier case - we are either overwriting already
1234 * existing contents or writing behind the EOF, but still within
1235 * the limits of the last cluster. The node size may grow to the
1236 * next block size boundary.
1237 */
1238 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1239 if (rc != EOK) {
1240 (void) fat_node_put(fn);
1241 async_answer_0(callid, rc);
1242 async_answer_0(rid, rc);
1243 return;
1244 }
1245 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
1246 if (rc != EOK) {
1247 (void) fat_node_put(fn);
1248 async_answer_0(callid, rc);
1249 async_answer_0(rid, rc);
1250 return;
1251 }
1252 (void) async_data_write_finalize(callid,
1253 b->data + pos % BPS(bs), bytes);
1254 b->dirty = true; /* need to sync block */
1255 rc = block_put(b);
1256 if (rc != EOK) {
1257 (void) fat_node_put(fn);
1258 async_answer_0(rid, rc);
1259 return;
1260 }
1261 if (pos + bytes > nodep->size) {
1262 nodep->size = pos + bytes;
1263 nodep->dirty = true; /* need to sync node */
1264 }
1265 size = nodep->size;
1266 rc = fat_node_put(fn);
1267 async_answer_2(rid, rc, bytes, nodep->size);
1268 return;
1269 } else {
1270 /*
1271 * This is the more difficult case. We must allocate new
1272 * clusters for the node and zero them out.
1273 */
1274 unsigned nclsts;
1275 fat_cluster_t mcl, lcl;
1276
1277 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
1278 /* create an independent chain of nclsts clusters in all FATs */
1279 rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);
1280 if (rc != EOK) {
1281 /* could not allocate a chain of nclsts clusters */
1282 (void) fat_node_put(fn);
1283 async_answer_0(callid, rc);
1284 async_answer_0(rid, rc);
1285 return;
1286 }
1287 /* zero fill any gaps */
1288 rc = fat_fill_gap(bs, nodep, mcl, pos);
1289 if (rc != EOK) {
1290 (void) fat_free_clusters(bs, devmap_handle, mcl);
1291 (void) fat_node_put(fn);
1292 async_answer_0(callid, rc);
1293 async_answer_0(rid, rc);
1294 return;
1295 }
1296 rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL,
1297 (pos / BPS(bs)) % SPC(bs), flags);
1298 if (rc != EOK) {
1299 (void) fat_free_clusters(bs, devmap_handle, mcl);
1300 (void) fat_node_put(fn);
1301 async_answer_0(callid, rc);
1302 async_answer_0(rid, rc);
1303 return;
1304 }
1305 (void) async_data_write_finalize(callid,
1306 b->data + pos % BPS(bs), bytes);
1307 b->dirty = true; /* need to sync block */
1308 rc = block_put(b);
1309 if (rc != EOK) {
1310 (void) fat_free_clusters(bs, devmap_handle, mcl);
1311 (void) fat_node_put(fn);
1312 async_answer_0(rid, rc);
1313 return;
1314 }
1315 /*
1316 * Append the cluster chain starting in mcl to the end of the
1317 * node's cluster chain.
1318 */
1319 rc = fat_append_clusters(bs, nodep, mcl, lcl);
1320 if (rc != EOK) {
1321 (void) fat_free_clusters(bs, devmap_handle, mcl);
1322 (void) fat_node_put(fn);
1323 async_answer_0(rid, rc);
1324 return;
1325 }
1326 nodep->size = size = pos + bytes;
1327 nodep->dirty = true; /* need to sync node */
1328 rc = fat_node_put(fn);
1329 async_answer_2(rid, rc, bytes, size);
1330 return;
1331 }
1332}
1333
1334void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1335{
1336 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
1337 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1338 aoff64_t size =
1339 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
1340 fs_node_t *fn;
1341 fat_node_t *nodep;
1342 fat_bs_t *bs;
1343 int rc;
1344
1345 rc = fat_node_get(&fn, devmap_handle, index);
1346 if (rc != EOK) {
1347 async_answer_0(rid, rc);
1348 return;
1349 }
1350 if (!fn) {
1351 async_answer_0(rid, ENOENT);
1352 return;
1353 }
1354 nodep = FAT_NODE(fn);
1355
1356 bs = block_bb_get(devmap_handle);
1357
1358 if (nodep->size == size) {
1359 rc = EOK;
1360 } else if (nodep->size < size) {
1361 /*
1362 * The standard says we have the freedom to grow the node.
1363 * For now, we simply return an error.
1364 */
1365 rc = EINVAL;
1366 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
1367 /*
1368 * The node will be shrunk, but no clusters will be deallocated.
1369 */
1370 nodep->size = size;
1371 nodep->dirty = true; /* need to sync node */
1372 rc = EOK;
1373 } else {
1374 /*
1375 * The node will be shrunk, clusters will be deallocated.
1376 */
1377 if (size == 0) {
1378 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1379 if (rc != EOK)
1380 goto out;
1381 } else {
1382 fat_cluster_t lastc;
1383 rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
1384 &lastc, NULL, (size - 1) / BPC(bs));
1385 if (rc != EOK)
1386 goto out;
1387 rc = fat_chop_clusters(bs, nodep, lastc);
1388 if (rc != EOK)
1389 goto out;
1390 }
1391 nodep->size = size;
1392 nodep->dirty = true; /* need to sync node */
1393 rc = EOK;
1394 }
1395out:
1396 fat_node_put(fn);
1397 async_answer_0(rid, rc);
1398 return;
1399}
1400
/** Handle the VFS_OUT_CLOSE request.
 *
 * FAT keeps no per-open state, so closing a file is a no-op that always
 * succeeds.
 */
void fat_close(ipc_callid_t rid, ipc_call_t *request)
{
	async_answer_0(rid, EOK);
}
1405
1406void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1407{
1408 devmap_handle_t devmap_handle = (devmap_handle_t)IPC_GET_ARG1(*request);
1409 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1410 fs_node_t *fn;
1411 fat_node_t *nodep;
1412 int rc;
1413
1414 rc = fat_node_get(&fn, devmap_handle, index);
1415 if (rc != EOK) {
1416 async_answer_0(rid, rc);
1417 return;
1418 }
1419 if (!fn) {
1420 async_answer_0(rid, ENOENT);
1421 return;
1422 }
1423
1424 nodep = FAT_NODE(fn);
1425 /*
1426 * We should have exactly two references. One for the above
1427 * call to fat_node_get() and one from fat_unlink().
1428 */
1429 assert(nodep->refcnt == 2);
1430
1431 rc = fat_destroy_node(fn);
1432 async_answer_0(rid, rc);
1433}
1434
/** Handle the VFS_OUT_OPEN_NODE request by delegating to libfs. */
void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1439
/** Handle the VFS_OUT_STAT request by delegating to libfs. */
void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1444
1445void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1446{
1447 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
1448 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1449
1450 fs_node_t *fn;
1451 int rc = fat_node_get(&fn, devmap_handle, index);
1452 if (rc != EOK) {
1453 async_answer_0(rid, rc);
1454 return;
1455 }
1456 if (!fn) {
1457 async_answer_0(rid, ENOENT);
1458 return;
1459 }
1460
1461 fat_node_t *nodep = FAT_NODE(fn);
1462
1463 nodep->dirty = true;
1464 rc = fat_node_sync(nodep);
1465
1466 fat_node_put(fn);
1467 async_answer_0(rid, rc);
1468}
1469
1470/**
1471 * @}
1472 */
Note: See TracBrowser for help on using the repository browser.