source: mainline/uspace/srv/fs/fat/fat_ops.c@dfddfcd

Last change on this file: dfddfcd, checked in by Jakub Jermar <jakub@…>, 16 years ago

Make fat_write() never assert on an I/O error.

1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/ipc.h>
45#include <ipc/services.h>
46#include <ipc/devmap.h>
47#include <async.h>
48#include <errno.h>
49#include <string.h>
50#include <byteorder.h>
51#include <adt/hash_table.h>
52#include <adt/list.h>
53#include <assert.h>
54#include <fibril_sync.h>
55#include <sys/mman.h>
56#include <align.h>
57
58#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
59#define FS_NODE(node) ((node) ? (node)->bp : NULL)
60
61/** Mutex protecting the list of cached free FAT nodes. */
62static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
63
64/** List of cached free FAT nodes. */
65static LIST_INITIALIZE(ffn_head);
66
67/*
68 * Forward declarations of FAT libfs operations.
69 */
70static int fat_root_get(fs_node_t **, dev_handle_t);
71static int fat_match(fs_node_t **, fs_node_t *, const char *);
72static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
73static int fat_node_put(fs_node_t *);
74static int fat_create_node(fs_node_t **, dev_handle_t, int);
75static int fat_destroy_node(fs_node_t *);
76static int fat_link(fs_node_t *, fs_node_t *, const char *);
77static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
78static int fat_has_children(bool *, fs_node_t *);
79static fs_index_t fat_index_get(fs_node_t *);
80static size_t fat_size_get(fs_node_t *);
81static unsigned fat_lnkcnt_get(fs_node_t *);
82static char fat_plb_get_char(unsigned);
83static bool fat_is_directory(fs_node_t *);
84static bool fat_is_file(fs_node_t *node);
85
86/*
87 * Helper functions.
88 */
89static void fat_node_initialize(fat_node_t *node)
90{
91 fibril_mutex_initialize(&node->lock);
92 node->bp = NULL;
93 node->idx = NULL;
94 node->type = 0;
95 link_initialize(&node->ffn_link);
96 node->size = 0;
97 node->lnkcnt = 0;
98 node->refcnt = 0;
99 node->dirty = false;
100}
101
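/** Write an in-core node's metadata back to its on-disk dentry.
 *
 * The caller must have marked the node dirty. Only the first cluster,
 * the size (for regular files) and the directory attribute are updated;
 * the time fields are left untouched for now.
 */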
102static int fat_node_sync(fat_node_t *node)
103{
104 block_t *b;
105 fat_bs_t *bs;
106 fat_dentry_t *d;
107 uint16_t bps;
108 unsigned dps;
109 int rc;
110
111 assert(node->dirty);
112
113 bs = block_bb_get(node->idx->dev_handle);
114 bps = uint16_t_le2host(bs->bps);
115 dps = bps / sizeof(fat_dentry_t);
116
117 /* Read the block that contains the dentry of interest. */
118 rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
119 (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
120 if (rc != EOK)
121 return rc;
122
123 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);
124
125 d->firstc = host2uint16_t_le(node->firstc);
126 if (node->type == FAT_FILE) {
127 d->size = host2uint32_t_le(node->size);
128 } else if (node->type == FAT_DIRECTORY) {
129 d->attr = FAT_ATTR_SUBDIR;
130 }
131
132 /* TODO: update other fields? (e.g. time fields) */
133
134 b->dirty = true; /* need to sync block */
135 rc = block_put(b);
136 return rc;
137}
138
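/** Obtain a pristine FAT in-core node.
 *
 * Recycles a cached free node if one is available (syncing it first if it
 * is dirty); otherwise allocates a fresh fs_node_t/fat_node_t pair.
 */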
139static int fat_node_get_new(fat_node_t **nodepp)
140{
141 fs_node_t *fn;
142 fat_node_t *nodep;
143 int rc;
144
145 fibril_mutex_lock(&ffn_mutex);
146 if (!list_empty(&ffn_head)) {
147 /* Try to use a cached free node structure. */
148 fat_idx_t *idxp_tmp;
149 nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
150 if (!fibril_mutex_trylock(&nodep->lock))
151 goto skip_cache;
152 idxp_tmp = nodep->idx;
153 if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
154 fibril_mutex_unlock(&nodep->lock);
155 goto skip_cache;
156 }
157 list_remove(&nodep->ffn_link);
158 fibril_mutex_unlock(&ffn_mutex);
159 if (nodep->dirty) {
160 rc = fat_node_sync(nodep);
161 if (rc != EOK) {
162 idxp_tmp->nodep = NULL;
163 fibril_mutex_unlock(&nodep->lock);
164 fibril_mutex_unlock(&idxp_tmp->lock);
165 free(nodep->bp);
166 free(nodep);
167 return rc;
168 }
169 }
170 idxp_tmp->nodep = NULL;
171 fibril_mutex_unlock(&nodep->lock);
172 fibril_mutex_unlock(&idxp_tmp->lock);
173 fn = FS_NODE(nodep);
174 } else {
175skip_cache:
176 /* Try to allocate a new node structure. */
177 fibril_mutex_unlock(&ffn_mutex);
178 fn = (fs_node_t *)malloc(sizeof(fs_node_t));
179 if (!fn)
180 return ENOMEM;
181 nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
182 if (!nodep) {
183 free(fn);
184 return ENOMEM;
185 }
186 }
187 fat_node_initialize(nodep);
188 fs_node_initialize(fn);
189 fn->data = nodep;
190 nodep->bp = fn;
191
192 *nodepp = nodep;
193 return EOK;
194}
195
196/** Internal version of fat_node_get().
197 *
198 * @param idxp Locked index structure.
199 */
200static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
201{
202 block_t *b;
203 fat_bs_t *bs;
204 fat_dentry_t *d;
205 fat_node_t *nodep = NULL;
206 unsigned bps;
207 unsigned spc;
208 unsigned dps;
209 int rc;
210
211 if (idxp->nodep) {
212 /*
213 * We are lucky.
214 * The node is already instantiated in memory.
215 */
216 fibril_mutex_lock(&idxp->nodep->lock);
217 if (!idxp->nodep->refcnt++) {
218 fibril_mutex_lock(&ffn_mutex);
219 list_remove(&idxp->nodep->ffn_link);
220 fibril_mutex_unlock(&ffn_mutex);
221 }
222 fibril_mutex_unlock(&idxp->nodep->lock);
223 *nodepp = idxp->nodep;
224 return EOK;
225 }
226
227 /*
228 * We must instantiate the node from the file system.
229 */
230
231 assert(idxp->pfc);
232
233 rc = fat_node_get_new(&nodep);
234 if (rc != EOK)
235 return rc;
236
237 bs = block_bb_get(idxp->dev_handle);
238 bps = uint16_t_le2host(bs->bps);
239 spc = bs->spc;
240 dps = bps / sizeof(fat_dentry_t);
241
242 /* Read the block that contains the dentry of interest. */
243 rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
244 (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
245 if (rc != EOK) {
246 (void) fat_node_put(FS_NODE(nodep));
247 return rc;
248 }
249
250 d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
251 if (d->attr & FAT_ATTR_SUBDIR) {
252 /*
253 * The only directory which does not have this bit set is the
254 * root directory itself. The root directory node is handled
255 * and initialized elsewhere.
256 */
257 nodep->type = FAT_DIRECTORY;
258 /*
259 * Unfortunately, the 'size' field of the FAT dentry is not
260 * defined for the directory entry type. We must determine the
261 * size of the directory by walking the FAT.
262 */
263 uint16_t clusters;
264 rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
265 uint16_t_le2host(d->firstc));
266 if (rc != EOK) {
267 (void) fat_node_put(FS_NODE(nodep));
268 return rc;
269 }
270 nodep->size = bps * spc * clusters;
271 } else {
272 nodep->type = FAT_FILE;
273 nodep->size = uint32_t_le2host(d->size);
274 }
275 nodep->firstc = uint16_t_le2host(d->firstc);
276 nodep->lnkcnt = 1;
277 nodep->refcnt = 1;
278
279 rc = block_put(b);
280 if (rc != EOK) {
281 (void) fat_node_put(FS_NODE(nodep));
282 return rc;
283 }
284
285 /* Link the idx structure with the node structure. */
286 nodep->idx = idxp;
287 idxp->nodep = nodep;
288
289 *nodepp = nodep;
290 return EOK;
291}
292
293/*
294 * FAT libfs operations.
295 */
296
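/** Return the root directory node of the file system on @a dev_handle. */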
297int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
298{
299 return fat_node_get(rfn, dev_handle, 0);
300}
301
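/** Look up the directory entry matching @a component in directory @a pfn.
 *
 * On a miss, EOK is returned and *rfn is set to NULL.
 */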
302int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
303{
304 fat_bs_t *bs;
305 fat_node_t *parentp = FAT_NODE(pfn);
306 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
307 unsigned i, j;
308 unsigned bps; /* bytes per sector */
309 unsigned dps; /* dentries per sector */
310 unsigned blocks;
311 fat_dentry_t *d;
312 block_t *b;
313 int rc;
314
315 fibril_mutex_lock(&parentp->idx->lock);
316 bs = block_bb_get(parentp->idx->dev_handle);
317 bps = uint16_t_le2host(bs->bps);
318 dps = bps / sizeof(fat_dentry_t);
319 blocks = parentp->size / bps;
320 for (i = 0; i < blocks; i++) {
321 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
322 if (rc != EOK) {
323 fibril_mutex_unlock(&parentp->idx->lock);
324 return rc;
325 }
326 for (j = 0; j < dps; j++) {
327 d = ((fat_dentry_t *)b->data) + j;
328 switch (fat_classify_dentry(d)) {
329 case FAT_DENTRY_SKIP:
330 case FAT_DENTRY_FREE:
331 continue;
332 case FAT_DENTRY_LAST:
333 /* miss */
334 rc = block_put(b);
335 fibril_mutex_unlock(&parentp->idx->lock);
336 *rfn = NULL;
337 return rc;
338 default:
339 case FAT_DENTRY_VALID:
340 fat_dentry_name_get(d, name);
341 break;
342 }
343 if (fat_dentry_namecmp(name, component) == 0) {
344 /* hit */
345 fat_node_t *nodep;
346 /*
347 * Assume tree hierarchy for locking. We
348 * already have the parent and now we are going
349 * to lock the child. Never lock in the opposite
350 * order.
351 */
352 fat_idx_t *idx = fat_idx_get_by_pos(
353 parentp->idx->dev_handle, parentp->firstc,
354 i * dps + j);
355 fibril_mutex_unlock(&parentp->idx->lock);
356 if (!idx) {
357 /*
358 * Can happen if memory is low or if we
359 * run out of 32-bit indices.
360 */
361 rc = block_put(b);
362 return (rc == EOK) ? ENOMEM : rc;
363 }
364 rc = fat_node_get_core(&nodep, idx);
365 fibril_mutex_unlock(&idx->lock);
366 if (rc != EOK) {
367 (void) block_put(b);
368 return rc;
369 }
370 *rfn = FS_NODE(nodep);
371 rc = block_put(b);
372 if (rc != EOK)
373 (void) fat_node_put(*rfn);
374 return rc;
375 }
376 }
377 rc = block_put(b);
378 if (rc != EOK) {
379 fibril_mutex_unlock(&parentp->idx->lock);
380 return rc;
381 }
382 }
383
384 fibril_mutex_unlock(&parentp->idx->lock);
385 *rfn = NULL;
386 return EOK;
387}
388
389/** Instantiate a FAT in-core node. */
390int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
391{
392 fat_node_t *nodep;
393 fat_idx_t *idxp;
394 int rc;
395
396 idxp = fat_idx_get_by_index(dev_handle, index);
397 if (!idxp) {
398 *rfn = NULL;
399 return EOK;
400 }
401 /* idxp->lock held */
402 rc = fat_node_get_core(&nodep, idxp);
403 fibril_mutex_unlock(&idxp->lock);
404 if (rc == EOK)
405 *rfn = FS_NODE(nodep);
406 return rc;
407}
408
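/** Drop a reference to a node.
 *
 * The last reference either puts the node on the list of cached free nodes
 * or, if the node has no index structure, destroys it outright.
 */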
409int fat_node_put(fs_node_t *fn)
410{
411 fat_node_t *nodep = FAT_NODE(fn);
412 bool destroy = false;
413
414 fibril_mutex_lock(&nodep->lock);
415 if (!--nodep->refcnt) {
416 if (nodep->idx) {
417 fibril_mutex_lock(&ffn_mutex);
418 list_append(&nodep->ffn_link, &ffn_head);
419 fibril_mutex_unlock(&ffn_mutex);
420 } else {
421 /*
422 * The node does not have any index structure associated
423 * with itself. This can only mean that we are releasing
424 * the node after a failed attempt to allocate the index
425 * structure for it.
426 */
427 destroy = true;
428 }
429 }
430 fibril_mutex_unlock(&nodep->lock);
431 if (destroy) {
432 free(nodep->bp);
433 free(nodep);
434 }
435 return EOK;
436}
437
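/** Create a new file or directory node.
 *
 * A directory gets one zeroed cluster; a regular file starts with no
 * clusters and zero size. The new node is not linked anywhere yet.
 */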
438int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
439{
440 fat_idx_t *idxp;
441 fat_node_t *nodep;
442 fat_bs_t *bs;
443 fat_cluster_t mcl, lcl;
444 uint16_t bps;
445 int rc;
446
447 bs = block_bb_get(dev_handle);
448 bps = uint16_t_le2host(bs->bps);
449 if (flags & L_DIRECTORY) {
450 /* allocate a cluster */
451 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
452 if (rc != EOK)
453 return rc;
454 /* populate the new cluster with unused dentries */
455 rc = fat_zero_cluster(bs, dev_handle, mcl);
456 if (rc != EOK) {
457 (void) fat_free_clusters(bs, dev_handle, mcl);
458 return rc;
459 }
460 }
461
462 rc = fat_node_get_new(&nodep);
463 if (rc != EOK) {
464 if (flags & L_DIRECTORY)
	(void) fat_free_clusters(bs, dev_handle, mcl);
465 return rc;
466 }
467 rc = fat_idx_get_new(&idxp, dev_handle);
468 if (rc != EOK) {
469 if (flags & L_DIRECTORY)
	(void) fat_free_clusters(bs, dev_handle, mcl);
470 (void) fat_node_put(FS_NODE(nodep));
471 return rc;
472 }
473 /* idxp->lock held */
474 if (flags & L_DIRECTORY) {
475 nodep->type = FAT_DIRECTORY;
476 nodep->firstc = mcl;
477 nodep->size = bps * bs->spc;
478 } else {
479 nodep->type = FAT_FILE;
480 nodep->firstc = FAT_CLST_RES0;
481 nodep->size = 0;
482 }
483 nodep->lnkcnt = 0; /* not linked anywhere */
484 nodep->refcnt = 1;
485 nodep->dirty = true;
486
487 nodep->idx = idxp;
488 idxp->nodep = nodep;
489
490 fibril_mutex_unlock(&idxp->lock);
491 *rfn = FS_NODE(nodep);
492 return EOK;
493}
494
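/** Destroy an unlinked, childless node, freeing its clusters and index. */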
495int fat_destroy_node(fs_node_t *fn)
496{
497 fat_node_t *nodep = FAT_NODE(fn);
498 fat_bs_t *bs;
499 bool has_children;
500 int rc;
501
502 /*
503 * The node is not reachable from the file system. This means that the
504 * link count should be zero and that the index structure cannot be
505 * found in the position hash. Obviously, there is no need to lock either
506 * the node or its index structure.
507 */
508 assert(nodep->lnkcnt == 0);
509
510 /*
511 * The node must not have any children.
512 */
513 rc = fat_has_children(&has_children, fn);
514 if (rc != EOK)
515 return rc;
516 assert(!has_children);
517
518 bs = block_bb_get(nodep->idx->dev_handle);
519 if (nodep->firstc != FAT_CLST_RES0) {
520 assert(nodep->size);
521 /* Free all clusters allocated to the node. */
522 rc = fat_free_clusters(bs, nodep->idx->dev_handle,
523 nodep->firstc);
524 }
525
526 fat_idx_destroy(nodep->idx);
527 free(nodep->bp);
528 free(nodep);
529 return rc;
530}
531
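/** Link the child node @a cfn into directory @a pfn under @a name.
 *
 * Finds or creates a free dentry in the parent, writes the name into it
 * and, for directories, tries to create the "." and ".." entries as well.
 */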
532int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
533{
534 fat_node_t *parentp = FAT_NODE(pfn);
535 fat_node_t *childp = FAT_NODE(cfn);
536 fat_dentry_t *d;
537 fat_bs_t *bs;
538 block_t *b;
539 unsigned i, j;
540 uint16_t bps;
541 unsigned dps;
542 unsigned blocks;
543 fat_cluster_t mcl, lcl;
544 int rc;
545
546 fibril_mutex_lock(&childp->lock);
547 if (childp->lnkcnt == 1) {
548 /*
549 * On FAT, we don't support multiple hard links.
550 */
551 fibril_mutex_unlock(&childp->lock);
552 return EMLINK;
553 }
554 assert(childp->lnkcnt == 0);
555 fibril_mutex_unlock(&childp->lock);
556
557 if (!fat_dentry_name_verify(name)) {
558 /*
559 * Attempt to create an entry with an unsupported name.
560 */
561 return ENOTSUP;
562 }
563
564 /*
565 * Find an unused dentry in the parent node, or grow the parent and
566 * allocate a new one.
567 */
568
569 fibril_mutex_lock(&parentp->idx->lock);
570 bs = block_bb_get(parentp->idx->dev_handle);
571 bps = uint16_t_le2host(bs->bps);
572 dps = bps / sizeof(fat_dentry_t);
573
574 blocks = parentp->size / bps;
575
576 for (i = 0; i < blocks; i++) {
577 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
578 if (rc != EOK) {
579 fibril_mutex_unlock(&parentp->idx->lock);
580 return rc;
581 }
582 for (j = 0; j < dps; j++) {
583 d = ((fat_dentry_t *)b->data) + j;
584 switch (fat_classify_dentry(d)) {
585 case FAT_DENTRY_SKIP:
586 case FAT_DENTRY_VALID:
587 /* skipping used and meta entries */
588 continue;
589 case FAT_DENTRY_FREE:
590 case FAT_DENTRY_LAST:
591 /* found an empty slot */
592 goto hit;
593 }
594 }
595 rc = block_put(b);
596 if (rc != EOK) {
597 fibril_mutex_unlock(&parentp->idx->lock);
598 return rc;
599 }
600 }
601 j = 0;
602
603 /*
604 * We need to grow the parent in order to create a new unused dentry.
605 */
606 if (parentp->firstc == FAT_CLST_ROOT) {
607 /* Can't grow the root directory. */
608 fibril_mutex_unlock(&parentp->idx->lock);
609 return ENOSPC;
610 }
611 rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
612 if (rc != EOK) {
613 fibril_mutex_unlock(&parentp->idx->lock);
614 return rc;
615 }
616 rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
617 if (rc != EOK) {
618 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
619 fibril_mutex_unlock(&parentp->idx->lock);
620 return rc;
621 }
622 rc = fat_append_clusters(bs, parentp, mcl);
623 if (rc != EOK) {
624 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
625 fibril_mutex_unlock(&parentp->idx->lock);
626 return rc;
627 }
628 parentp->size += bps * bs->spc;
629 parentp->dirty = true; /* need to sync node */
630 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
631 if (rc != EOK) {
632 fibril_mutex_unlock(&parentp->idx->lock);
633 return rc;
634 }
635 d = (fat_dentry_t *)b->data;
636
637hit:
638 /*
639 * At this point we only establish the link between the parent and the
640 * child. The dentry, except for the name and the extension, will remain
641 * uninitialized until the corresponding node is synced. Thus the valid
642 * dentry data is kept in the child node structure.
643 */
644 memset(d, 0, sizeof(fat_dentry_t));
645 fat_dentry_name_set(d, name);
646 b->dirty = true; /* need to sync block */
647 rc = block_put(b);
648 fibril_mutex_unlock(&parentp->idx->lock);
649 if (rc != EOK)
650 return rc;
651
652 fibril_mutex_lock(&childp->idx->lock);
653
654 /*
655 * If possible, create the Sub-directory Identifier Entry and the
656 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
657 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
658 * not use them anyway, so this is rather a sign of our good will.
659 */
660 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
661 if (rc != EOK) {
662 /*
663 * Rather than returning an error, simply skip the creation of
664 * these two entries.
665 */
666 goto skip_dots;
667 }
668 d = (fat_dentry_t *)b->data;
669 if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
670 str_cmp(d->name, FAT_NAME_DOT) == 0) {
671 memset(d, 0, sizeof(fat_dentry_t));
672 str_cpy(d->name, 8, FAT_NAME_DOT);
673 str_cpy(d->ext, 3, FAT_EXT_PAD);
674 d->attr = FAT_ATTR_SUBDIR;
675 d->firstc = host2uint16_t_le(childp->firstc);
676 /* TODO: initialize also the date/time members. */
677 }
678 d++;
679 if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
680 str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
681 memset(d, 0, sizeof(fat_dentry_t));
682 str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
683 str_cpy(d->ext, 3, FAT_EXT_PAD);
684 d->attr = FAT_ATTR_SUBDIR;
685 d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
686 host2uint16_t_le(FAT_CLST_RES0) :
687 host2uint16_t_le(parentp->firstc);
688 /* TODO: initialize also the date/time members. */
689 }
690 b->dirty = true; /* need to sync block */
691 /*
692 * Ignore the return value as we would have fallen through on error
693 * anyway.
694 */
695 (void) block_put(b);
696skip_dots:
697
698 childp->idx->pfc = parentp->firstc;
699 childp->idx->pdi = i * dps + j;
700 fibril_mutex_unlock(&childp->idx->lock);
701
702 fibril_mutex_lock(&childp->lock);
703 childp->lnkcnt = 1;
704 childp->dirty = true; /* need to sync node */
705 fibril_mutex_unlock(&childp->lock);
706
707 /*
708 * Hash in the index structure into the position hash.
709 */
710 fat_idx_hashin(childp->idx);
711
712 return EOK;
713}
714
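/** Unlink the child node @a cfn from directory @a pfn.
 *
 * The child's dentry is marked as erased and its index structure is
 * removed from the position hash. Non-empty directories are refused.
 */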
715int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
716{
717 fat_node_t *parentp = FAT_NODE(pfn);
718 fat_node_t *childp = FAT_NODE(cfn);
719 fat_bs_t *bs;
720 fat_dentry_t *d;
721 uint16_t bps;
722 block_t *b;
723 bool has_children;
724 int rc;
725
726 if (!parentp)
727 return EBUSY;
728
729 rc = fat_has_children(&has_children, cfn);
730 if (rc != EOK)
731 return rc;
732 if (has_children)
733 return ENOTEMPTY;
734
735 fibril_mutex_lock(&parentp->lock);
736 fibril_mutex_lock(&childp->lock);
737 assert(childp->lnkcnt == 1);
738 fibril_mutex_lock(&childp->idx->lock);
739 bs = block_bb_get(childp->idx->dev_handle);
740 bps = uint16_t_le2host(bs->bps);
741
742 rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
743 (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
744 BLOCK_FLAGS_NONE);
745 if (rc != EOK)
746 goto error;
747 d = (fat_dentry_t *)b->data +
748 (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
749 /* mark the dentry as not-currently-used */
750 d->name[0] = FAT_DENTRY_ERASED;
751 b->dirty = true; /* need to sync block */
752 rc = block_put(b);
753 if (rc != EOK)
754 goto error;
755
756 /* remove the index structure from the position hash */
757 fat_idx_hashout(childp->idx);
758 /* clear position information */
759 childp->idx->pfc = FAT_CLST_RES0;
760 childp->idx->pdi = 0;
761 fibril_mutex_unlock(&childp->idx->lock);
762 childp->lnkcnt = 0;
763 childp->dirty = true;
764 fibril_mutex_unlock(&childp->lock);
765 fibril_mutex_unlock(&parentp->lock);
766
767 return EOK;
768
769error:
770 fibril_mutex_unlock(&parentp->lock);
771 fibril_mutex_unlock(&childp->lock);
772 fibril_mutex_unlock(&childp->idx->lock);
773 return rc;
774}
775
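/** Determine whether the node, if a directory, contains any valid dentries. */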
776int fat_has_children(bool *has_children, fs_node_t *fn)
777{
778 fat_bs_t *bs;
779 fat_node_t *nodep = FAT_NODE(fn);
780 unsigned bps;
781 unsigned dps;
782 unsigned blocks;
783 block_t *b;
784 unsigned i, j;
785 int rc;
786
787 if (nodep->type != FAT_DIRECTORY) {
788 *has_children = false;
789 return EOK;
790 }
791
792 fibril_mutex_lock(&nodep->idx->lock);
793 bs = block_bb_get(nodep->idx->dev_handle);
794 bps = uint16_t_le2host(bs->bps);
795 dps = bps / sizeof(fat_dentry_t);
796
797 blocks = nodep->size / bps;
798
799 for (i = 0; i < blocks; i++) {
800 fat_dentry_t *d;
801
802 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
803 if (rc != EOK) {
804 fibril_mutex_unlock(&nodep->idx->lock);
805 return rc;
806 }
807 for (j = 0; j < dps; j++) {
808 d = ((fat_dentry_t *)b->data) + j;
809 switch (fat_classify_dentry(d)) {
810 case FAT_DENTRY_SKIP:
811 case FAT_DENTRY_FREE:
812 continue;
813 case FAT_DENTRY_LAST:
814 rc = block_put(b);
815 fibril_mutex_unlock(&nodep->idx->lock);
816 *has_children = false;
817 return rc;
818 default:
819 case FAT_DENTRY_VALID:
820 rc = block_put(b);
821 fibril_mutex_unlock(&nodep->idx->lock);
822 *has_children = true;
823 return rc;
824 }
825 }
826 rc = block_put(b);
827 if (rc != EOK) {
828 fibril_mutex_unlock(&nodep->idx->lock);
829 return rc;
830 }
831 }
832
833 fibril_mutex_unlock(&nodep->idx->lock);
834 *has_children = false;
835 return EOK;
836}
837
838
839fs_index_t fat_index_get(fs_node_t *fn)
840{
841 return FAT_NODE(fn)->idx->index;
842}
843
844size_t fat_size_get(fs_node_t *fn)
845{
846 return FAT_NODE(fn)->size;
847}
848
849unsigned fat_lnkcnt_get(fs_node_t *fn)
850{
851 return FAT_NODE(fn)->lnkcnt;
852}
853
854char fat_plb_get_char(unsigned pos)
855{
856 return fat_reg.plb_ro[pos % PLB_SIZE];
857}
858
859bool fat_is_directory(fs_node_t *fn)
860{
861 return FAT_NODE(fn)->type == FAT_DIRECTORY;
862}
863
864bool fat_is_file(fs_node_t *fn)
865{
866 return FAT_NODE(fn)->type == FAT_FILE;
867}
868
869/** libfs operations */
870libfs_ops_t fat_libfs_ops = {
871 .root_get = fat_root_get,
872 .match = fat_match,
873 .node_get = fat_node_get,
874 .node_put = fat_node_put,
875 .create = fat_create_node,
876 .destroy = fat_destroy_node,
877 .link = fat_link,
878 .unlink = fat_unlink,
879 .has_children = fat_has_children,
880 .index_get = fat_index_get,
881 .size_get = fat_size_get,
882 .lnkcnt_get = fat_lnkcnt_get,
883 .plb_get_char = fat_plb_get_char,
884 .is_directory = fat_is_directory,
885 .is_file = fat_is_file
886};
887
888/*
889 * VFS operations.
890 */
891
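/** Handle the VFS_MOUNTED request.
 *
 * Accepts the mount options, initializes libblock and the block cache,
 * reads the boot sector and sets up the in-core root directory node.
 */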
892void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
893{
894 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
895 enum cache_mode cmode;
896 fat_bs_t *bs;
897 uint16_t bps;
898 uint16_t rde;
899 int rc;
900
901 /* accept the mount options */
902 ipc_callid_t callid;
903 size_t size;
904 if (!async_data_write_receive(&callid, &size)) {
905 ipc_answer_0(callid, EINVAL);
906 ipc_answer_0(rid, EINVAL);
907 return;
908 }
909 char *opts = malloc(size + 1);
910 if (!opts) {
911 ipc_answer_0(callid, ENOMEM);
912 ipc_answer_0(rid, ENOMEM);
913 return;
914 }
915 ipcarg_t retval = async_data_write_finalize(callid, opts, size);
916 if (retval != EOK) {
917 ipc_answer_0(rid, retval);
918 free(opts);
919 return;
920 }
921 opts[size] = '\0';
922
923 /* Check for option enabling write through. */
924 if (str_cmp(opts, "wtcache") == 0)
925 cmode = CACHE_MODE_WT;
926 else
927 cmode = CACHE_MODE_WB;
928
929 /* initialize libblock */
930 rc = block_init(dev_handle, BS_SIZE);
931 if (rc != EOK) {
932 ipc_answer_0(rid, rc);
933 return;
934 }
935
936 /* prepare the boot block */
937 rc = block_bb_read(dev_handle, BS_BLOCK);
938 if (rc != EOK) {
939 block_fini(dev_handle);
940 ipc_answer_0(rid, rc);
941 return;
942 }
943
944 /* get the buffer with the boot sector */
945 bs = block_bb_get(dev_handle);
946
947 /* Read the number of root directory entries. */
948 bps = uint16_t_le2host(bs->bps);
949 rde = uint16_t_le2host(bs->root_ent_max);
950
951 if (bps != BS_SIZE) {
952 block_fini(dev_handle);
953 ipc_answer_0(rid, ENOTSUP);
954 return;
955 }
956
957 /* Initialize the block cache */
958 rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
959 if (rc != EOK) {
960 block_fini(dev_handle);
961 ipc_answer_0(rid, rc);
962 return;
963 }
964
965 rc = fat_idx_init_by_dev_handle(dev_handle);
966 if (rc != EOK) {
967 block_fini(dev_handle);
968 ipc_answer_0(rid, rc);
969 return;
970 }
971
972 /* Initialize the root node. */
973 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
974 if (!rfn) {
975 block_fini(dev_handle);
976 fat_idx_fini_by_dev_handle(dev_handle);
977 ipc_answer_0(rid, ENOMEM);
978 return;
979 }
980 fs_node_initialize(rfn);
981 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
982 if (!rootp) {
983 free(rfn);
984 block_fini(dev_handle);
985 fat_idx_fini_by_dev_handle(dev_handle);
986 ipc_answer_0(rid, ENOMEM);
987 return;
988 }
989 fat_node_initialize(rootp);
990
991 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
992 if (!ridxp) {
993 free(rfn);
994 free(rootp);
995 block_fini(dev_handle);
996 fat_idx_fini_by_dev_handle(dev_handle);
997 ipc_answer_0(rid, ENOMEM);
998 return;
999 }
1000 assert(ridxp->index == 0);
1001 /* ridxp->lock held */
1002
1003 rootp->type = FAT_DIRECTORY;
1004 rootp->firstc = FAT_CLST_ROOT;
1005 rootp->refcnt = 1;
1006 rootp->lnkcnt = 0; /* FS root is not linked */
1007 rootp->size = rde * sizeof(fat_dentry_t);
1008 rootp->idx = ridxp;
1009 ridxp->nodep = rootp;
1010 rootp->bp = rfn;
1011 rfn->data = rootp;
1012
1013 fibril_mutex_unlock(&ridxp->lock);
1014
1015 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
1016}
1017
1018void fat_mount(ipc_callid_t rid, ipc_call_t *request)
1019{
1020 libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1021}
1022
1023void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
1024{
1025 libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1026}
1027
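/** Handle the VFS_READ request.
 *
 * Regular files return at most one block worth of data per call;
 * directories return one directory entry name per call.
 */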
1028void fat_read(ipc_callid_t rid, ipc_call_t *request)
1029{
1030 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1031 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1032 off_t pos = (off_t)IPC_GET_ARG3(*request);
1033 fs_node_t *fn;
1034 fat_node_t *nodep;
1035 fat_bs_t *bs;
1036 uint16_t bps;
1037 size_t bytes;
1038 block_t *b;
1039 int rc;
1040
1041 rc = fat_node_get(&fn, dev_handle, index);
1042 if (rc != EOK) {
1043 ipc_answer_0(rid, rc);
1044 return;
1045 }
1046 if (!fn) {
1047 ipc_answer_0(rid, ENOENT);
1048 return;
1049 }
1050 nodep = FAT_NODE(fn);
1051
1052 ipc_callid_t callid;
1053 size_t len;
1054 if (!async_data_read_receive(&callid, &len)) {
1055 fat_node_put(fn);
1056 ipc_answer_0(callid, EINVAL);
1057 ipc_answer_0(rid, EINVAL);
1058 return;
1059 }
1060
1061 bs = block_bb_get(dev_handle);
1062 bps = uint16_t_le2host(bs->bps);
1063
1064 if (nodep->type == FAT_FILE) {
1065 /*
1066 * Our strategy for regular file reads is to read one block at
1067 * most and make use of the possibility to return less data than
1068 * requested. This keeps the code very simple.
1069 */
1070 if (pos >= nodep->size) {
1071 /* reading beyond the EOF */
1072 bytes = 0;
1073 (void) async_data_read_finalize(callid, NULL, 0);
1074 } else {
1075 bytes = min(len, bps - pos % bps);
1076 bytes = min(bytes, nodep->size - pos);
1077 rc = fat_block_get(&b, bs, nodep, pos / bps,
1078 BLOCK_FLAGS_NONE);
1079 if (rc != EOK) {
1080 fat_node_put(fn);
1081 ipc_answer_0(callid, rc);
1082 ipc_answer_0(rid, rc);
1083 return;
1084 }
1085 (void) async_data_read_finalize(callid, b->data + pos % bps,
1086 bytes);
1087 rc = block_put(b);
1088 if (rc != EOK) {
1089 fat_node_put(fn);
1090 ipc_answer_0(rid, rc);
1091 return;
1092 }
1093 }
1094 } else {
1095 unsigned bnum;
1096 off_t spos = pos;
1097 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
1098 fat_dentry_t *d;
1099
1100 assert(nodep->type == FAT_DIRECTORY);
1101 assert(nodep->size % bps == 0);
1102 assert(bps % sizeof(fat_dentry_t) == 0);
1103
1104 /*
1105 * Our strategy for readdir() is to use the position pointer as
1106 * an index into the array of all dentries. On entry, it points
1107 * to the first unread dentry. If we skip any dentries, we bump
1108 * the position pointer accordingly.
1109 */
1110 bnum = (pos * sizeof(fat_dentry_t)) / bps;
1111 while (bnum < nodep->size / bps) {
1112 off_t o;
1113
1114 rc = fat_block_get(&b, bs, nodep, bnum,
1115 BLOCK_FLAGS_NONE);
1116 if (rc != EOK)
1117 goto err;
1118 for (o = pos % (bps / sizeof(fat_dentry_t));
1119 o < bps / sizeof(fat_dentry_t);
1120 o++, pos++) {
1121 d = ((fat_dentry_t *)b->data) + o;
1122 switch (fat_classify_dentry(d)) {
1123 case FAT_DENTRY_SKIP:
1124 case FAT_DENTRY_FREE:
1125 continue;
1126 case FAT_DENTRY_LAST:
1127 rc = block_put(b);
1128 if (rc != EOK)
1129 goto err;
1130 goto miss;
1131 default:
1132 case FAT_DENTRY_VALID:
1133 fat_dentry_name_get(d, name);
1134 rc = block_put(b);
1135 if (rc != EOK)
1136 goto err;
1137 goto hit;
1138 }
1139 }
1140 rc = block_put(b);
1141 if (rc != EOK)
1142 goto err;
1143 bnum++;
1144 }
1145miss:
1146 rc = fat_node_put(fn);
1147 ipc_answer_0(callid, rc != EOK ? rc : ENOENT);
1148 ipc_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
1149 return;
1150
1151err:
1152 (void) fat_node_put(fn);
1153 ipc_answer_0(callid, rc);
1154 ipc_answer_0(rid, rc);
1155 return;
1156
1157hit:
1158 (void) async_data_read_finalize(callid, name, str_size(name) + 1);
1159 bytes = (pos - spos) + 1;
1160 }
1161
1162 rc = fat_node_put(fn);
1163 ipc_answer_1(rid, rc, (ipcarg_t)bytes);
1164}
1165
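/** Handle the VFS_WRITE request.
 *
 * At most one block is written per call. Writes beyond the last allocated
 * cluster allocate a new cluster chain and zero-fill any gap first.
 */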
1166void fat_write(ipc_callid_t rid, ipc_call_t *request)
1167{
1168 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1169 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1170 off_t pos = (off_t)IPC_GET_ARG3(*request);
1171 fs_node_t *fn;
1172 fat_node_t *nodep;
1173 fat_bs_t *bs;
1174 size_t bytes, size;
1175 block_t *b;
1176 uint16_t bps;
1177 unsigned spc;
1178 unsigned bpc; /* bytes per cluster */
1179 off_t boundary;
1180 int flags = BLOCK_FLAGS_NONE;
1181 int rc;
1182
1183 rc = fat_node_get(&fn, dev_handle, index);
1184 if (rc != EOK) {
1185 ipc_answer_0(rid, rc);
1186 return;
1187 }
1188 if (!fn) {
1189 ipc_answer_0(rid, ENOENT);
1190 return;
1191 }
1192 nodep = FAT_NODE(fn);
1193
1194 ipc_callid_t callid;
1195 size_t len;
1196 if (!async_data_write_receive(&callid, &len)) {
1197 (void) fat_node_put(fn);
1198 ipc_answer_0(callid, EINVAL);
1199 ipc_answer_0(rid, EINVAL);
1200 return;
1201 }
1202
1203 bs = block_bb_get(dev_handle);
1204 bps = uint16_t_le2host(bs->bps);
1205 spc = bs->spc;
1206 bpc = bps * spc;
1207
1208 /*
1209 * In all scenarios, we will attempt to write out only one block worth
1210 * of data at maximum. There might be some more efficient approaches,
1211 * but this one greatly simplifies fat_write(). Note that we can afford
1212 * to do this because the client must be ready to handle the return
1213 * value signaling a smaller number of bytes written.
1214 */
1215 bytes = min(len, bps - pos % bps);
1216 if (bytes == bps)
1217 flags |= BLOCK_FLAGS_NOREAD;
1218
1219 boundary = ROUND_UP(nodep->size, bpc);
1220 if (pos < boundary) {
1221 /*
1222 * This is the easier case - we are either overwriting already
1223 * existing contents or writing behind the EOF, but still within
1224 * the limits of the last cluster. The node size may grow to the
1225 * next block size boundary.
1226 */
1227 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1228 if (rc != EOK) {
1229 (void) fat_node_put(fn);
1230 ipc_answer_0(callid, rc);
1231 ipc_answer_0(rid, rc);
1232 return;
1233 }
1234 rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
1235 if (rc != EOK) {
1236 (void) fat_node_put(fn);
1237 ipc_answer_0(callid, rc);
1238 ipc_answer_0(rid, rc);
1239 return;
1240 }
1241 (void) async_data_write_finalize(callid, b->data + pos % bps,
1242 bytes);
1243 b->dirty = true; /* need to sync block */
1244 rc = block_put(b);
1245 if (rc != EOK) {
1246 (void) fat_node_put(fn);
1247 ipc_answer_0(rid, rc);
1248 return;
1249 }
1250 if (pos + bytes > nodep->size) {
1251 nodep->size = pos + bytes;
1252 nodep->dirty = true; /* need to sync node */
1253 }
1254 size = nodep->size;
1255 rc = fat_node_put(fn);
1256 ipc_answer_2(rid, rc, bytes, size);
1257 return;
1258 } else {
1259 /*
1260 * This is the more difficult case. We must allocate new
1261 * clusters for the node and zero them out.
1262 */
1263 unsigned nclsts;
1264 fat_cluster_t mcl, lcl;
1265
1266 nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
1267 /* create an independent chain of nclsts clusters in all FATs */
1268 rc = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
1269 if (rc != EOK) {
1270 /* could not allocate a chain of nclsts clusters */
1271 (void) fat_node_put(fn);
1272 ipc_answer_0(callid, rc);
1273 ipc_answer_0(rid, rc);
1274 return;
1275 }
1276 /* zero fill any gaps */
1277 rc = fat_fill_gap(bs, nodep, mcl, pos);
1278 if (rc != EOK) {
1279 (void) fat_free_clusters(bs, dev_handle, mcl);
1280 (void) fat_node_put(fn);
1281 ipc_answer_0(callid, rc);
1282 ipc_answer_0(rid, rc);
1283 return;
1284 }
1285 rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
1286 flags);
1287 if (rc != EOK) {
1288 (void) fat_free_clusters(bs, dev_handle, mcl);
1289 (void) fat_node_put(fn);
1290 ipc_answer_0(callid, rc);
1291 ipc_answer_0(rid, rc);
1292 return;
1293 }
1294 (void) async_data_write_finalize(callid, b->data + pos % bps,
1295 bytes);
1296 b->dirty = true; /* need to sync block */
1297 rc = block_put(b);
1298 if (rc != EOK) {
1299 (void) fat_free_clusters(bs, dev_handle, mcl);
1300 (void) fat_node_put(fn);
1301 ipc_answer_0(rid, rc);
1302 return;
1303 }
1304 /*
1305 * Append the cluster chain starting in mcl to the end of the
1306 * node's cluster chain.
1307 */
1308 rc = fat_append_clusters(bs, nodep, mcl);
1309 if (rc != EOK) {
1310 (void) fat_free_clusters(bs, dev_handle, mcl);
1311 (void) fat_node_put(fn);
1312 ipc_answer_0(rid, rc);
1313 return;
1314 }
1315 nodep->size = size = pos + bytes;
1316 nodep->dirty = true; /* need to sync node */
1317 rc = fat_node_put(fn);
1318 ipc_answer_2(rid, rc, bytes, size);
1319 return;
1320 }
1321}
1322
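/** Handle the VFS_TRUNCATE request.
 *
 * Only shrinking (or keeping the current size) is supported; attempts to
 * grow the node return EINVAL.
 */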
1323void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1324{
1325 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1326 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1327 size_t size = (size_t)IPC_GET_ARG3(*request);
1328 fs_node_t *fn;
1329 fat_node_t *nodep;
1330 fat_bs_t *bs;
1331 uint16_t bps;
1332 uint8_t spc;
1333 unsigned bpc; /* bytes per cluster */
1334 int rc;
1335
1336 rc = fat_node_get(&fn, dev_handle, index);
1337 if (rc != EOK) {
1338 ipc_answer_0(rid, rc);
1339 return;
1340 }
1341 if (!fn) {
1342 ipc_answer_0(rid, ENOENT);
1343 return;
1344 }
1345 nodep = FAT_NODE(fn);
1346
1347 bs = block_bb_get(dev_handle);
1348 bps = uint16_t_le2host(bs->bps);
1349 spc = bs->spc;
1350 bpc = bps * spc;
1351
1352 if (nodep->size == size) {
1353 rc = EOK;
1354 } else if (nodep->size < size) {
1355 /*
1356 * The standard says we have the freedom to grow the node.
1357 * For now, we simply return an error.
1358 */
1359 rc = EINVAL;
1360 } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
1361 /*
1362 * The node will be shrunk, but no clusters will be deallocated.
1363 */
1364 nodep->size = size;
1365 nodep->dirty = true; /* need to sync node */
1366 rc = EOK;
1367 } else {
1368 /*
1369 * The node will be shrunk, clusters will be deallocated.
1370 */
1371 if (size == 0) {
1372 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1373 if (rc != EOK)
1374 goto out;
1375 } else {
1376 fat_cluster_t lastc;
1377 rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
1378 &lastc, NULL, (size - 1) / bpc);
1379 if (rc != EOK)
1380 goto out;
1381 rc = fat_chop_clusters(bs, nodep, lastc);
1382 if (rc != EOK)
1383 goto out;
1384 }
1385 nodep->size = size;
1386 nodep->dirty = true; /* need to sync node */
1387 rc = EOK;
1388 }
1389out:
1390 fat_node_put(fn);
1391 ipc_answer_0(rid, rc);
1392 return;
1393}
1394
1395void fat_close(ipc_callid_t rid, ipc_call_t *request)
1396{
1397 ipc_answer_0(rid, EOK);
1398}
1399
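/** Handle the VFS_DESTROY request: look up the node by index and destroy it. */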
1400void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1401{
1402 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1403 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1404 fs_node_t *fn;
1405 int rc;
1406
1407 rc = fat_node_get(&fn, dev_handle, index);
1408 if (rc != EOK) {
1409 ipc_answer_0(rid, rc);
1410 return;
1411 }
1412 if (!fn) {
1413 ipc_answer_0(rid, ENOENT);
1414 return;
1415 }
1416
1417 rc = fat_destroy_node(fn);
1418 ipc_answer_0(rid, rc);
1419}
1420
1421void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
1422{
1423 libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1424}
1425
1426void fat_stat(ipc_callid_t rid, ipc_call_t *request)
1427{
1428 libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1429}
1430
1431void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1432{
1433 /* Dummy implementation */
1434 ipc_answer_0(rid, EOK);
1435}
1436
1437/**
1438 * @}
1439 */