source: mainline/uspace/srv/fs/fat/fat_ops.c@ 80e9e5e

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 80e9e5e was efcebe1, checked in by Jakub Jermar <jakub@…>, 14 years ago

Get rid of per filesystem VFS_OUT method switch and IPC unmarshalling.

  • libfs now understands the notion of VFS_OUT operations and provides the single version of the switch
  • libfs now automatically takes care of some libfs provided operations, such as lookup and stat; filesystem need not be even aware of these
  • one filesystem type per libfs instance
  • plb_get_char() no longer a libfs operation
  • filesystem implementations need not worry about IPC with the exception of VFS_OUT_READ/WRITE methods and filesystems that depend on doing extra IPC in these and similar methods, such as devfs
  • Property mode set to 100644
File size: 35.2 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/services.h>
45#include <ipc/devmap.h>
46#include <macros.h>
47#include <async.h>
48#include <errno.h>
49#include <str.h>
50#include <byteorder.h>
51#include <adt/hash_table.h>
52#include <adt/list.h>
53#include <assert.h>
54#include <fibril_synch.h>
55#include <sys/mman.h>
56#include <align.h>
57#include <malloc.h>
58
59#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
60#define FS_NODE(node) ((node) ? (node)->bp : NULL)
61
62#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
63#define BPC(bs) (BPS((bs)) * SPC((bs)))
64
65/** Mutex protecting the list of cached free FAT nodes. */
66static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
67
68/** List of cached free FAT nodes. */
69static LIST_INITIALIZE(ffn_list);
70
71/*
72 * Forward declarations of FAT libfs operations.
73 */
74static int fat_root_get(fs_node_t **, devmap_handle_t);
75static int fat_match(fs_node_t **, fs_node_t *, const char *);
76static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);
77static int fat_node_open(fs_node_t *);
78static int fat_node_put(fs_node_t *);
79static int fat_create_node(fs_node_t **, devmap_handle_t, int);
80static int fat_destroy_node(fs_node_t *);
81static int fat_link(fs_node_t *, fs_node_t *, const char *);
82static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
83static int fat_has_children(bool *, fs_node_t *);
84static fs_index_t fat_index_get(fs_node_t *);
85static aoff64_t fat_size_get(fs_node_t *);
86static unsigned fat_lnkcnt_get(fs_node_t *);
87static bool fat_is_directory(fs_node_t *);
88static bool fat_is_file(fs_node_t *node);
89static devmap_handle_t fat_device_get(fs_node_t *node);
90
91/*
92 * Helper functions.
93 */
94static void fat_node_initialize(fat_node_t *node)
95{
96 fibril_mutex_initialize(&node->lock);
97 node->bp = NULL;
98 node->idx = NULL;
99 node->type = 0;
100 link_initialize(&node->ffn_link);
101 node->size = 0;
102 node->lnkcnt = 0;
103 node->refcnt = 0;
104 node->dirty = false;
105 node->lastc_cached_valid = false;
106 node->lastc_cached_value = FAT_CLST_LAST1;
107 node->currc_cached_valid = false;
108 node->currc_cached_bn = 0;
109 node->currc_cached_value = FAT_CLST_LAST1;
110}
111
/** Write a dirty in-core node back to its on-disk directory entry.
 *
 * Locates the block holding the node's dentry (via the parent-directory
 * position stored in the node's index structure), updates the first
 * cluster, size and attribute fields, and marks the block dirty so the
 * block cache writes it out.
 *
 * @param node	Node to synchronize; must be marked dirty.
 *
 * @return	EOK on success or an error code from the block layer.
 */
static int fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	int rc;

	assert(node->dirty);

	bs = block_bb_get(node->idx->devmap_handle);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
	    NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
	    BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	/* pdi % DPS(bs) is the dentry's offset within this block. */
	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));

	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		/* Directory size is not stored in the dentry, only the attr. */
		d->attr = FAT_ATTR_SUBDIR;
	}

	/* TODO: update other fields? (e.g time fields) */

	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	return rc;
}
145
/** Dispose of all cached free nodes belonging to one file system instance.
 *
 * Walks the global free-node list, syncs any dirty node found for the
 * given device and frees it. Uses trylock + restart to respect the
 * lock ordering (node lock and index lock are acquired before ffn_mutex
 * elsewhere), so the whole scan restarts whenever a trylock fails or an
 * entry is removed.
 *
 * @param devmap_handle	Device (instance) whose nodes are to be cleaned up.
 *
 * @return	EOK on success or an error code from fat_node_sync().
 */
static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle)
{
	fat_node_t *nodep;
	int rc;

	/*
	 * We are called from fat_unmounted() and assume that there are already
	 * no nodes belonging to this instance with non-zero refcount. Therefore
	 * it is sufficient to clean up only the FAT free node list.
	 */

restart:
	fibril_mutex_lock(&ffn_mutex);
	list_foreach(ffn_list, lnk) {
		nodep = list_get_instance(lnk, fat_node_t, ffn_link);
		if (!fibril_mutex_trylock(&nodep->lock)) {
			/* Contended: drop everything and rescan. */
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (!fibril_mutex_trylock(&nodep->idx->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (nodep->idx->devmap_handle != devmap_handle) {
			/* Node belongs to a different instance; skip it. */
			fibril_mutex_unlock(&nodep->idx->lock);
			fibril_mutex_unlock(&nodep->lock);
			continue;
		}

		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);

		/*
		 * We can unlock the node and its index structure because we are
		 * the last player on this playground and VFS is preventing new
		 * players from entering.
		 */
		fibril_mutex_unlock(&nodep->idx->lock);
		fibril_mutex_unlock(&nodep->lock);

		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			if (rc != EOK)
				return rc;
		}
		nodep->idx->nodep = NULL;
		free(nodep->bp);
		free(nodep);

		/* Need to restart because we changed ffn_list. */
		goto restart;
	}
	fibril_mutex_unlock(&ffn_mutex);

	return EOK;
}
203
/** Obtain a fresh in-core node structure.
 *
 * First tries to recycle a node from the free-node cache (syncing it to
 * disk if dirty and detaching it from its old index structure); if the
 * cache is empty or contended, allocates a brand new fs_node_t /
 * fat_node_t pair. The returned node is initialized and linked to its
 * fs_node_t wrapper, but has no index structure attached yet.
 *
 * @param nodepp	Where to store the resulting node on success.
 *
 * @return	EOK on success, ENOMEM on allocation failure, or an error
 *		code from fat_node_sync().
 */
static int fat_node_get_new(fat_node_t **nodepp)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_list)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(list_first(&ffn_list), fat_node_t,
		    ffn_link);
		/* Trylock only: on contention fall back to fresh allocation. */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			/* Flush the recycled node's dentry before reuse. */
			rc = fat_node_sync(nodep);
			if (rc != EOK) {
				idxp_tmp->nodep = NULL;
				fibril_mutex_unlock(&nodep->lock);
				fibril_mutex_unlock(&idxp_tmp->lock);
				free(nodep->bp);
				free(nodep);
				return rc;
			}
		}
		/* Detach the node from its previous index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return ENOMEM;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return ENOMEM;
		}
	}
	/* Reset both structures and tie them together. */
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;

	*nodepp = nodep;
	return EOK;
}
261
/** Internal version of fat_node_get().
 *
 * If the node is already in memory, bumps its reference count (removing
 * it from the free list if it was cached there); otherwise reads the
 * node's dentry from disk and instantiates a new in-core node for it.
 *
 * @param nodepp	Where to store the resulting node on success.
 * @param idxp		Locked index structure.
 *
 * @return	EOK on success or an error code from the block layer /
 *		node allocation.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			/* First reference: take it off the free-node list. */
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->devmap_handle);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL,
	    (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint16_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle,
		    uint16_t_le2host(d->firstc));
		if (rc != EOK) {
			(void) block_put(b);
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = BPS(bs) * SPC(bs) * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}
353
354/*
355 * FAT libfs operations.
356 */
357
358int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)
359{
360 return fat_node_get(rfn, devmap_handle, 0);
361}
362
/** Look up a directory entry by name in a parent directory.
 *
 * Scans the parent's dentries block by block, comparing each valid
 * entry's name against @a component. On a hit, instantiates the matching
 * node; on a miss (or when the LAST dentry marker is reached), stores
 * NULL into @a rfn and returns EOK.
 *
 * @param rfn		Where to store the matched node (or NULL on miss).
 * @param pfn		Parent directory node.
 * @param component	Name to look up.
 *
 * @return	EOK on hit or miss, otherwise an error code.
 */
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned blocks;
	fat_dentry_t *d;
	devmap_handle_t devmap_handle;
	block_t *b;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	devmap_handle = parentp->idx->devmap_handle;
	fibril_mutex_unlock(&parentp->idx->lock);

	bs = block_bb_get(devmap_handle);
	blocks = parentp->size / BPS(bs);
	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK)
			return rc;
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* miss */
				rc = block_put(b);
				*rfn = NULL;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				fat_node_t *nodep;
				fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,
				    parentp->firstc, i * DPS(bs) + j);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					rc = block_put(b);
					return (rc == EOK) ? ENOMEM : rc;
				}
				/* fat_idx_get_by_pos() returns idx locked. */
				rc = fat_node_get_core(&nodep, idx);
				fibril_mutex_unlock(&idx->lock);
				if (rc != EOK) {
					(void) block_put(b);
					return rc;
				}
				*rfn = FS_NODE(nodep);
				rc = block_put(b);
				if (rc != EOK)
					(void) fat_node_put(*rfn);
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK)
			return rc;
	}

	/* Scanned the whole directory without a match. */
	*rfn = NULL;
	return EOK;
}
435
436/** Instantiate a FAT in-core node. */
437int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)
438{
439 fat_node_t *nodep;
440 fat_idx_t *idxp;
441 int rc;
442
443 idxp = fat_idx_get_by_index(devmap_handle, index);
444 if (!idxp) {
445 *rfn = NULL;
446 return EOK;
447 }
448 /* idxp->lock held */
449 rc = fat_node_get_core(&nodep, idxp);
450 fibril_mutex_unlock(&idxp->lock);
451 if (rc == EOK)
452 *rfn = FS_NODE(nodep);
453 return rc;
454}
455
456int fat_node_open(fs_node_t *fn)
457{
458 /*
459 * Opening a file is stateless, nothing
460 * to be done here.
461 */
462 return EOK;
463}
464
465int fat_node_put(fs_node_t *fn)
466{
467 fat_node_t *nodep = FAT_NODE(fn);
468 bool destroy = false;
469
470 fibril_mutex_lock(&nodep->lock);
471 if (!--nodep->refcnt) {
472 if (nodep->idx) {
473 fibril_mutex_lock(&ffn_mutex);
474 list_append(&nodep->ffn_link, &ffn_list);
475 fibril_mutex_unlock(&ffn_mutex);
476 } else {
477 /*
478 * The node does not have any index structure associated
479 * with itself. This can only mean that we are releasing
480 * the node after a failed attempt to allocate the index
481 * structure for it.
482 */
483 destroy = true;
484 }
485 }
486 fibril_mutex_unlock(&nodep->lock);
487 if (destroy) {
488 free(nodep->bp);
489 free(nodep);
490 }
491 return EOK;
492}
493
494int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags)
495{
496 fat_idx_t *idxp;
497 fat_node_t *nodep;
498 fat_bs_t *bs;
499 fat_cluster_t mcl, lcl;
500 int rc;
501
502 bs = block_bb_get(devmap_handle);
503 if (flags & L_DIRECTORY) {
504 /* allocate a cluster */
505 rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl);
506 if (rc != EOK)
507 return rc;
508 /* populate the new cluster with unused dentries */
509 rc = fat_zero_cluster(bs, devmap_handle, mcl);
510 if (rc != EOK) {
511 (void) fat_free_clusters(bs, devmap_handle, mcl);
512 return rc;
513 }
514 }
515
516 rc = fat_node_get_new(&nodep);
517 if (rc != EOK) {
518 (void) fat_free_clusters(bs, devmap_handle, mcl);
519 return rc;
520 }
521 rc = fat_idx_get_new(&idxp, devmap_handle);
522 if (rc != EOK) {
523 (void) fat_free_clusters(bs, devmap_handle, mcl);
524 (void) fat_node_put(FS_NODE(nodep));
525 return rc;
526 }
527 /* idxp->lock held */
528 if (flags & L_DIRECTORY) {
529 nodep->type = FAT_DIRECTORY;
530 nodep->firstc = mcl;
531 nodep->size = BPS(bs) * SPC(bs);
532 } else {
533 nodep->type = FAT_FILE;
534 nodep->firstc = FAT_CLST_RES0;
535 nodep->size = 0;
536 }
537 nodep->lnkcnt = 0; /* not linked anywhere */
538 nodep->refcnt = 1;
539 nodep->dirty = true;
540
541 nodep->idx = idxp;
542 idxp->nodep = nodep;
543
544 fibril_mutex_unlock(&idxp->lock);
545 *rfn = FS_NODE(nodep);
546 return EOK;
547}
548
/** Destroy a node that is no longer reachable from the file system.
 *
 * Frees all clusters allocated to the node (if any), destroys its index
 * structure and releases the in-core node memory.
 *
 * @param fn	Node to destroy; its link count must already be zero and
 *		it must have no children.
 *
 * @return	EOK on success or an error code.
 */
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	bool has_children;
	int rc;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	rc = fat_has_children(&has_children, fn);
	if (rc != EOK)
		return rc;
	assert(!has_children);

	bs = block_bb_get(nodep->idx->devmap_handle);
	if (nodep->firstc != FAT_CLST_RES0) {
		/* FAT_CLST_RES0 marks a file with no clusters allocated. */
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->devmap_handle,
		    nodep->firstc);
	}

	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}
585
/** Link a child node into a parent directory under the given name.
 *
 * Finds an unused dentry in the parent (growing the parent by one
 * cluster if none is free and the parent is not the root directory),
 * writes the name into it, optionally creates "." and ".." entries for
 * a directory child, records the child's new position in its index
 * structure and hashes the index into the position hash.
 *
 * FAT supports only a single hard link per node: linking an already
 * linked child fails with EMLINK.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node to link (lnkcnt must be 0).
 * @param name	Name for the new link; must pass fat_dentry_name_verify().
 *
 * @return	EOK on success, EMLINK/ENOTSUP/ENOSPC or a block-layer
 *		error code on failure.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	unsigned i, j;
	unsigned blocks;
	fat_cluster_t mcl, lcl;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get us an unused parent node's dentry or grow the parent and allocate
	 * a new one.
	 */

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->devmap_handle);

	blocks = parentp->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot; b stays held */
				goto hit;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
	}
	j = 0;

	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	if (parentp->firstc == FAT_CLST_ROOT) {
		/* Can't grow the root directory. */
		fibril_mutex_unlock(&parentp->idx->lock);
		return ENOSPC;
	}
	rc = fat_alloc_clusters(bs, parentp->idx->devmap_handle, 1, &mcl, &lcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_zero_cluster(bs, parentp->idx->devmap_handle, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_append_clusters(bs, parentp, mcl, lcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	parentp->size += BPS(bs) * SPC(bs);
	parentp->dirty = true;		/* need to sync node */
	/* i == blocks here, i.e. the first block of the new cluster. */
	rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	d = (fat_dentry_t *)b->data;

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except of the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	fibril_mutex_unlock(&parentp->idx->lock);
	if (rc != EOK)
		return rc;

	fibril_mutex_lock(&childp->idx->lock);

	if (childp->type == FAT_DIRECTORY) {
		/*
		 * If possible, create the Sub-directory Identifier Entry and
		 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
		 * These entries are not mandatory according to Standard
		 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
		 * rather a sign of our good will.
		 */
		rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			/*
			 * Rather than returning an error, simply skip the
			 * creation of these two entries.
			 */
			goto skip_dots;
		}
		d = (fat_dentry_t *) b->data;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (str_cmp((char *) d->name, FAT_NAME_DOT)) == 0) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			d->firstc = host2uint16_t_le(childp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		d++;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (str_cmp((char *) d->name, FAT_NAME_DOT_DOT) == 0)) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			/* ".." in a child of the root points to cluster 0. */
			d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
			    host2uint16_t_le(FAT_CLST_RES0) :
			    host2uint16_t_le(parentp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		b->dirty = true;		/* need to sync block */
		/*
		 * Ignore the return value as we would have fallen through on error
		 * anyway.
		 */
		(void) block_put(b);
	}
skip_dots:

	/* Record the child's new position within the parent directory. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * DPS(bs) + j;
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
767
768int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
769{
770 fat_node_t *parentp = FAT_NODE(pfn);
771 fat_node_t *childp = FAT_NODE(cfn);
772 fat_bs_t *bs;
773 fat_dentry_t *d;
774 block_t *b;
775 bool has_children;
776 int rc;
777
778 if (!parentp)
779 return EBUSY;
780
781 rc = fat_has_children(&has_children, cfn);
782 if (rc != EOK)
783 return rc;
784 if (has_children)
785 return ENOTEMPTY;
786
787 fibril_mutex_lock(&parentp->lock);
788 fibril_mutex_lock(&childp->lock);
789 assert(childp->lnkcnt == 1);
790 fibril_mutex_lock(&childp->idx->lock);
791 bs = block_bb_get(childp->idx->devmap_handle);
792
793 rc = _fat_block_get(&b, bs, childp->idx->devmap_handle, childp->idx->pfc,
794 NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
795 BLOCK_FLAGS_NONE);
796 if (rc != EOK)
797 goto error;
798 d = (fat_dentry_t *)b->data +
799 (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
800 /* mark the dentry as not-currently-used */
801 d->name[0] = FAT_DENTRY_ERASED;
802 b->dirty = true; /* need to sync block */
803 rc = block_put(b);
804 if (rc != EOK)
805 goto error;
806
807 /* remove the index structure from the position hash */
808 fat_idx_hashout(childp->idx);
809 /* clear position information */
810 childp->idx->pfc = FAT_CLST_RES0;
811 childp->idx->pdi = 0;
812 fibril_mutex_unlock(&childp->idx->lock);
813 childp->lnkcnt = 0;
814 childp->refcnt++; /* keep the node in memory until destroyed */
815 childp->dirty = true;
816 fibril_mutex_unlock(&childp->lock);
817 fibril_mutex_unlock(&parentp->lock);
818
819 return EOK;
820
821error:
822 fibril_mutex_unlock(&parentp->idx->lock);
823 fibril_mutex_unlock(&childp->lock);
824 fibril_mutex_unlock(&childp->idx->lock);
825 return rc;
826}
827
/** Determine whether a node has any children.
 *
 * For directories, scans the dentries until a valid entry (child found),
 * the LAST marker or the end of the directory is reached. Non-directory
 * nodes trivially have no children.
 *
 * @param has_children	Where to store the result.
 * @param fn		Node to examine.
 *
 * @return	EOK on success or a block-layer error code.
 */
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}

	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->devmap_handle);

	blocks = nodep->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* No more entries can follow the LAST marker. */
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}
885
886
887fs_index_t fat_index_get(fs_node_t *fn)
888{
889 return FAT_NODE(fn)->idx->index;
890}
891
892aoff64_t fat_size_get(fs_node_t *fn)
893{
894 return FAT_NODE(fn)->size;
895}
896
897unsigned fat_lnkcnt_get(fs_node_t *fn)
898{
899 return FAT_NODE(fn)->lnkcnt;
900}
901
902bool fat_is_directory(fs_node_t *fn)
903{
904 return FAT_NODE(fn)->type == FAT_DIRECTORY;
905}
906
907bool fat_is_file(fs_node_t *fn)
908{
909 return FAT_NODE(fn)->type == FAT_FILE;
910}
911
912devmap_handle_t fat_device_get(fs_node_t *node)
913{
914 return 0;
915}
916
/** libfs operations
 *
 * Callback table handed to libfs; libfs dispatches the generic VFS
 * operations for this file system through these entry points.
 */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_open = fat_node_open,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file,
	.device_get = fat_device_get
};
936
937/*
938 * FAT VFS_OUT operations.
939 */
940
941static int
942fat_mounted(devmap_handle_t devmap_handle, const char *opts, fs_index_t *index,
943 aoff64_t *size, unsigned *linkcnt)
944{
945 enum cache_mode cmode;
946 fat_bs_t *bs;
947 int rc;
948
949 /* Check for option enabling write through. */
950 if (str_cmp(opts, "wtcache") == 0)
951 cmode = CACHE_MODE_WT;
952 else
953 cmode = CACHE_MODE_WB;
954
955 /* initialize libblock */
956 rc = block_init(EXCHANGE_SERIALIZE, devmap_handle, BS_SIZE);
957 if (rc != EOK)
958 return rc;
959
960 /* prepare the boot block */
961 rc = block_bb_read(devmap_handle, BS_BLOCK);
962 if (rc != EOK) {
963 block_fini(devmap_handle);
964 return rc;
965 }
966
967 /* get the buffer with the boot sector */
968 bs = block_bb_get(devmap_handle);
969
970 if (BPS(bs) != BS_SIZE) {
971 block_fini(devmap_handle);
972 return ENOTSUP;
973 }
974
975 /* Initialize the block cache */
976 rc = block_cache_init(devmap_handle, BPS(bs), 0 /* XXX */, cmode);
977 if (rc != EOK) {
978 block_fini(devmap_handle);
979 return rc;
980 }
981
982 /* Do some simple sanity checks on the file system. */
983 rc = fat_sanity_check(bs, devmap_handle);
984 if (rc != EOK) {
985 (void) block_cache_fini(devmap_handle);
986 block_fini(devmap_handle);
987 return rc;
988 }
989
990 rc = fat_idx_init_by_devmap_handle(devmap_handle);
991 if (rc != EOK) {
992 (void) block_cache_fini(devmap_handle);
993 block_fini(devmap_handle);
994 return rc;
995 }
996
997 /* Initialize the root node. */
998 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
999 if (!rfn) {
1000 (void) block_cache_fini(devmap_handle);
1001 block_fini(devmap_handle);
1002 fat_idx_fini_by_devmap_handle(devmap_handle);
1003 return ENOMEM;
1004 }
1005 fs_node_initialize(rfn);
1006 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
1007 if (!rootp) {
1008 free(rfn);
1009 (void) block_cache_fini(devmap_handle);
1010 block_fini(devmap_handle);
1011 fat_idx_fini_by_devmap_handle(devmap_handle);
1012 return ENOMEM;
1013 }
1014 fat_node_initialize(rootp);
1015
1016 fat_idx_t *ridxp = fat_idx_get_by_pos(devmap_handle, FAT_CLST_ROOTPAR, 0);
1017 if (!ridxp) {
1018 free(rfn);
1019 free(rootp);
1020 (void) block_cache_fini(devmap_handle);
1021 block_fini(devmap_handle);
1022 fat_idx_fini_by_devmap_handle(devmap_handle);
1023 return ENOMEM;
1024 }
1025 assert(ridxp->index == 0);
1026 /* ridxp->lock held */
1027
1028 rootp->type = FAT_DIRECTORY;
1029 rootp->firstc = FAT_CLST_ROOT;
1030 rootp->refcnt = 1;
1031 rootp->lnkcnt = 0; /* FS root is not linked */
1032 rootp->size = RDE(bs) * sizeof(fat_dentry_t);
1033 rootp->idx = ridxp;
1034 ridxp->nodep = rootp;
1035 rootp->bp = rfn;
1036 rfn->data = rootp;
1037
1038 fibril_mutex_unlock(&ridxp->lock);
1039
1040 *index = ridxp->index;
1041 *size = rootp->size;
1042 *linkcnt = rootp->lnkcnt;
1043
1044 return EOK;
1045}
1046
1047static int fat_unmounted(devmap_handle_t devmap_handle)
1048{
1049 fs_node_t *fn;
1050 fat_node_t *nodep;
1051 int rc;
1052
1053 rc = fat_root_get(&fn, devmap_handle);
1054 if (rc != EOK)
1055 return rc;
1056 nodep = FAT_NODE(fn);
1057
1058 /*
1059 * We expect exactly two references on the root node. One for the
1060 * fat_root_get() above and one created in fat_mounted().
1061 */
1062 if (nodep->refcnt != 2) {
1063 (void) fat_node_put(fn);
1064 return EBUSY;
1065 }
1066
1067 /*
1068 * Put the root node and force it to the FAT free node list.
1069 */
1070 (void) fat_node_put(fn);
1071 (void) fat_node_put(fn);
1072
1073 /*
1074 * Perform cleanup of the node structures, index structures and
1075 * associated data. Write back this file system's dirty blocks and
1076 * stop using libblock for this instance.
1077 */
1078 (void) fat_node_fini_by_devmap_handle(devmap_handle);
1079 fat_idx_fini_by_devmap_handle(devmap_handle);
1080 (void) block_cache_fini(devmap_handle);
1081 block_fini(devmap_handle);
1082
1083 return EOK;
1084}
1085
/** Service a VFS_OUT_READ request.
 *
 * For regular files, reads at most one block and returns possibly less
 * data than requested (the client is expected to retry). For
 * directories, treats @a pos as an index into the array of dentries and
 * returns the name of the first valid entry at or after that position.
 * The payload is transferred to the client via the async IPC data-read
 * protocol.
 *
 * @param devmap_handle	Device to read from.
 * @param index		Index of the node to read.
 * @param pos		Byte position (file) or dentry position (directory).
 * @param rbytes	Output: number of bytes (file) or dentries + skipped
 *			positions (directory) consumed.
 *
 * @return	EOK on success or an error code.
 */
static int
fat_read(devmap_handle_t devmap_handle, fs_index_t index, aoff64_t pos,
    size_t *rbytes)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, devmap_handle, index);
	if (rc != EOK)
		return rc;
	if (!fn)
		return ENOENT;
	nodep = FAT_NODE(fn);

	ipc_callid_t callid;
	size_t len;
	/* Receive the client's data-read request (buffer length in len). */
	if (!async_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		async_answer_0(callid, EINVAL);
		return EINVAL;
	}

	bs = block_bb_get(devmap_handle);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) async_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the block boundary and to EOF. */
			bytes = min(len, BPS(bs) - pos % BPS(bs));
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
			    BLOCK_FLAGS_NONE);
			if (rc != EOK) {
				fat_node_put(fn);
				async_answer_0(callid, rc);
				return rc;
			}
			(void) async_data_read_finalize(callid,
			    b->data + pos % BPS(bs), bytes);
			rc = block_put(b);
			if (rc != EOK) {
				fat_node_put(fn);
				return rc;
			}
		}
	} else {
		unsigned bnum;
		aoff64_t spos = pos;
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % BPS(bs) == 0);
		assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs);
		while (bnum < nodep->size / BPS(bs)) {
			aoff64_t o;

			rc = fat_block_get(&b, bs, nodep, bnum,
			    BLOCK_FLAGS_NONE);
			if (rc != EOK)
				goto err;
			for (o = pos % (BPS(bs) / sizeof(fat_dentry_t));
			    o < BPS(bs) / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
				case FAT_DENTRY_FREE:
					continue;
				case FAT_DENTRY_LAST:
					/* End of directory reached. */
					rc = block_put(b);
					if (rc != EOK)
						goto err;
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					fat_dentry_name_get(d, name);
					rc = block_put(b);
					if (rc != EOK)
						goto err;
					goto hit;
				}
			}
			rc = block_put(b);
			if (rc != EOK)
				goto err;
			bnum++;
		}
miss:
		rc = fat_node_put(fn);
		async_answer_0(callid, rc != EOK ? rc : ENOENT);
		*rbytes = 0;
		return rc != EOK ? rc : ENOENT;

err:
		(void) fat_node_put(fn);
		async_answer_0(callid, rc);
		return rc;

hit:
		/* Transfer the NUL-terminated entry name to the client. */
		(void) async_data_read_finalize(callid, name, str_size(name) + 1);
		bytes = (pos - spos) + 1;
	}

	rc = fat_node_put(fn);
	*rbytes = bytes;
	return rc;
}
1213
1214static int
1215fat_write(devmap_handle_t devmap_handle, fs_index_t index, aoff64_t pos,
1216 size_t *wbytes, aoff64_t *nsize)
1217{
1218 fs_node_t *fn;
1219 fat_node_t *nodep;
1220 fat_bs_t *bs;
1221 size_t bytes;
1222 block_t *b;
1223 aoff64_t boundary;
1224 int flags = BLOCK_FLAGS_NONE;
1225 int rc;
1226
1227 rc = fat_node_get(&fn, devmap_handle, index);
1228 if (rc != EOK)
1229 return rc;
1230 if (!fn)
1231 return ENOENT;
1232 nodep = FAT_NODE(fn);
1233
1234 ipc_callid_t callid;
1235 size_t len;
1236 if (!async_data_write_receive(&callid, &len)) {
1237 (void) fat_node_put(fn);
1238 async_answer_0(callid, EINVAL);
1239 return EINVAL;
1240 }
1241
1242 bs = block_bb_get(devmap_handle);
1243
1244 /*
1245 * In all scenarios, we will attempt to write out only one block worth
1246 * of data at maximum. There might be some more efficient approaches,
1247 * but this one greatly simplifies fat_write(). Note that we can afford
1248 * to do this because the client must be ready to handle the return
1249 * value signalizing a smaller number of bytes written.
1250 */
1251 bytes = min(len, BPS(bs) - pos % BPS(bs));
1252 if (bytes == BPS(bs))
1253 flags |= BLOCK_FLAGS_NOREAD;
1254
1255 boundary = ROUND_UP(nodep->size, BPC(bs));
1256 if (pos < boundary) {
1257 /*
1258 * This is the easier case - we are either overwriting already
1259 * existing contents or writing behind the EOF, but still within
1260 * the limits of the last cluster. The node size may grow to the
1261 * next block size boundary.
1262 */
1263 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1264 if (rc != EOK) {
1265 (void) fat_node_put(fn);
1266 async_answer_0(callid, rc);
1267 return rc;
1268 }
1269 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
1270 if (rc != EOK) {
1271 (void) fat_node_put(fn);
1272 async_answer_0(callid, rc);
1273 return rc;
1274 }
1275 (void) async_data_write_finalize(callid,
1276 b->data + pos % BPS(bs), bytes);
1277 b->dirty = true; /* need to sync block */
1278 rc = block_put(b);
1279 if (rc != EOK) {
1280 (void) fat_node_put(fn);
1281 return rc;
1282 }
1283 if (pos + bytes > nodep->size) {
1284 nodep->size = pos + bytes;
1285 nodep->dirty = true; /* need to sync node */
1286 }
1287 *wbytes = bytes;
1288 *nsize = nodep->size;
1289 rc = fat_node_put(fn);
1290 return rc;
1291 } else {
1292 /*
1293 * This is the more difficult case. We must allocate new
1294 * clusters for the node and zero them out.
1295 */
1296 unsigned nclsts;
1297 fat_cluster_t mcl, lcl;
1298
1299 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
1300 /* create an independent chain of nclsts clusters in all FATs */
1301 rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);
1302 if (rc != EOK) {
1303 /* could not allocate a chain of nclsts clusters */
1304 (void) fat_node_put(fn);
1305 async_answer_0(callid, rc);
1306 return rc;
1307 }
1308 /* zero fill any gaps */
1309 rc = fat_fill_gap(bs, nodep, mcl, pos);
1310 if (rc != EOK) {
1311 (void) fat_free_clusters(bs, devmap_handle, mcl);
1312 (void) fat_node_put(fn);
1313 async_answer_0(callid, rc);
1314 return rc;
1315 }
1316 rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL,
1317 (pos / BPS(bs)) % SPC(bs), flags);
1318 if (rc != EOK) {
1319 (void) fat_free_clusters(bs, devmap_handle, mcl);
1320 (void) fat_node_put(fn);
1321 async_answer_0(callid, rc);
1322 return rc;
1323 }
1324 (void) async_data_write_finalize(callid,
1325 b->data + pos % BPS(bs), bytes);
1326 b->dirty = true; /* need to sync block */
1327 rc = block_put(b);
1328 if (rc != EOK) {
1329 (void) fat_free_clusters(bs, devmap_handle, mcl);
1330 (void) fat_node_put(fn);
1331 return rc;
1332 }
1333 /*
1334 * Append the cluster chain starting in mcl to the end of the
1335 * node's cluster chain.
1336 */
1337 rc = fat_append_clusters(bs, nodep, mcl, lcl);
1338 if (rc != EOK) {
1339 (void) fat_free_clusters(bs, devmap_handle, mcl);
1340 (void) fat_node_put(fn);
1341 return rc;
1342 }
1343 *nsize = nodep->size = pos + bytes;
1344 rc = fat_node_put(fn);
1345 nodep->dirty = true; /* need to sync node */
1346 *wbytes = bytes;
1347 return rc;
1348 }
1349}
1350
1351static int
1352fat_truncate(devmap_handle_t devmap_handle, fs_index_t index, aoff64_t size)
1353{
1354 fs_node_t *fn;
1355 fat_node_t *nodep;
1356 fat_bs_t *bs;
1357 int rc;
1358
1359 rc = fat_node_get(&fn, devmap_handle, index);
1360 if (rc != EOK)
1361 return rc;
1362 if (!fn)
1363 return ENOENT;
1364 nodep = FAT_NODE(fn);
1365
1366 bs = block_bb_get(devmap_handle);
1367
1368 if (nodep->size == size) {
1369 rc = EOK;
1370 } else if (nodep->size < size) {
1371 /*
1372 * The standard says we have the freedom to grow the node.
1373 * For now, we simply return an error.
1374 */
1375 rc = EINVAL;
1376 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
1377 /*
1378 * The node will be shrunk, but no clusters will be deallocated.
1379 */
1380 nodep->size = size;
1381 nodep->dirty = true; /* need to sync node */
1382 rc = EOK;
1383 } else {
1384 /*
1385 * The node will be shrunk, clusters will be deallocated.
1386 */
1387 if (size == 0) {
1388 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1389 if (rc != EOK)
1390 goto out;
1391 } else {
1392 fat_cluster_t lastc;
1393 rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
1394 &lastc, NULL, (size - 1) / BPC(bs));
1395 if (rc != EOK)
1396 goto out;
1397 rc = fat_chop_clusters(bs, nodep, lastc);
1398 if (rc != EOK)
1399 goto out;
1400 }
1401 nodep->size = size;
1402 nodep->dirty = true; /* need to sync node */
1403 rc = EOK;
1404 }
1405out:
1406 fat_node_put(fn);
1407 return rc;
1408}
1409
1410static int fat_close(devmap_handle_t devmap_handle, fs_index_t index)
1411{
1412 return EOK;
1413}
1414
1415static int fat_destroy(devmap_handle_t devmap_handle, fs_index_t index)
1416{
1417 fs_node_t *fn;
1418 fat_node_t *nodep;
1419 int rc;
1420
1421 rc = fat_node_get(&fn, devmap_handle, index);
1422 if (rc != EOK)
1423 return rc;
1424 if (!fn)
1425 return ENOENT;
1426
1427 nodep = FAT_NODE(fn);
1428 /*
1429 * We should have exactly two references. One for the above
1430 * call to fat_node_get() and one from fat_unlink().
1431 */
1432 assert(nodep->refcnt == 2);
1433
1434 rc = fat_destroy_node(fn);
1435 return rc;
1436}
1437
1438static int fat_sync(devmap_handle_t devmap_handle, fs_index_t index)
1439{
1440 fs_node_t *fn;
1441 int rc = fat_node_get(&fn, devmap_handle, index);
1442 if (rc != EOK)
1443 return rc;
1444 if (!fn)
1445 return ENOENT;
1446
1447 fat_node_t *nodep = FAT_NODE(fn);
1448
1449 nodep->dirty = true;
1450 rc = fat_node_sync(nodep);
1451
1452 fat_node_put(fn);
1453 return rc;
1454}
1455
/** Dispatch table of VFS_OUT operations provided by the FAT server.
 *
 * Consumed by libfs, which performs the operation switch and IPC
 * unmarshalling on our behalf.
 */
vfs_out_ops_t fat_ops = {
	.mounted = fat_mounted,
	.unmounted = fat_unmounted,
	.read = fat_read,
	.write = fat_write,
	.truncate = fat_truncate,
	.close = fat_close,
	.destroy = fat_destroy,
	.sync = fat_sync,
};
1466
1467/**
1468 * @}
1469 */
Note: See TracBrowser for help on using the repository browser.