source: mainline/uspace/srv/fs/fat/fat_ops.c@ee3f6f6

Last change on this file was ee3f6f6, checked in by Jakub Jermar <jakub@…>, 11 years ago

Removal of strtok() and strtok_r() in favour of str_tok().
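
For illustration, the replacement idiom looks roughly like the sketch below, modelled on the mount-option parsing in fat_mounted() further down in this file. The helper name and the flag parameters are made up for the example; the str_tok() prototype is assumed from the call in that function (char *str_tok(char *, const char *, char **), declared in <str.h>).

#include <str.h>

/* Split a writable option string on spaces and commas, in place. */
static void parse_fat_opts(char *opts, int *wtcache, int *nolfn)
{
	char *opt;

	*wtcache = 0;
	*nolfn = 0;
	while ((opt = str_tok(opts, " ,", &opts)) != NULL) {
		if (str_cmp(opt, "wtcache") == 0)
			*wtcache = 1;
		else if (str_cmp(opt, "nolfn") == 0)
			*nolfn = 1;
	}
}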

1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * Copyright (c) 2011 Oleg Romanenko
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup fs
31 * @{
32 */
33
34/**
35 * @file fat_ops.c
36 * @brief Implementation of VFS operations for the FAT file system server.
37 */
38
39#include "fat.h"
40#include "fat_dentry.h"
41#include "fat_fat.h"
42#include "fat_directory.h"
43#include "../../vfs/vfs.h"
44#include <libfs.h>
45#include <block.h>
46#include <ipc/services.h>
47#include <ipc/loc.h>
48#include <macros.h>
49#include <async.h>
50#include <errno.h>
51#include <str.h>
52#include <byteorder.h>
53#include <adt/hash_table.h>
54#include <adt/list.h>
55#include <assert.h>
56#include <fibril_synch.h>
57#include <align.h>
58#include <malloc.h>
59#include <str.h>
60
61#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
62#define FS_NODE(node) ((node) ? (node)->bp : NULL)
63
64#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
65#define BPC(bs) (BPS((bs)) * SPC((bs)))
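/*
 * Example: with the usual 512-byte sectors and 32-byte directory entries,
 * DPS(bs) = 512 / 32 = 16 dentries per sector, so a parent-relative dentry
 * index pdi maps to sector pdi / 16 of its directory and slot pdi % 16
 * within that sector -- the arithmetic used by fat_node_sync() and
 * fat_node_get_core() below.
 */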
66
67/** Mutex protecting the list of cached free FAT nodes. */
68static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
69
70/** List of cached free FAT nodes. */
71static LIST_INITIALIZE(ffn_list);
72
73/*
74 * Forward declarations of FAT libfs operations.
75 */
76static int fat_root_get(fs_node_t **, service_id_t);
77static int fat_match(fs_node_t **, fs_node_t *, const char *);
78static int fat_node_get(fs_node_t **, service_id_t, fs_index_t);
79static int fat_node_open(fs_node_t *);
80static int fat_node_put(fs_node_t *);
81static int fat_create_node(fs_node_t **, service_id_t, int);
82static int fat_destroy_node(fs_node_t *);
83static int fat_link(fs_node_t *, fs_node_t *, const char *);
84static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
85static int fat_has_children(bool *, fs_node_t *);
86static fs_index_t fat_index_get(fs_node_t *);
87static aoff64_t fat_size_get(fs_node_t *);
88static unsigned fat_lnkcnt_get(fs_node_t *);
89static bool fat_is_directory(fs_node_t *);
90static bool fat_is_file(fs_node_t *node);
91static service_id_t fat_service_get(fs_node_t *node);
92static int fat_size_block(service_id_t, uint32_t *);
93static int fat_total_block_count(service_id_t, uint64_t *);
94static int fat_free_block_count(service_id_t, uint64_t *);
95
96/*
97 * Helper functions.
98 */
99static void fat_node_initialize(fat_node_t *node)
100{
101 fibril_mutex_initialize(&node->lock);
102 node->bp = NULL;
103 node->idx = NULL;
104 node->type = 0;
105 link_initialize(&node->ffn_link);
106 node->size = 0;
107 node->lnkcnt = 0;
108 node->refcnt = 0;
109 node->dirty = false;
110 node->lastc_cached_valid = false;
111 node->lastc_cached_value = 0;
112 node->currc_cached_valid = false;
113 node->currc_cached_bn = 0;
114 node->currc_cached_value = 0;
115}
116
117static int fat_node_sync(fat_node_t *node)
118{
119 block_t *b;
120 fat_bs_t *bs;
121 fat_dentry_t *d;
122 int rc;
123
124 assert(node->dirty);
125
126 bs = block_bb_get(node->idx->service_id);
127
128 /* Read the block that contains the dentry of interest. */
129 rc = _fat_block_get(&b, bs, node->idx->service_id, node->idx->pfc,
130 NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
131 BLOCK_FLAGS_NONE);
132 if (rc != EOK)
133 return rc;
134
135 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));
136
137 d->firstc = host2uint16_t_le(node->firstc);
138 if (node->type == FAT_FILE) {
139 d->size = host2uint32_t_le(node->size);
140 } else if (node->type == FAT_DIRECTORY) {
141 d->attr = FAT_ATTR_SUBDIR;
142 }
143
144 /* TODO: update other fields? (e.g. time fields) */
145
146 b->dirty = true; /* need to sync block */
147 rc = block_put(b);
148 return rc;
149}
150
151static int fat_node_fini_by_service_id(service_id_t service_id)
152{
153 int rc;
154
155 /*
156 * We are called from fat_unmounted() and assume that there are already
157 * no nodes belonging to this instance with non-zero refcount. Therefore
158 * it is sufficient to clean up only the FAT free node list.
159 */
160
161restart:
162 fibril_mutex_lock(&ffn_mutex);
163 list_foreach(ffn_list, ffn_link, fat_node_t, nodep) {
164 if (!fibril_mutex_trylock(&nodep->lock)) {
165 fibril_mutex_unlock(&ffn_mutex);
166 goto restart;
167 }
168 if (!fibril_mutex_trylock(&nodep->idx->lock)) {
169 fibril_mutex_unlock(&nodep->lock);
170 fibril_mutex_unlock(&ffn_mutex);
171 goto restart;
172 }
173 if (nodep->idx->service_id != service_id) {
174 fibril_mutex_unlock(&nodep->idx->lock);
175 fibril_mutex_unlock(&nodep->lock);
176 continue;
177 }
178
179 list_remove(&nodep->ffn_link);
180 fibril_mutex_unlock(&ffn_mutex);
181
182 /*
183 * We can unlock the node and its index structure because we are
184 * the last player on this playground and VFS is preventing new
185 * players from entering.
186 */
187 fibril_mutex_unlock(&nodep->idx->lock);
188 fibril_mutex_unlock(&nodep->lock);
189
190 if (nodep->dirty) {
191 rc = fat_node_sync(nodep);
192 if (rc != EOK)
193 return rc;
194 }
195 nodep->idx->nodep = NULL;
196 free(nodep->bp);
197 free(nodep);
198
199 /* Need to restart because we changed ffn_list. */
200 goto restart;
201 }
202 fibril_mutex_unlock(&ffn_mutex);
203
204 return EOK;
205}
206
207static int fat_node_get_new(fat_node_t **nodepp)
208{
209 fs_node_t *fn;
210 fat_node_t *nodep;
211 int rc;
212
213 fibril_mutex_lock(&ffn_mutex);
214 if (!list_empty(&ffn_list)) {
215 /* Try to use a cached free node structure. */
216 fat_idx_t *idxp_tmp;
217 nodep = list_get_instance(list_first(&ffn_list), fat_node_t,
218 ffn_link);
219 if (!fibril_mutex_trylock(&nodep->lock))
220 goto skip_cache;
221 idxp_tmp = nodep->idx;
222 if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
223 fibril_mutex_unlock(&nodep->lock);
224 goto skip_cache;
225 }
226 list_remove(&nodep->ffn_link);
227 fibril_mutex_unlock(&ffn_mutex);
228 if (nodep->dirty) {
229 rc = fat_node_sync(nodep);
230 if (rc != EOK) {
231 idxp_tmp->nodep = NULL;
232 fibril_mutex_unlock(&nodep->lock);
233 fibril_mutex_unlock(&idxp_tmp->lock);
234 free(nodep->bp);
235 free(nodep);
236 return rc;
237 }
238 }
239 idxp_tmp->nodep = NULL;
240 fibril_mutex_unlock(&nodep->lock);
241 fibril_mutex_unlock(&idxp_tmp->lock);
242 fn = FS_NODE(nodep);
243 } else {
244skip_cache:
245 /* Try to allocate a new node structure. */
246 fibril_mutex_unlock(&ffn_mutex);
247 fn = (fs_node_t *)malloc(sizeof(fs_node_t));
248 if (!fn)
249 return ENOMEM;
250 nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
251 if (!nodep) {
252 free(fn);
253 return ENOMEM;
254 }
255 }
256 fat_node_initialize(nodep);
257 fs_node_initialize(fn);
258 fn->data = nodep;
259 nodep->bp = fn;
260
261 *nodepp = nodep;
262 return EOK;
263}
264
265/** Internal version of fat_node_get().
266 *
267 * @param idxp Locked index structure.
268 */
269static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
270{
271 block_t *b;
272 fat_bs_t *bs;
273 fat_dentry_t *d;
274 fat_node_t *nodep = NULL;
275 int rc;
276
277 if (idxp->nodep) {
278 /*
279 * We are lucky.
280 * The node is already instantiated in memory.
281 */
282 fibril_mutex_lock(&idxp->nodep->lock);
283 if (!idxp->nodep->refcnt++) {
284 fibril_mutex_lock(&ffn_mutex);
285 list_remove(&idxp->nodep->ffn_link);
286 fibril_mutex_unlock(&ffn_mutex);
287 }
288 fibril_mutex_unlock(&idxp->nodep->lock);
289 *nodepp = idxp->nodep;
290 return EOK;
291 }
292
293 /*
294 * We must instantiate the node from the file system.
295 */
296
297 assert(idxp->pfc);
298
299 rc = fat_node_get_new(&nodep);
300 if (rc != EOK)
301 return rc;
302
303 bs = block_bb_get(idxp->service_id);
304
305 /* Read the block that contains the dentry of interest. */
306 rc = _fat_block_get(&b, bs, idxp->service_id, idxp->pfc, NULL,
307 (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
308 if (rc != EOK) {
309 (void) fat_node_put(FS_NODE(nodep));
310 return rc;
311 }
312
313 d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
314 if (FAT_IS_FAT32(bs)) {
315 nodep->firstc = uint16_t_le2host(d->firstc_lo) |
316 (uint16_t_le2host(d->firstc_hi) << 16);
317 } else
318 nodep->firstc = uint16_t_le2host(d->firstc);
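/*
 * On FAT32 the starting cluster number is 28 bits wide and is stored
 * split across the firstc_hi:firstc_lo dentry fields; FAT12/16 keep it
 * in the single 16-bit firstc field.
 */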
319
320 if (d->attr & FAT_ATTR_SUBDIR) {
321 /*
322 * The only directory which does not have this bit set is the
323 * root directory itself. The root directory node is handled
324 * and initialized elsewhere.
325 */
326 nodep->type = FAT_DIRECTORY;
327
328 /*
329 * Unfortunately, the 'size' field of the FAT dentry is not
330 * defined for the directory entry type. We must determine the
331 * size of the directory by walking the FAT.
332 */
333 uint32_t clusters;
334 rc = fat_clusters_get(&clusters, bs, idxp->service_id,
335 nodep->firstc);
336 if (rc != EOK) {
337 (void) block_put(b);
338 (void) fat_node_put(FS_NODE(nodep));
339 return rc;
340 }
341 nodep->size = BPS(bs) * SPC(bs) * clusters;
342 } else {
343 nodep->type = FAT_FILE;
344 nodep->size = uint32_t_le2host(d->size);
345 }
346
347 nodep->lnkcnt = 1;
348 nodep->refcnt = 1;
349
350 rc = block_put(b);
351 if (rc != EOK) {
352 (void) fat_node_put(FS_NODE(nodep));
353 return rc;
354 }
355
356 /* Link the idx structure with the node structure. */
357 nodep->idx = idxp;
358 idxp->nodep = nodep;
359
360 *nodepp = nodep;
361 return EOK;
362}
363
364/*
365 * FAT libfs operations.
366 */
367
368int fat_root_get(fs_node_t **rfn, service_id_t service_id)
369{
370 return fat_node_get(rfn, service_id, 0);
371}
372
373int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
374{
375 fat_node_t *parentp = FAT_NODE(pfn);
376 char name[FAT_LFN_NAME_SIZE];
377 fat_dentry_t *d;
378 service_id_t service_id;
379 int rc;
380
381 fibril_mutex_lock(&parentp->idx->lock);
382 service_id = parentp->idx->service_id;
383 fibril_mutex_unlock(&parentp->idx->lock);
384
385 fat_directory_t di;
386 rc = fat_directory_open(parentp, &di);
387 if (rc != EOK)
388 return rc;
389
390 while (fat_directory_read(&di, name, &d) == EOK) {
391 if (fat_dentry_namecmp(name, component) == 0) {
392 /* hit */
393 fat_node_t *nodep;
394 aoff64_t o = di.pos %
395 (BPS(di.bs) / sizeof(fat_dentry_t));
396 fat_idx_t *idx = fat_idx_get_by_pos(service_id,
397 parentp->firstc, di.bnum * DPS(di.bs) + o);
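/*
 * di.bnum * DPS(di.bs) + o flattens the iterator's (block, slot)
 * position back into the parent-relative dentry index that keys
 * the position hash.
 */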
398 if (!idx) {
399 /*
400 * Can happen if memory is low or if we
401 * run out of 32-bit indices.
402 */
403 rc = fat_directory_close(&di);
404 return (rc == EOK) ? ENOMEM : rc;
405 }
406 rc = fat_node_get_core(&nodep, idx);
407 fibril_mutex_unlock(&idx->lock);
408 if (rc != EOK) {
409 (void) fat_directory_close(&di);
410 return rc;
411 }
412 *rfn = FS_NODE(nodep);
413 rc = fat_directory_close(&di);
414 if (rc != EOK)
415 (void) fat_node_put(*rfn);
416 return rc;
417 } else {
418 rc = fat_directory_next(&di);
419 if (rc != EOK)
420 break;
421 }
422 }
423 (void) fat_directory_close(&di);
424 *rfn = NULL;
425 return EOK;
426}
427
428/** Instantiate a FAT in-core node. */
429int fat_node_get(fs_node_t **rfn, service_id_t service_id, fs_index_t index)
430{
431 fat_node_t *nodep;
432 fat_idx_t *idxp;
433 int rc;
434
435 idxp = fat_idx_get_by_index(service_id, index);
436 if (!idxp) {
437 *rfn = NULL;
438 return EOK;
439 }
440 /* idxp->lock held */
441 rc = fat_node_get_core(&nodep, idxp);
442 fibril_mutex_unlock(&idxp->lock);
443 if (rc == EOK)
444 *rfn = FS_NODE(nodep);
445 return rc;
446}
447
448int fat_node_open(fs_node_t *fn)
449{
450 /*
451 * Opening a file is stateless, nothing
452 * to be done here.
453 */
454 return EOK;
455}
456
457int fat_node_put(fs_node_t *fn)
458{
459 fat_node_t *nodep = FAT_NODE(fn);
460 bool destroy = false;
461
462 fibril_mutex_lock(&nodep->lock);
463 if (!--nodep->refcnt) {
464 if (nodep->idx) {
465 fibril_mutex_lock(&ffn_mutex);
466 list_append(&nodep->ffn_link, &ffn_list);
467 fibril_mutex_unlock(&ffn_mutex);
468 } else {
469 /*
470 * The node does not have any index structure associated
471 * with itself. This can only mean that we are releasing
472 * the node after a failed attempt to allocate the index
473 * structure for it.
474 */
475 destroy = true;
476 }
477 }
478 fibril_mutex_unlock(&nodep->lock);
479 if (destroy) {
480 free(nodep->bp);
481 free(nodep);
482 }
483 return EOK;
484}
485
486int fat_create_node(fs_node_t **rfn, service_id_t service_id, int flags)
487{
488 fat_idx_t *idxp;
489 fat_node_t *nodep;
490 fat_bs_t *bs;
491 fat_cluster_t mcl, lcl;
492 int rc;
493
494 bs = block_bb_get(service_id);
495 if (flags & L_DIRECTORY) {
496 /* allocate a cluster */
497 rc = fat_alloc_clusters(bs, service_id, 1, &mcl, &lcl);
498 if (rc != EOK)
499 return rc;
500 /* populate the new cluster with unused dentries */
501 rc = fat_zero_cluster(bs, service_id, mcl);
502 if (rc != EOK) {
503 (void) fat_free_clusters(bs, service_id, mcl);
504 return rc;
505 }
506 }
507
508 rc = fat_node_get_new(&nodep);
509 if (rc != EOK) {
510 (void) fat_free_clusters(bs, service_id, mcl);
511 return rc;
512 }
513 rc = fat_idx_get_new(&idxp, service_id);
514 if (rc != EOK) {
515 (void) fat_free_clusters(bs, service_id, mcl);
516 (void) fat_node_put(FS_NODE(nodep));
517 return rc;
518 }
519 /* idxp->lock held */
520 if (flags & L_DIRECTORY) {
521 nodep->type = FAT_DIRECTORY;
522 nodep->firstc = mcl;
523 nodep->size = BPS(bs) * SPC(bs);
524 } else {
525 nodep->type = FAT_FILE;
526 nodep->firstc = FAT_CLST_RES0;
527 nodep->size = 0;
528 }
529 nodep->lnkcnt = 0; /* not linked anywhere */
530 nodep->refcnt = 1;
531 nodep->dirty = true;
532
533 nodep->idx = idxp;
534 idxp->nodep = nodep;
535
536 fibril_mutex_unlock(&idxp->lock);
537 *rfn = FS_NODE(nodep);
538 return EOK;
539}
540
541int fat_destroy_node(fs_node_t *fn)
542{
543 fat_node_t *nodep = FAT_NODE(fn);
544 fat_bs_t *bs;
545 bool has_children;
546 int rc;
547
548 /*
549 * The node is not reachable from the file system. This means that the
550 * link count should be zero and that the index structure cannot be
551 * found in the position hash. Obviously, we need to lock neither the node
552 * nor its index structure.
553 */
554 assert(nodep->lnkcnt == 0);
555
556 /*
557 * The node may not have any children.
558 */
559 rc = fat_has_children(&has_children, fn);
560 if (rc != EOK)
561 return rc;
562 assert(!has_children);
563
564 bs = block_bb_get(nodep->idx->service_id);
565 if (nodep->firstc != FAT_CLST_RES0) {
566 assert(nodep->size);
567 /* Free all clusters allocated to the node. */
568 rc = fat_free_clusters(bs, nodep->idx->service_id,
569 nodep->firstc);
570 }
571
572 fat_idx_destroy(nodep->idx);
573 free(nodep->bp);
574 free(nodep);
575 return rc;
576}
577
578int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
579{
580 fat_node_t *parentp = FAT_NODE(pfn);
581 fat_node_t *childp = FAT_NODE(cfn);
582 fat_dentry_t *d;
583 fat_bs_t *bs;
584 block_t *b;
585 fat_directory_t di;
586 fat_dentry_t de;
587 int rc;
588
589 fibril_mutex_lock(&childp->lock);
590 if (childp->lnkcnt == 1) {
591 /*
592 * On FAT, we don't support multiple hard links.
593 */
594 fibril_mutex_unlock(&childp->lock);
595 return EMLINK;
596 }
597 assert(childp->lnkcnt == 0);
598 fibril_mutex_unlock(&childp->lock);
599
600 if (!fat_valid_name(name))
601 return ENOTSUP;
602
603 fibril_mutex_lock(&parentp->idx->lock);
604 bs = block_bb_get(parentp->idx->service_id);
605 rc = fat_directory_open(parentp, &di);
606 if (rc != EOK) {
607 fibril_mutex_unlock(&parentp->idx->lock);
608 return rc;
609 }
610
611 /*
612 * At this point we only establish the link between the parent and the
613 * child. The dentry, except for the name and the extension, will remain
614 * uninitialized until the corresponding node is synced. Thus the valid
615 * dentry data is kept in the child node structure.
616 */
617 memset(&de, 0, sizeof(fat_dentry_t));
618
619 rc = fat_directory_write(&di, name, &de);
620 if (rc != EOK) {
621 (void) fat_directory_close(&di);
622 fibril_mutex_unlock(&parentp->idx->lock);
623 return rc;
624 }
625 rc = fat_directory_close(&di);
626 if (rc != EOK) {
627 fibril_mutex_unlock(&parentp->idx->lock);
628 return rc;
629 }
630
631 fibril_mutex_unlock(&parentp->idx->lock);
632
633 fibril_mutex_lock(&childp->idx->lock);
634
635 if (childp->type == FAT_DIRECTORY) {
636 /*
637 * If possible, create the Sub-directory Identifier Entry and
638 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
639 * These entries are not mandatory according to Standard
640 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
641 * rather a sign of our good will.
642 */
643 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
644 if (rc != EOK) {
645 /*
646 * Rather than returning an error, simply skip the
647 * creation of these two entries.
648 */
649 goto skip_dots;
650 }
651 d = (fat_dentry_t *) b->data;
652 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
653 (memcmp(d->name, FAT_NAME_DOT, FAT_NAME_LEN) == 0)) {
654 memset(d, 0, sizeof(fat_dentry_t));
655 memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
656 memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
657 d->attr = FAT_ATTR_SUBDIR;
658 d->firstc = host2uint16_t_le(childp->firstc);
659 /* TODO: initialize also the date/time members. */
660 }
661 d++;
662 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
663 (memcmp(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN) == 0)) {
664 memset(d, 0, sizeof(fat_dentry_t));
665 memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
666 memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
667 d->attr = FAT_ATTR_SUBDIR;
668 d->firstc = (parentp->firstc == FAT_ROOT_CLST(bs)) ?
669 host2uint16_t_le(FAT_CLST_ROOTPAR) :
670 host2uint16_t_le(parentp->firstc);
671 /* TODO: initialize also the date/time members. */
672 }
673 b->dirty = true; /* need to sync block */
674 /*
675 * Ignore the return value as we would have fallen through on error
676 * anyway.
677 */
678 (void) block_put(b);
679 }
680skip_dots:
681
682 childp->idx->pfc = parentp->firstc;
683 childp->idx->pdi = di.pos; /* di.pos holds absolute position of SFN entry */
684 fibril_mutex_unlock(&childp->idx->lock);
685
686 fibril_mutex_lock(&childp->lock);
687 childp->lnkcnt = 1;
688 childp->dirty = true; /* need to sync node */
689 fibril_mutex_unlock(&childp->lock);
690
691 /*
692 * Hash in the index structure into the position hash.
693 */
694 fat_idx_hashin(childp->idx);
695
696 return EOK;
697}
698
699int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
700{
701 fat_node_t *parentp = FAT_NODE(pfn);
702 fat_node_t *childp = FAT_NODE(cfn);
703 bool has_children;
704 int rc;
705
706 if (!parentp)
707 return EBUSY;
708
709 rc = fat_has_children(&has_children, cfn);
710 if (rc != EOK)
711 return rc;
712 if (has_children)
713 return ENOTEMPTY;
714
715 fibril_mutex_lock(&parentp->lock);
716 fibril_mutex_lock(&childp->lock);
717 assert(childp->lnkcnt == 1);
718 fibril_mutex_lock(&childp->idx->lock);
719
720 fat_directory_t di;
721 rc = fat_directory_open(parentp, &di);
722 if (rc != EOK)
723 goto error;
724 rc = fat_directory_seek(&di, childp->idx->pdi);
725 if (rc != EOK)
726 goto error;
727 rc = fat_directory_erase(&di);
728 if (rc != EOK)
729 goto error;
730 rc = fat_directory_close(&di);
731 if (rc != EOK)
732 goto error;
733
734 /* remove the index structure from the position hash */
735 fat_idx_hashout(childp->idx);
736 /* clear position information */
737 childp->idx->pfc = FAT_CLST_RES0;
738 childp->idx->pdi = 0;
739 fibril_mutex_unlock(&childp->idx->lock);
740 childp->lnkcnt = 0;
741 childp->refcnt++; /* keep the node in memory until destroyed */
742 childp->dirty = true;
743 fibril_mutex_unlock(&childp->lock);
744 fibril_mutex_unlock(&parentp->lock);
745
746 return EOK;
747
748error:
749 (void) fat_directory_close(&di);
750 fibril_mutex_unlock(&childp->idx->lock);
751 fibril_mutex_unlock(&childp->lock);
752 fibril_mutex_unlock(&parentp->lock);
753 return rc;
754}
755
756int fat_has_children(bool *has_children, fs_node_t *fn)
757{
758 fat_bs_t *bs;
759 fat_node_t *nodep = FAT_NODE(fn);
760 unsigned blocks;
761 block_t *b;
762 unsigned i, j;
763 int rc;
764
765 if (nodep->type != FAT_DIRECTORY) {
766 *has_children = false;
767 return EOK;
768 }
769
770 fibril_mutex_lock(&nodep->idx->lock);
771 bs = block_bb_get(nodep->idx->service_id);
772
773 blocks = nodep->size / BPS(bs);
774
775 for (i = 0; i < blocks; i++) {
776 fat_dentry_t *d;
777
778 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
779 if (rc != EOK) {
780 fibril_mutex_unlock(&nodep->idx->lock);
781 return rc;
782 }
783 for (j = 0; j < DPS(bs); j++) {
784 d = ((fat_dentry_t *)b->data) + j;
785 switch (fat_classify_dentry(d)) {
786 case FAT_DENTRY_SKIP:
787 case FAT_DENTRY_FREE:
788 continue;
789 case FAT_DENTRY_LAST:
790 rc = block_put(b);
791 fibril_mutex_unlock(&nodep->idx->lock);
792 *has_children = false;
793 return rc;
794 default:
795 case FAT_DENTRY_VALID:
796 rc = block_put(b);
797 fibril_mutex_unlock(&nodep->idx->lock);
798 *has_children = true;
799 return rc;
800 }
801 }
802 rc = block_put(b);
803 if (rc != EOK) {
804 fibril_mutex_unlock(&nodep->idx->lock);
805 return rc;
806 }
807 }
808
809 fibril_mutex_unlock(&nodep->idx->lock);
810 *has_children = false;
811 return EOK;
812}
813
814
815fs_index_t fat_index_get(fs_node_t *fn)
816{
817 return FAT_NODE(fn)->idx->index;
818}
819
820aoff64_t fat_size_get(fs_node_t *fn)
821{
822 return FAT_NODE(fn)->size;
823}
824
825unsigned fat_lnkcnt_get(fs_node_t *fn)
826{
827 return FAT_NODE(fn)->lnkcnt;
828}
829
830bool fat_is_directory(fs_node_t *fn)
831{
832 return FAT_NODE(fn)->type == FAT_DIRECTORY;
833}
834
835bool fat_is_file(fs_node_t *fn)
836{
837 return FAT_NODE(fn)->type == FAT_FILE;
838}
839
840service_id_t fat_service_get(fs_node_t *node)
841{
842 return 0;
843}
844
845int fat_size_block(service_id_t service_id, uint32_t *size)
846{
847 fat_bs_t *bs;
848
849 bs = block_bb_get(service_id);
850 *size = BPC(bs);
851
852 return EOK;
853}
854
855int fat_total_block_count(service_id_t service_id, uint64_t *count)
856{
857 fat_bs_t *bs;
858
859 bs = block_bb_get(service_id);
860 *count = (SPC(bs)) ? TS(bs) / SPC(bs) : 0;
861
862 return EOK;
863}
864
865int fat_free_block_count(service_id_t service_id, uint64_t *count)
866{
867 fat_bs_t *bs;
868 fat_cluster_t e0;
869 uint64_t block_count;
870 int rc;
871 uint32_t cluster_no, clusters;
872
873 block_count = 0;
874 bs = block_bb_get(service_id);
875 clusters = (SPC(bs)) ? TS(bs) / SPC(bs) : 0;
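/*
 * A FAT entry of FAT_CLST_RES0 (0) marks a free cluster, so counting the
 * zero entries in FAT1 gives the number of free clusters; the count is
 * reported in cluster-sized blocks to match fat_size_block() above.
 */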
876 for (cluster_no = 0; cluster_no < clusters; cluster_no++) {
877 rc = fat_get_cluster(bs, service_id, FAT1, cluster_no, &e0);
878 if (rc != EOK)
879 return EIO;
880
881 if (e0 == FAT_CLST_RES0)
882 block_count++;
883 }
884 *count = block_count;
885
886 return EOK;
887}
888
889/** libfs operations */
890libfs_ops_t fat_libfs_ops = {
891 .root_get = fat_root_get,
892 .match = fat_match,
893 .node_get = fat_node_get,
894 .node_open = fat_node_open,
895 .node_put = fat_node_put,
896 .create = fat_create_node,
897 .destroy = fat_destroy_node,
898 .link = fat_link,
899 .unlink = fat_unlink,
900 .has_children = fat_has_children,
901 .index_get = fat_index_get,
902 .size_get = fat_size_get,
903 .lnkcnt_get = fat_lnkcnt_get,
904 .is_directory = fat_is_directory,
905 .is_file = fat_is_file,
906 .service_get = fat_service_get,
907 .size_block = fat_size_block,
908 .total_block_count = fat_total_block_count,
909 .free_block_count = fat_free_block_count
910};
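/*
 * libfs drives the naming-related VFS requests (lookup, link, unlink, ...)
 * through the table above; the VFS_OUT operations below cover mounting and
 * the data path (read, write, truncate, sync).
 */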
911
912/*
913 * FAT VFS_OUT operations.
914 */
915
916static int
917fat_mounted(service_id_t service_id, const char *opts, fs_index_t *index,
918 aoff64_t *size, unsigned *linkcnt)
919{
920 enum cache_mode cmode = CACHE_MODE_WB;
921 fat_bs_t *bs;
922 fat_instance_t *instance;
923 int rc;
924
925 instance = malloc(sizeof(fat_instance_t));
926 if (!instance)
927 return ENOMEM;
928 instance->lfn_enabled = true;
929
930 /* Parse mount options. */
931 char *mntopts = (char *) opts;
932 char *opt;
933 while ((opt = str_tok(mntopts, " ,", &mntopts)) != NULL) {
934 if (str_cmp(opt, "wtcache") == 0)
935 cmode = CACHE_MODE_WT;
936 else if (str_cmp(opt, "nolfn") == 0)
937 instance->lfn_enabled = false;
938 }
939
940 /* initialize libblock */
941 rc = block_init(EXCHANGE_SERIALIZE, service_id, BS_SIZE);
942 if (rc != EOK) {
943 free(instance);
944 return rc;
945 }
946
947 /* prepare the boot block */
948 rc = block_bb_read(service_id, BS_BLOCK);
949 if (rc != EOK) {
950 free(instance);
951 block_fini(service_id);
952 return rc;
953 }
954
955 /* get the buffer with the boot sector */
956 bs = block_bb_get(service_id);
957
958 if (BPS(bs) != BS_SIZE) {
959 free(instance);
960 block_fini(service_id);
961 return ENOTSUP;
962 }
963
964 /* Initialize the block cache */
965 rc = block_cache_init(service_id, BPS(bs), 0 /* XXX */, cmode);
966 if (rc != EOK) {
967 free(instance);
968 block_fini(service_id);
969 return rc;
970 }
971
972 /* Do some simple sanity checks on the file system. */
973 rc = fat_sanity_check(bs, service_id);
974 if (rc != EOK) {
975 free(instance);
976 (void) block_cache_fini(service_id);
977 block_fini(service_id);
978 return rc;
979 }
980
981 rc = fat_idx_init_by_service_id(service_id);
982 if (rc != EOK) {
983 free(instance);
984 (void) block_cache_fini(service_id);
985 block_fini(service_id);
986 return rc;
987 }
988
989 /* Initialize the root node. */
990 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
991 if (!rfn) {
992 free(instance);
993 (void) block_cache_fini(service_id);
994 block_fini(service_id);
995 fat_idx_fini_by_service_id(service_id);
996 return ENOMEM;
997 }
998
999 fs_node_initialize(rfn);
1000 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
1001 if (!rootp) {
1002 free(instance);
1003 free(rfn);
1004 (void) block_cache_fini(service_id);
1005 block_fini(service_id);
1006 fat_idx_fini_by_service_id(service_id);
1007 return ENOMEM;
1008 }
1009 fat_node_initialize(rootp);
1010
1011 fat_idx_t *ridxp = fat_idx_get_by_pos(service_id, FAT_CLST_ROOTPAR, 0);
1012 if (!ridxp) {
1013 free(instance);
1014 free(rfn);
1015 free(rootp);
1016 (void) block_cache_fini(service_id);
1017 block_fini(service_id);
1018 fat_idx_fini_by_service_id(service_id);
1019 return ENOMEM;
1020 }
1021 assert(ridxp->index == 0);
1022 /* ridxp->lock held */
1023
1024 rootp->type = FAT_DIRECTORY;
1025 rootp->firstc = FAT_ROOT_CLST(bs);
1026 rootp->refcnt = 1;
1027 rootp->lnkcnt = 0; /* FS root is not linked */
1028
1029 if (FAT_IS_FAT32(bs)) {
1030 uint32_t clusters;
1031 rc = fat_clusters_get(&clusters, bs, service_id, rootp->firstc);
1032 if (rc != EOK) {
1033 fibril_mutex_unlock(&ridxp->lock);
1034 free(instance);
1035 free(rfn);
1036 free(rootp);
1037 (void) block_cache_fini(service_id);
1038 block_fini(service_id);
1039 fat_idx_fini_by_service_id(service_id);
1040 return ENOTSUP;
1041 }
1042 rootp->size = BPS(bs) * SPC(bs) * clusters;
1043 } else
1044 rootp->size = RDE(bs) * sizeof(fat_dentry_t);
1045
1046 rc = fs_instance_create(service_id, instance);
1047 if (rc != EOK) {
1048 fibril_mutex_unlock(&ridxp->lock);
1049 free(instance);
1050 free(rfn);
1051 free(rootp);
1052 (void) block_cache_fini(service_id);
1053 block_fini(service_id);
1054 fat_idx_fini_by_service_id(service_id);
1055 return rc;
1056 }
1057
1058 rootp->idx = ridxp;
1059 ridxp->nodep = rootp;
1060 rootp->bp = rfn;
1061 rfn->data = rootp;
1062
1063 fibril_mutex_unlock(&ridxp->lock);
1064
1065 *index = ridxp->index;
1066 *size = rootp->size;
1067 *linkcnt = rootp->lnkcnt;
1068
1069 return EOK;
1070}
1071
1072static int fat_update_fat32_fsinfo(service_id_t service_id)
1073{
1074 fat_bs_t *bs;
1075 fat32_fsinfo_t *info;
1076 block_t *b;
1077 int rc;
1078
1079 bs = block_bb_get(service_id);
1080 assert(FAT_IS_FAT32(bs));
1081
1082 rc = block_get(&b, service_id, uint16_t_le2host(bs->fat32.fsinfo_sec),
1083 BLOCK_FLAGS_NONE);
1084 if (rc != EOK)
1085 return rc;
1086
1087 info = (fat32_fsinfo_t *) b->data;
1088
1089 if (memcmp(info->sig1, FAT32_FSINFO_SIG1, sizeof(info->sig1)) != 0 ||
1090 memcmp(info->sig2, FAT32_FSINFO_SIG2, sizeof(info->sig2)) != 0 ||
1091 memcmp(info->sig3, FAT32_FSINFO_SIG3, sizeof(info->sig3)) != 0) {
1092 (void) block_put(b);
1093 return EINVAL;
1094 }
1095
1096 /* For now, invalidate the counter. */
1097 info->free_clusters = host2uint16_t_le(-1);
1098
1099 b->dirty = true;
1100 return block_put(b);
1101}
1102
1103static int fat_unmounted(service_id_t service_id)
1104{
1105 fs_node_t *fn;
1106 fat_node_t *nodep;
1107 fat_bs_t *bs;
1108 int rc;
1109
1110 bs = block_bb_get(service_id);
1111
1112 rc = fat_root_get(&fn, service_id);
1113 if (rc != EOK)
1114 return rc;
1115 nodep = FAT_NODE(fn);
1116
1117 /*
1118 * We expect exactly two references on the root node. One for the
1119 * fat_root_get() above and one created in fat_mounted().
1120 */
1121 if (nodep->refcnt != 2) {
1122 (void) fat_node_put(fn);
1123 return EBUSY;
1124 }
1125
1126 if (FAT_IS_FAT32(bs)) {
1127 /*
1128 * Attempt to update the FAT32 FS info.
1129 */
1130 (void) fat_update_fat32_fsinfo(service_id);
1131 }
1132
1133 /*
1134 * Put the root node and force it to the FAT free node list.
1135 */
1136 (void) fat_node_put(fn);
1137 (void) fat_node_put(fn);
1138
1139 /*
1140 * Perform cleanup of the node structures, index structures and
1141 * associated data. Write back this file system's dirty blocks and
1142 * stop using libblock for this instance.
1143 */
1144 (void) fat_node_fini_by_service_id(service_id);
1145 fat_idx_fini_by_service_id(service_id);
1146 (void) block_cache_fini(service_id);
1147 block_fini(service_id);
1148
1149 void *data;
1150 if (fs_instance_get(service_id, &data) == EOK) {
1151 fs_instance_destroy(service_id);
1152 free(data);
1153 }
1154
1155 return EOK;
1156}
1157
1158static int
1159fat_read(service_id_t service_id, fs_index_t index, aoff64_t pos,
1160 size_t *rbytes)
1161{
1162 fs_node_t *fn;
1163 fat_node_t *nodep;
1164 fat_bs_t *bs;
1165 size_t bytes;
1166 block_t *b;
1167 int rc;
1168
1169 rc = fat_node_get(&fn, service_id, index);
1170 if (rc != EOK)
1171 return rc;
1172 if (!fn)
1173 return ENOENT;
1174 nodep = FAT_NODE(fn);
1175
1176 ipc_callid_t callid;
1177 size_t len;
1178 if (!async_data_read_receive(&callid, &len)) {
1179 fat_node_put(fn);
1180 async_answer_0(callid, EINVAL);
1181 return EINVAL;
1182 }
1183
1184 bs = block_bb_get(service_id);
1185
1186 if (nodep->type == FAT_FILE) {
1187 /*
1188 * Our strategy for regular file reads is to read one block at
1189 * most and make use of the possibility to return less data than
1190 * requested. This keeps the code very simple.
1191 */
1192 if (pos >= nodep->size) {
1193 /* reading beyond the EOF */
1194 bytes = 0;
1195 (void) async_data_read_finalize(callid, NULL, 0);
1196 } else {
1197 bytes = min(len, BPS(bs) - pos % BPS(bs));
1198 bytes = min(bytes, nodep->size - pos);
1199 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
1200 BLOCK_FLAGS_NONE);
1201 if (rc != EOK) {
1202 fat_node_put(fn);
1203 async_answer_0(callid, rc);
1204 return rc;
1205 }
1206 (void) async_data_read_finalize(callid,
1207 b->data + pos % BPS(bs), bytes);
1208 rc = block_put(b);
1209 if (rc != EOK) {
1210 fat_node_put(fn);
1211 return rc;
1212 }
1213 }
1214 } else {
1215 aoff64_t spos = pos;
1216 char name[FAT_LFN_NAME_SIZE];
1217 fat_dentry_t *d;
1218
1219 assert(nodep->type == FAT_DIRECTORY);
1220 assert(nodep->size % BPS(bs) == 0);
1221 assert(BPS(bs) % sizeof(fat_dentry_t) == 0);
1222
1223 fat_directory_t di;
1224 rc = fat_directory_open(nodep, &di);
1225 if (rc != EOK)
1226 goto err;
1227 rc = fat_directory_seek(&di, pos);
1228 if (rc != EOK) {
1229 (void) fat_directory_close(&di);
1230 goto err;
1231 }
1232
1233 rc = fat_directory_read(&di, name, &d);
1234 if (rc == EOK)
1235 goto hit;
1236 if (rc == ENOENT)
1237 goto miss;
1238
1239err:
1240 (void) fat_node_put(fn);
1241 async_answer_0(callid, rc);
1242 return rc;
1243
1244miss:
1245 rc = fat_directory_close(&di);
1246 if (rc != EOK)
1247 goto err;
1248 rc = fat_node_put(fn);
1249 async_answer_0(callid, rc != EOK ? rc : ENOENT);
1250 *rbytes = 0;
1251 return rc != EOK ? rc : ENOENT;
1252
1253hit:
1254 pos = di.pos;
1255 rc = fat_directory_close(&di);
1256 if (rc != EOK)
1257 goto err;
1258 (void) async_data_read_finalize(callid, name,
1259 str_size(name) + 1);
1260 bytes = (pos - spos) + 1;
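/*
 * For directory reads, 'bytes' is the number of dentry positions
 * consumed (from the requested position up to and including the
 * matching entry), not a raw byte count; the client uses it to
 * advance to the next entry.
 */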
1261 }
1262
1263 rc = fat_node_put(fn);
1264 *rbytes = bytes;
1265 return rc;
1266}
1267
1268static int
1269fat_write(service_id_t service_id, fs_index_t index, aoff64_t pos,
1270 size_t *wbytes, aoff64_t *nsize)
1271{
1272 fs_node_t *fn;
1273 fat_node_t *nodep;
1274 fat_bs_t *bs;
1275 size_t bytes;
1276 block_t *b;
1277 aoff64_t boundary;
1278 int flags = BLOCK_FLAGS_NONE;
1279 int rc;
1280
1281 rc = fat_node_get(&fn, service_id, index);
1282 if (rc != EOK)
1283 return rc;
1284 if (!fn)
1285 return ENOENT;
1286 nodep = FAT_NODE(fn);
1287
1288 ipc_callid_t callid;
1289 size_t len;
1290 if (!async_data_write_receive(&callid, &len)) {
1291 (void) fat_node_put(fn);
1292 async_answer_0(callid, EINVAL);
1293 return EINVAL;
1294 }
1295
1296 bs = block_bb_get(service_id);
1297
1298 /*
1299 * In all scenarios, we will attempt to write out only one block worth
1300 * of data at maximum. There might be some more efficient approaches,
1301 * but this one greatly simplifies fat_write(). Note that we can afford
1302 * to do this because the client must be ready to handle the return
1303 * value signalizing a smaller number of bytes written.
1304 */
1305 bytes = min(len, BPS(bs) - pos % BPS(bs));
1306 if (bytes == BPS(bs))
1307 flags |= BLOCK_FLAGS_NOREAD;
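/*
 * A write that covers a whole block overwrites all of its contents,
 * so BLOCK_FLAGS_NOREAD spares the read part of the read-modify-write
 * cycle.
 */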
1308
1309 boundary = ROUND_UP(nodep->size, BPC(bs));
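/*
 * boundary is the first byte offset past the space backed by the node's
 * currently allocated clusters; writes below it fit into existing
 * clusters, writes beyond it require extending the cluster chain.
 */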
1310 if (pos < boundary) {
1311 /*
1312 * This is the easier case - we are either overwriting already
1313 * existing contents or writing behind the EOF, but still within
1314 * the limits of the last cluster. The node size may grow to the
1315 * next block size boundary.
1316 */
1317 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1318 if (rc != EOK) {
1319 (void) fat_node_put(fn);
1320 async_answer_0(callid, rc);
1321 return rc;
1322 }
1323 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
1324 if (rc != EOK) {
1325 (void) fat_node_put(fn);
1326 async_answer_0(callid, rc);
1327 return rc;
1328 }
1329 (void) async_data_write_finalize(callid,
1330 b->data + pos % BPS(bs), bytes);
1331 b->dirty = true; /* need to sync block */
1332 rc = block_put(b);
1333 if (rc != EOK) {
1334 (void) fat_node_put(fn);
1335 return rc;
1336 }
1337 if (pos + bytes > nodep->size) {
1338 nodep->size = pos + bytes;
1339 nodep->dirty = true; /* need to sync node */
1340 }
1341 *wbytes = bytes;
1342 *nsize = nodep->size;
1343 rc = fat_node_put(fn);
1344 return rc;
1345 } else {
1346 /*
1347 * This is the more difficult case. We must allocate new
1348 * clusters for the node and zero them out.
1349 */
1350 unsigned nclsts;
1351 fat_cluster_t mcl, lcl;
1352
1353 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
1354 /* create an independent chain of nclsts clusters in all FATs */
1355 rc = fat_alloc_clusters(bs, service_id, nclsts, &mcl, &lcl);
1356 if (rc != EOK) {
1357 /* could not allocate a chain of nclsts clusters */
1358 (void) fat_node_put(fn);
1359 async_answer_0(callid, rc);
1360 return rc;
1361 }
1362 /* zero fill any gaps */
1363 rc = fat_fill_gap(bs, nodep, mcl, pos);
1364 if (rc != EOK) {
1365 (void) fat_free_clusters(bs, service_id, mcl);
1366 (void) fat_node_put(fn);
1367 async_answer_0(callid, rc);
1368 return rc;
1369 }
1370 rc = _fat_block_get(&b, bs, service_id, lcl, NULL,
1371 (pos / BPS(bs)) % SPC(bs), flags);
1372 if (rc != EOK) {
1373 (void) fat_free_clusters(bs, service_id, mcl);
1374 (void) fat_node_put(fn);
1375 async_answer_0(callid, rc);
1376 return rc;
1377 }
1378 (void) async_data_write_finalize(callid,
1379 b->data + pos % BPS(bs), bytes);
1380 b->dirty = true; /* need to sync block */
1381 rc = block_put(b);
1382 if (rc != EOK) {
1383 (void) fat_free_clusters(bs, service_id, mcl);
1384 (void) fat_node_put(fn);
1385 return rc;
1386 }
1387 /*
1388 * Append the cluster chain starting in mcl to the end of the
1389 * node's cluster chain.
1390 */
1391 rc = fat_append_clusters(bs, nodep, mcl, lcl);
1392 if (rc != EOK) {
1393 (void) fat_free_clusters(bs, service_id, mcl);
1394 (void) fat_node_put(fn);
1395 return rc;
1396 }
1397 *nsize = nodep->size = pos + bytes;
1398 nodep->dirty = true; /* need to sync node */
1399 rc = fat_node_put(fn);
1400 *wbytes = bytes;
1401 return rc;
1402 }
1403}
1404
1405static int
1406fat_truncate(service_id_t service_id, fs_index_t index, aoff64_t size)
1407{
1408 fs_node_t *fn;
1409 fat_node_t *nodep;
1410 fat_bs_t *bs;
1411 int rc;
1412
1413 rc = fat_node_get(&fn, service_id, index);
1414 if (rc != EOK)
1415 return rc;
1416 if (!fn)
1417 return ENOENT;
1418 nodep = FAT_NODE(fn);
1419
1420 bs = block_bb_get(service_id);
1421
1422 if (nodep->size == size) {
1423 rc = EOK;
1424 } else if (nodep->size < size) {
1425 /*
1426 * The standard says we have the freedom to grow the node.
1427 * For now, we simply return an error.
1428 */
1429 rc = EINVAL;
1430 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
1431 /*
1432 * The node will be shrunk, but no clusters will be deallocated.
1433 */
1434 nodep->size = size;
1435 nodep->dirty = true; /* need to sync node */
1436 rc = EOK;
1437 } else {
1438 /*
1439 * The node will be shrunk, clusters will be deallocated.
1440 */
1441 if (size == 0) {
1442 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1443 if (rc != EOK)
1444 goto out;
1445 } else {
1446 fat_cluster_t lastc;
1447 rc = fat_cluster_walk(bs, service_id, nodep->firstc,
1448 &lastc, NULL, (size - 1) / BPC(bs));
1449 if (rc != EOK)
1450 goto out;
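/*
 * lastc is now the cluster with zero-based index (size - 1) / BPC(bs),
 * i.e. the last cluster that must stay allocated; the chain is chopped
 * off right after it.
 */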
1451 rc = fat_chop_clusters(bs, nodep, lastc);
1452 if (rc != EOK)
1453 goto out;
1454 }
1455 nodep->size = size;
1456 nodep->dirty = true; /* need to sync node */
1457 rc = EOK;
1458 }
1459out:
1460 fat_node_put(fn);
1461 return rc;
1462}
1463
1464static int fat_close(service_id_t service_id, fs_index_t index)
1465{
1466 return EOK;
1467}
1468
1469static int fat_destroy(service_id_t service_id, fs_index_t index)
1470{
1471 fs_node_t *fn;
1472 fat_node_t *nodep;
1473 int rc;
1474
1475 rc = fat_node_get(&fn, service_id, index);
1476 if (rc != EOK)
1477 return rc;
1478 if (!fn)
1479 return ENOENT;
1480
1481 nodep = FAT_NODE(fn);
1482 /*
1483 * We should have exactly two references. One for the above
1484 * call to fat_node_get() and one from fat_unlink().
1485 */
1486 assert(nodep->refcnt == 2);
1487
1488 rc = fat_destroy_node(fn);
1489 return rc;
1490}
1491
1492static int fat_sync(service_id_t service_id, fs_index_t index)
1493{
1494 fs_node_t *fn;
1495 int rc = fat_node_get(&fn, service_id, index);
1496 if (rc != EOK)
1497 return rc;
1498 if (!fn)
1499 return ENOENT;
1500
1501 fat_node_t *nodep = FAT_NODE(fn);
1502
1503 nodep->dirty = true;
1504 rc = fat_node_sync(nodep);
1505
1506 fat_node_put(fn);
1507 return rc;
1508}
1509
1510vfs_out_ops_t fat_ops = {
1511 .mounted = fat_mounted,
1512 .unmounted = fat_unmounted,
1513 .read = fat_read,
1514 .write = fat_write,
1515 .truncate = fat_truncate,
1516 .close = fat_close,
1517 .destroy = fat_destroy,
1518 .sync = fat_sync,
1519};
1520
1521/**
1522 * @}
1523 */