source: mainline/uspace/srv/fs/fat/fat_ops.c@ 3dd148d

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 3dd148d was 3dd148d, checked in by Manuele Conti <conti.ma@…>, 12 years ago

Change statfs function operations to allow correct error handling.

  • Property mode set to 100644
File size: 35.6 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * Copyright (c) 2011 Oleg Romanenko
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup fs
31 * @{
32 */
33
34/**
35 * @file fat_ops.c
36 * @brief Implementation of VFS operations for the FAT file system server.
37 */
38
39#include "fat.h"
40#include "fat_dentry.h"
41#include "fat_fat.h"
42#include "fat_directory.h"
43#include "../../vfs/vfs.h"
44#include <libfs.h>
45#include <block.h>
46#include <ipc/services.h>
47#include <ipc/loc.h>
48#include <macros.h>
49#include <async.h>
50#include <errno.h>
51#include <str.h>
52#include <byteorder.h>
53#include <adt/hash_table.h>
54#include <adt/list.h>
55#include <assert.h>
56#include <fibril_synch.h>
57#include <sys/mman.h>
58#include <align.h>
59#include <malloc.h>
60#include <str.h>
61
/** Resolve a generic VFS node to the embedded FAT-specific node (or NULL). */
#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
/** Resolve a FAT-specific node back to its generic VFS wrapper (or NULL). */
#define FS_NODE(node) ((node) ? (node)->bp : NULL)

/** Directory entries per sector. */
#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
/** Bytes per cluster (bytes per sector times sectors per cluster). */
#define BPC(bs) (BPS((bs)) * SPC((bs)))
67
68/** Mutex protecting the list of cached free FAT nodes. */
69static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
70
71/** List of cached free FAT nodes. */
72static LIST_INITIALIZE(ffn_list);
73
74/*
75 * Forward declarations of FAT libfs operations.
76 */
77static int fat_root_get(fs_node_t **, service_id_t);
78static int fat_match(fs_node_t **, fs_node_t *, const char *);
79static int fat_node_get(fs_node_t **, service_id_t, fs_index_t);
80static int fat_node_open(fs_node_t *);
81static int fat_node_put(fs_node_t *);
82static int fat_create_node(fs_node_t **, service_id_t, int);
83static int fat_destroy_node(fs_node_t *);
84static int fat_link(fs_node_t *, fs_node_t *, const char *);
85static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
86static int fat_has_children(bool *, fs_node_t *);
87static fs_index_t fat_index_get(fs_node_t *);
88static aoff64_t fat_size_get(fs_node_t *);
89static unsigned fat_lnkcnt_get(fs_node_t *);
90static bool fat_is_directory(fs_node_t *);
91static bool fat_is_file(fs_node_t *node);
92static service_id_t fat_service_get(fs_node_t *node);
93static int fat_size_block(service_id_t, uint32_t *);
94static int fat_total_block_count(service_id_t, uint64_t *);
95static int fat_free_block_count(service_id_t, uint64_t *);
96
97/*
98 * Helper functions.
99 */
100static void fat_node_initialize(fat_node_t *node)
101{
102 fibril_mutex_initialize(&node->lock);
103 node->bp = NULL;
104 node->idx = NULL;
105 node->type = 0;
106 link_initialize(&node->ffn_link);
107 node->size = 0;
108 node->lnkcnt = 0;
109 node->refcnt = 0;
110 node->dirty = false;
111 node->lastc_cached_valid = false;
112 node->lastc_cached_value = 0;
113 node->currc_cached_valid = false;
114 node->currc_cached_bn = 0;
115 node->currc_cached_value = 0;
116}
117
118static int fat_node_sync(fat_node_t *node)
119{
120 block_t *b;
121 fat_bs_t *bs;
122 fat_dentry_t *d;
123 int rc;
124
125 assert(node->dirty);
126
127 bs = block_bb_get(node->idx->service_id);
128
129 /* Read the block that contains the dentry of interest. */
130 rc = _fat_block_get(&b, bs, node->idx->service_id, node->idx->pfc,
131 NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
132 BLOCK_FLAGS_NONE);
133 if (rc != EOK)
134 return rc;
135
136 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));
137
138 d->firstc = host2uint16_t_le(node->firstc);
139 if (node->type == FAT_FILE) {
140 d->size = host2uint32_t_le(node->size);
141 } else if (node->type == FAT_DIRECTORY) {
142 d->attr = FAT_ATTR_SUBDIR;
143 }
144
145 /* TODO: update other fields? (e.g time fields) */
146
147 b->dirty = true; /* need to sync block */
148 rc = block_put(b);
149 return rc;
150}
151
/** Dispose of all cached free nodes belonging to one file system instance.
 *
 * Walks the global free node list, writes back dirty nodes and frees their
 * in-core structures. Called during unmount.
 *
 * @param service_id	Instance whose cached free nodes are released.
 *
 * @return		EOK on success or an error from fat_node_sync().
 */
static int fat_node_fini_by_service_id(service_id_t service_id)
{
	fat_node_t *nodep;
	int rc;

	/*
	 * We are called from fat_unmounted() and assume that there are already
	 * no nodes belonging to this instance with non-zero refcount. Therefore
	 * it is sufficient to clean up only the FAT free node list.
	 */

restart:
	fibril_mutex_lock(&ffn_mutex);
	list_foreach(ffn_list, lnk) {
		nodep = list_get_instance(lnk, fat_node_t, ffn_link);
		/*
		 * The node and index locks are acquired with trylock while the
		 * list lock is held; on contention drop everything and restart
		 * to avoid deadlock.
		 */
		if (!fibril_mutex_trylock(&nodep->lock)) {
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (!fibril_mutex_trylock(&nodep->idx->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		/* Skip free nodes belonging to other instances. */
		if (nodep->idx->service_id != service_id) {
			fibril_mutex_unlock(&nodep->idx->lock);
			fibril_mutex_unlock(&nodep->lock);
			continue;
		}

		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);

		/*
		 * We can unlock the node and its index structure because we are
		 * the last player on this playground and VFS is preventing new
		 * players from entering.
		 */
		fibril_mutex_unlock(&nodep->idx->lock);
		fibril_mutex_unlock(&nodep->lock);

		/* Flush pending dentry changes before freeing the node. */
		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			if (rc != EOK)
				return rc;
		}
		nodep->idx->nodep = NULL;
		free(nodep->bp);
		free(nodep);

		/* Need to restart because we changed ffn_list. */
		goto restart;
	}
	fibril_mutex_unlock(&ffn_mutex);

	return EOK;
}
209
/** Obtain a fresh FAT in-core node, recycling a cached free node if possible.
 *
 * @param nodepp	Output: initialized node, cross-linked with a new (or
 *			recycled) fs_node_t wrapper.
 *
 * @return		EOK on success, ENOMEM, or a fat_node_sync() error.
 */
static int fat_node_get_new(fat_node_t **nodepp)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_list)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(list_first(&ffn_list), fat_node_t,
		    ffn_link);
		/* On lock contention, fall back to a fresh allocation. */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		/* Flush the evicted node's dentry before reusing the memory. */
		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			if (rc != EOK) {
				idxp_tmp->nodep = NULL;
				fibril_mutex_unlock(&nodep->lock);
				fibril_mutex_unlock(&idxp_tmp->lock);
				free(nodep->bp);
				free(nodep);
				return rc;
			}
		}
		/* Detach the recycled node from its old index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return ENOMEM;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return ENOMEM;
		}
	}
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	/* Cross-link the generic wrapper and the FAT-specific node. */
	fn->data = nodep;
	nodep->bp = fn;

	*nodepp = nodep;
	return EOK;
}
267
/** Internal version of fat_node_get().
 *
 * Returns the in-core node for a locked index structure, instantiating it
 * from the on-disk directory entry if it is not in memory yet. The returned
 * node's reference count is incremented.
 *
 * @param nodepp	Output: referenced in-core node.
 * @param idxp		Locked index structure.
 *
 * @return		EOK on success or an error code.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			/* First new reference: take it off the free list. */
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->service_id);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->service_id, idxp->pfc, NULL,
	    (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
	/* FAT32 splits the first cluster number into two 16-bit halves. */
	if (FAT_IS_FAT32(bs)) {
		nodep->firstc = uint16_t_le2host(d->firstc_lo) |
		    (uint16_t_le2host(d->firstc_hi) << 16);
	} else
		nodep->firstc = uint16_t_le2host(d->firstc);

	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;

		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint32_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->service_id,
		    nodep->firstc);
		if (rc != EOK) {
			(void) block_put(b);
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = BPS(bs) * SPC(bs) * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}

	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}
366
367/*
368 * FAT libfs operations.
369 */
370
/** Get the root directory node of a FAT instance.
 *
 * The root node always occupies index 0 (established in fat_mounted()).
 */
int fat_root_get(fs_node_t **rfn, service_id_t service_id)
{
	return fat_node_get(rfn, service_id, 0);
}
375
/** Look up a directory entry by name.
 *
 * @param rfn		Output: matching child node, or NULL on a miss
 *			(EOK is still returned in that case).
 * @param pfn		Parent directory node.
 * @param component	Name to look up (compared via fat_dentry_namecmp()).
 *
 * @return		EOK on success (hit or miss) or an error code.
 */
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_LFN_NAME_SIZE];
	fat_dentry_t *d;
	service_id_t service_id;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	service_id = parentp->idx->service_id;
	fibril_mutex_unlock(&parentp->idx->lock);

	fat_directory_t di;
	rc = fat_directory_open(parentp, &di);
	if (rc != EOK)
		return rc;

	while (fat_directory_read(&di, name, &d) == EOK) {
		if (fat_dentry_namecmp(name, component) == 0) {
			/* hit */
			fat_node_t *nodep;
			/* Offset of the dentry within its block. */
			aoff64_t o = di.pos %
			    (BPS(di.bs) / sizeof(fat_dentry_t));
			fat_idx_t *idx = fat_idx_get_by_pos(service_id,
			    parentp->firstc, di.bnum * DPS(di.bs) + o);
			if (!idx) {
				/*
				 * Can happen if memory is low or if we
				 * run out of 32-bit indices.
				 */
				rc = fat_directory_close(&di);
				return (rc == EOK) ? ENOMEM : rc;
			}
			/* fat_idx_get_by_pos() returns idx locked. */
			rc = fat_node_get_core(&nodep, idx);
			fibril_mutex_unlock(&idx->lock);
			if (rc != EOK) {
				(void) fat_directory_close(&di);
				return rc;
			}
			*rfn = FS_NODE(nodep);
			rc = fat_directory_close(&di);
			if (rc != EOK)
				(void) fat_node_put(*rfn);
			return rc;
		} else {
			rc = fat_directory_next(&di);
			if (rc != EOK)
				break;
		}
	}
	(void) fat_directory_close(&di);
	/* No match: report success with a NULL node. */
	*rfn = NULL;
	return EOK;
}
430
431/** Instantiate a FAT in-core node. */
432int fat_node_get(fs_node_t **rfn, service_id_t service_id, fs_index_t index)
433{
434 fat_node_t *nodep;
435 fat_idx_t *idxp;
436 int rc;
437
438 idxp = fat_idx_get_by_index(service_id, index);
439 if (!idxp) {
440 *rfn = NULL;
441 return EOK;
442 }
443 /* idxp->lock held */
444 rc = fat_node_get_core(&nodep, idxp);
445 fibril_mutex_unlock(&idxp->lock);
446 if (rc == EOK)
447 *rfn = FS_NODE(nodep);
448 return rc;
449}
450
451int fat_node_open(fs_node_t *fn)
452{
453 /*
454 * Opening a file is stateless, nothing
455 * to be done here.
456 */
457 return EOK;
458}
459
460int fat_node_put(fs_node_t *fn)
461{
462 fat_node_t *nodep = FAT_NODE(fn);
463 bool destroy = false;
464
465 fibril_mutex_lock(&nodep->lock);
466 if (!--nodep->refcnt) {
467 if (nodep->idx) {
468 fibril_mutex_lock(&ffn_mutex);
469 list_append(&nodep->ffn_link, &ffn_list);
470 fibril_mutex_unlock(&ffn_mutex);
471 } else {
472 /*
473 * The node does not have any index structure associated
474 * with itself. This can only mean that we are releasing
475 * the node after a failed attempt to allocate the index
476 * structure for it.
477 */
478 destroy = true;
479 }
480 }
481 fibril_mutex_unlock(&nodep->lock);
482 if (destroy) {
483 free(nodep->bp);
484 free(nodep);
485 }
486 return EOK;
487}
488
489int fat_create_node(fs_node_t **rfn, service_id_t service_id, int flags)
490{
491 fat_idx_t *idxp;
492 fat_node_t *nodep;
493 fat_bs_t *bs;
494 fat_cluster_t mcl, lcl;
495 int rc;
496
497 bs = block_bb_get(service_id);
498 if (flags & L_DIRECTORY) {
499 /* allocate a cluster */
500 rc = fat_alloc_clusters(bs, service_id, 1, &mcl, &lcl);
501 if (rc != EOK)
502 return rc;
503 /* populate the new cluster with unused dentries */
504 rc = fat_zero_cluster(bs, service_id, mcl);
505 if (rc != EOK) {
506 (void) fat_free_clusters(bs, service_id, mcl);
507 return rc;
508 }
509 }
510
511 rc = fat_node_get_new(&nodep);
512 if (rc != EOK) {
513 (void) fat_free_clusters(bs, service_id, mcl);
514 return rc;
515 }
516 rc = fat_idx_get_new(&idxp, service_id);
517 if (rc != EOK) {
518 (void) fat_free_clusters(bs, service_id, mcl);
519 (void) fat_node_put(FS_NODE(nodep));
520 return rc;
521 }
522 /* idxp->lock held */
523 if (flags & L_DIRECTORY) {
524 nodep->type = FAT_DIRECTORY;
525 nodep->firstc = mcl;
526 nodep->size = BPS(bs) * SPC(bs);
527 } else {
528 nodep->type = FAT_FILE;
529 nodep->firstc = FAT_CLST_RES0;
530 nodep->size = 0;
531 }
532 nodep->lnkcnt = 0; /* not linked anywhere */
533 nodep->refcnt = 1;
534 nodep->dirty = true;
535
536 nodep->idx = idxp;
537 idxp->nodep = nodep;
538
539 fibril_mutex_unlock(&idxp->lock);
540 *rfn = FS_NODE(nodep);
541 return EOK;
542}
543
544int fat_destroy_node(fs_node_t *fn)
545{
546 fat_node_t *nodep = FAT_NODE(fn);
547 fat_bs_t *bs;
548 bool has_children;
549 int rc;
550
551 /*
552 * The node is not reachable from the file system. This means that the
553 * link count should be zero and that the index structure cannot be
554 * found in the position hash. Obviously, we don't need to lock the node
555 * nor its index structure.
556 */
557 assert(nodep->lnkcnt == 0);
558
559 /*
560 * The node may not have any children.
561 */
562 rc = fat_has_children(&has_children, fn);
563 if (rc != EOK)
564 return rc;
565 assert(!has_children);
566
567 bs = block_bb_get(nodep->idx->service_id);
568 if (nodep->firstc != FAT_CLST_RES0) {
569 assert(nodep->size);
570 /* Free all clusters allocated to the node. */
571 rc = fat_free_clusters(bs, nodep->idx->service_id,
572 nodep->firstc);
573 }
574
575 fat_idx_destroy(nodep->idx);
576 free(nodep->bp);
577 free(nodep);
578 return rc;
579}
580
/** Link a child node into a parent directory under the given name.
 *
 * FAT does not support hard links; the child must not be linked yet
 * (lnkcnt == 0), otherwise EMLINK is returned.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node to be linked.
 * @param name	Name of the new directory entry.
 *
 * @return	EOK on success, EMLINK, ENOTSUP (invalid name) or an I/O error.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	fat_directory_t di;
	fat_dentry_t de;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_valid_name(name))
		return ENOTSUP;

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->service_id);
	rc = fat_directory_open(parentp, &di);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}

	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except of the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(&de, 0, sizeof(fat_dentry_t));

	rc = fat_directory_write(&di, name, &de);
	if (rc != EOK) {
		(void) fat_directory_close(&di);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_directory_close(&di);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}

	fibril_mutex_unlock(&parentp->idx->lock);

	fibril_mutex_lock(&childp->idx->lock);

	if (childp->type == FAT_DIRECTORY) {
		/*
		 * If possible, create the Sub-directory Identifier Entry and
		 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
		 * These entries are not mandatory according to Standard
		 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
		 * rather a sign of our good will.
		 */
		rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			/*
			 * Rather than returning an error, simply skip the
			 * creation of these two entries.
			 */
			goto skip_dots;
		}
		d = (fat_dentry_t *) b->data;
		/* Only write "." if the slot is free or already holds it. */
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (memcmp(d->name, FAT_NAME_DOT, FAT_NAME_LEN)) == 0) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			d->firstc = host2uint16_t_le(childp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		d++;
		/* Only write ".." if the slot is free or already holds it. */
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (memcmp(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN) == 0)) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			/* ".." of a top-level dir points at the root's parent. */
			d->firstc = (parentp->firstc == FAT_ROOT_CLST(bs)) ?
			    host2uint16_t_le(FAT_CLST_ROOTPAR) :
			    host2uint16_t_le(parentp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		b->dirty = true;		/* need to sync block */
		/*
		 * Ignore the return value as we would have fallen through on error
		 * anyway.
		 */
		(void) block_put(b);
	}
skip_dots:

	/* Record the child's new position within the parent directory. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = di.pos; /* di.pos holds absolute position of SFN entry */
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
701
/** Remove a child node's directory entry from its parent.
 *
 * The child must have no children of its own. Its reference count is bumped
 * so the node stays in memory until it is destroyed.
 *
 * @param pfn	Parent directory node (NULL yields EBUSY).
 * @param cfn	Child node to unlink.
 * @param nm	Entry name (unused; the position comes from the child's index).
 *
 * @return	EOK on success, EBUSY, ENOTEMPTY or an I/O error.
 */
int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	bool has_children;
	int rc;

	if (!parentp)
		return EBUSY;

	rc = fat_has_children(&has_children, cfn);
	if (rc != EOK)
		return rc;
	if (has_children)
		return ENOTEMPTY;

	/* Lock order: parent node, child node, child index. */
	fibril_mutex_lock(&parentp->lock);
	fibril_mutex_lock(&childp->lock);
	assert(childp->lnkcnt == 1);
	fibril_mutex_lock(&childp->idx->lock);

	/* Erase the child's dentry in the parent directory. */
	fat_directory_t di;
	rc = fat_directory_open(parentp, &di);
	if (rc != EOK)
		goto error;
	rc = fat_directory_seek(&di, childp->idx->pdi);
	if (rc != EOK)
		goto error;
	rc = fat_directory_erase(&di);
	if (rc != EOK)
		goto error;
	rc = fat_directory_close(&di);
	if (rc != EOK)
		goto error;

	/* remove the index structure from the position hash */
	fat_idx_hashout(childp->idx);
	/* clear position information */
	childp->idx->pfc = FAT_CLST_RES0;
	childp->idx->pdi = 0;
	fibril_mutex_unlock(&childp->idx->lock);
	childp->lnkcnt = 0;
	childp->refcnt++;	/* keep the node in memory until destroyed */
	childp->dirty = true;
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);

	return EOK;

error:
	/*
	 * NOTE(review): if fat_directory_open() itself failed, di is closed
	 * here without ever being opened — verify fat_directory_close()
	 * tolerates that.
	 */
	(void) fat_directory_close(&di);
	fibril_mutex_unlock(&childp->idx->lock);
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);
	return rc;
}
758
759int fat_has_children(bool *has_children, fs_node_t *fn)
760{
761 fat_bs_t *bs;
762 fat_node_t *nodep = FAT_NODE(fn);
763 unsigned blocks;
764 block_t *b;
765 unsigned i, j;
766 int rc;
767
768 if (nodep->type != FAT_DIRECTORY) {
769 *has_children = false;
770 return EOK;
771 }
772
773 fibril_mutex_lock(&nodep->idx->lock);
774 bs = block_bb_get(nodep->idx->service_id);
775
776 blocks = nodep->size / BPS(bs);
777
778 for (i = 0; i < blocks; i++) {
779 fat_dentry_t *d;
780
781 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
782 if (rc != EOK) {
783 fibril_mutex_unlock(&nodep->idx->lock);
784 return rc;
785 }
786 for (j = 0; j < DPS(bs); j++) {
787 d = ((fat_dentry_t *)b->data) + j;
788 switch (fat_classify_dentry(d)) {
789 case FAT_DENTRY_SKIP:
790 case FAT_DENTRY_FREE:
791 continue;
792 case FAT_DENTRY_LAST:
793 rc = block_put(b);
794 fibril_mutex_unlock(&nodep->idx->lock);
795 *has_children = false;
796 return rc;
797 default:
798 case FAT_DENTRY_VALID:
799 rc = block_put(b);
800 fibril_mutex_unlock(&nodep->idx->lock);
801 *has_children = true;
802 return rc;
803 }
804 }
805 rc = block_put(b);
806 if (rc != EOK) {
807 fibril_mutex_unlock(&nodep->idx->lock);
808 return rc;
809 }
810 }
811
812 fibril_mutex_unlock(&nodep->idx->lock);
813 *has_children = false;
814 return EOK;
815}
816
817
818fs_index_t fat_index_get(fs_node_t *fn)
819{
820 return FAT_NODE(fn)->idx->index;
821}
822
823aoff64_t fat_size_get(fs_node_t *fn)
824{
825 return FAT_NODE(fn)->size;
826}
827
828unsigned fat_lnkcnt_get(fs_node_t *fn)
829{
830 return FAT_NODE(fn)->lnkcnt;
831}
832
833bool fat_is_directory(fs_node_t *fn)
834{
835 return FAT_NODE(fn)->type == FAT_DIRECTORY;
836}
837
838bool fat_is_file(fs_node_t *fn)
839{
840 return FAT_NODE(fn)->type == FAT_FILE;
841}
842
/** Stub: per-node service lookup is not supported; always reports 0. */
service_id_t fat_service_get(fs_node_t *node)
{
	return 0;
}
847
848int fat_size_block(service_id_t service_id, uint32_t *size)
849{
850 fat_bs_t *bs;
851
852 bs = block_bb_get(service_id);
853 *size = BPC(bs);
854
855 return EOK;
856}
857
858int fat_total_block_count(service_id_t service_id, uint64_t *count)
859{
860 fat_bs_t *bs;
861
862 bs = block_bb_get(service_id);
863 *count = (SPC(bs)) ? TS(bs) / SPC(bs) : 0;
864
865 return EOK;
866}
867
868int fat_free_block_count(service_id_t service_id, uint64_t *count)
869{
870 fat_bs_t *bs;
871 fat_cluster_t e0;
872 uint64_t block_count;
873 int rc;
874 uint32_t cluster_no, clusters;
875
876 block_count = 0;
877 bs = block_bb_get(service_id);
878 clusters = (SPC(bs)) ? TS(bs) / SPC(bs) : 0;
879 for (cluster_no = 0; cluster_no < clusters; cluster_no++) {
880 rc = fat_get_cluster(bs, service_id, FAT1, cluster_no, &e0);
881 if (rc != EOK)
882 return EIO;
883
884 if (e0 == FAT_CLST_RES0)
885 block_count++;
886 }
887 *count = block_count;
888
889 return EOK;
890}
891
/** libfs operations — dispatch table wiring the fat_* implementations above
 * into the generic libfs layer. */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_open = fat_node_open,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file,
	.service_get = fat_service_get,
	.size_block = fat_size_block,
	.total_block_count = fat_total_block_count,
	.free_block_count = fat_free_block_count
};
914
915/*
916 * FAT VFS_OUT operations.
917 */
918
/** VFS_OUT mount operation: mount a FAT file system instance.
 *
 * Parses mount options ("wtcache" selects write-through caching, "nolfn"
 * disables long file names), initializes libblock and the block cache,
 * sanity-checks the boot sector and builds the root node.
 *
 * @param service_id	Block device being mounted.
 * @param opts		Space/comma-separated mount option string.
 * @param index		Output: index of the root node (always 0).
 * @param size		Output: size of the root directory in bytes.
 * @param linkcnt	Output: root link count (always 0; root is unlinked).
 *
 * @return		EOK on success or an error code.
 */
static int
fat_mounted(service_id_t service_id, const char *opts, fs_index_t *index,
    aoff64_t *size, unsigned *linkcnt)
{
	enum cache_mode cmode = CACHE_MODE_WB;
	fat_bs_t *bs;
	fat_instance_t *instance;
	int rc;

	instance = malloc(sizeof(fat_instance_t));
	if (!instance)
		return ENOMEM;
	instance->lfn_enabled = true;

	/* Parse mount options. */
	char *mntopts = (char *) opts;
	char *saveptr;
	char *opt;
	while ((opt = strtok_r(mntopts, " ,", &saveptr)) != NULL) {
		if (str_cmp(opt, "wtcache") == 0)
			cmode = CACHE_MODE_WT;
		else if (str_cmp(opt, "nolfn") == 0)
			instance->lfn_enabled = false;
		mntopts = NULL;
	}

	/* initialize libblock */
	rc = block_init(EXCHANGE_SERIALIZE, service_id, BS_SIZE);
	if (rc != EOK) {
		free(instance);
		return rc;
	}

	/* prepare the boot block */
	rc = block_bb_read(service_id, BS_BLOCK);
	if (rc != EOK) {
		free(instance);
		block_fini(service_id);
		return rc;
	}

	/* get the buffer with the boot sector */
	bs = block_bb_get(service_id);

	/* The sector size announced by the boot sector must match BS_SIZE. */
	if (BPS(bs) != BS_SIZE) {
		free(instance);
		block_fini(service_id);
		return ENOTSUP;
	}

	/* Initialize the block cache */
	rc = block_cache_init(service_id, BPS(bs), 0 /* XXX */, cmode);
	if (rc != EOK) {
		free(instance);
		block_fini(service_id);
		return rc;
	}

	/* Do some simple sanity checks on the file system. */
	rc = fat_sanity_check(bs, service_id);
	if (rc != EOK) {
		free(instance);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		return rc;
	}

	rc = fat_idx_init_by_service_id(service_id);
	if (rc != EOK) {
		free(instance);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		return rc;
	}

	/* Initialize the root node. */
	fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
	if (!rfn) {
		free(instance);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return ENOMEM;
	}

	fs_node_initialize(rfn);
	fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
	if (!rootp) {
		free(instance);
		free(rfn);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return ENOMEM;
	}
	fat_node_initialize(rootp);

	/* The root is anchored at the special parent FAT_CLST_ROOTPAR. */
	fat_idx_t *ridxp = fat_idx_get_by_pos(service_id, FAT_CLST_ROOTPAR, 0);
	if (!ridxp) {
		free(instance);
		free(rfn);
		free(rootp);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return ENOMEM;
	}
	assert(ridxp->index == 0);
	/* ridxp->lock held */

	rootp->type = FAT_DIRECTORY;
	rootp->firstc = FAT_ROOT_CLST(bs);
	rootp->refcnt = 1;
	rootp->lnkcnt = 0;	/* FS root is not linked */

	if (FAT_IS_FAT32(bs)) {
		/* FAT32: the root directory is an ordinary cluster chain. */
		uint32_t clusters;
		rc = fat_clusters_get(&clusters, bs, service_id, rootp->firstc);
		if (rc != EOK) {
			fibril_mutex_unlock(&ridxp->lock);
			free(instance);
			free(rfn);
			free(rootp);
			(void) block_cache_fini(service_id);
			block_fini(service_id);
			fat_idx_fini_by_service_id(service_id);
			return ENOTSUP;
		}
		rootp->size = BPS(bs) * SPC(bs) * clusters;
	} else
		/* FAT12/16: fixed-size root directory area. */
		rootp->size = RDE(bs) * sizeof(fat_dentry_t);

	rc = fs_instance_create(service_id, instance);
	if (rc != EOK) {
		fibril_mutex_unlock(&ridxp->lock);
		free(instance);
		free(rfn);
		free(rootp);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return rc;
	}

	rootp->idx = ridxp;
	ridxp->nodep = rootp;
	rootp->bp = rfn;
	rfn->data = rootp;

	fibril_mutex_unlock(&ridxp->lock);

	*index = ridxp->index;
	*size = rootp->size;
	*linkcnt = rootp->lnkcnt;

	return EOK;
}
1076
1077static int fat_update_fat32_fsinfo(service_id_t service_id)
1078{
1079 fat_bs_t *bs;
1080 fat32_fsinfo_t *info;
1081 block_t *b;
1082 int rc;
1083
1084 bs = block_bb_get(service_id);
1085 assert(FAT_IS_FAT32(bs));
1086
1087 rc = block_get(&b, service_id, uint16_t_le2host(bs->fat32.fsinfo_sec),
1088 BLOCK_FLAGS_NONE);
1089 if (rc != EOK)
1090 return rc;
1091
1092 info = (fat32_fsinfo_t *) b->data;
1093
1094 if (memcmp(info->sig1, FAT32_FSINFO_SIG1, sizeof(info->sig1)) != 0 ||
1095 memcmp(info->sig2, FAT32_FSINFO_SIG2, sizeof(info->sig2)) != 0 ||
1096 memcmp(info->sig3, FAT32_FSINFO_SIG3, sizeof(info->sig3)) != 0) {
1097 (void) block_put(b);
1098 return EINVAL;
1099 }
1100
1101 /* For now, invalidate the counter. */
1102 info->free_clusters = host2uint16_t_le(-1);
1103
1104 b->dirty = true;
1105 return block_put(b);
1106}
1107
/** VFS_OUT unmounted operation: tear down a mounted FAT instance.
 *
 * Fails with EBUSY if anything besides the root node is still referenced.
 */
static int fat_unmounted(service_id_t service_id)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	int rc;

	bs = block_bb_get(service_id);

	rc = fat_root_get(&fn, service_id);
	if (rc != EOK)
		return rc;
	nodep = FAT_NODE(fn);

	/*
	 * We expect exactly two references on the root node. One for the
	 * fat_root_get() above and one created in fat_mounted().
	 */
	if (nodep->refcnt != 2) {
		(void) fat_node_put(fn);
		return EBUSY;
	}

	if (FAT_IS_FAT32(bs)) {
		/*
		 * Attempt to update the FAT32 FS info.
		 */
		(void) fat_update_fat32_fsinfo(service_id);
	}

	/*
	 * Put the root node and force it to the FAT free node list.
	 */
	(void) fat_node_put(fn);
	(void) fat_node_put(fn);

	/*
	 * Perform cleanup of the node structures, index structures and
	 * associated data. Write back this file system's dirty blocks and
	 * stop using libblock for this instance.
	 */
	(void) fat_node_fini_by_service_id(service_id);
	fat_idx_fini_by_service_id(service_id);
	(void) block_cache_fini(service_id);
	block_fini(service_id);

	/* Dispose of the per-instance data allocated in fat_mounted(). */
	void *data;
	if (fs_instance_get(service_id, &data) == EOK) {
		fs_instance_destroy(service_id);
		free(data);
	}

	return EOK;
}
1162
/** VFS_OUT read operation.
 *
 * For regular files at most one block of data is returned per call, relying
 * on VFS to retry for more. For directories a single entry name is handed to
 * the client and *rbytes is the advance in dentry positions.
 *
 * @param service_id	File system instance.
 * @param index		Index of the node to read from.
 * @param pos		File byte offset, or directory entry position.
 * @param rbytes	Output: bytes (file) or positions (directory) consumed.
 *
 * @return		EOK on success or an error code.
 */
static int
fat_read(service_id_t service_id, fs_index_t index, aoff64_t pos,
    size_t *rbytes)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, service_id, index);
	if (rc != EOK)
		return rc;
	if (!fn)
		return ENOENT;
	nodep = FAT_NODE(fn);

	/* The actual data transfer is negotiated with the client over IPC. */
	ipc_callid_t callid;
	size_t len;
	if (!async_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		async_answer_0(callid, EINVAL);
		return EINVAL;
	}

	bs = block_bb_get(service_id);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) async_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clip to the block boundary and to the file size. */
			bytes = min(len, BPS(bs) - pos % BPS(bs));
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
			    BLOCK_FLAGS_NONE);
			if (rc != EOK) {
				fat_node_put(fn);
				async_answer_0(callid, rc);
				return rc;
			}
			(void) async_data_read_finalize(callid,
			    b->data + pos % BPS(bs), bytes);
			rc = block_put(b);
			if (rc != EOK) {
				fat_node_put(fn);
				return rc;
			}
		}
	} else {
		aoff64_t spos = pos;
		char name[FAT_LFN_NAME_SIZE];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % BPS(bs) == 0);
		assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

		/* Seek to the requested entry and read one name. */
		fat_directory_t di;
		rc = fat_directory_open(nodep, &di);
		if (rc != EOK)
			goto err;
		rc = fat_directory_seek(&di, pos);
		if (rc != EOK) {
			(void) fat_directory_close(&di);
			goto err;
		}

		rc = fat_directory_read(&di, name, &d);
		if (rc == EOK)
			goto hit;
		if (rc == ENOENT)
			goto miss;

err:
		/* Propagate the error to the client and bail out. */
		(void) fat_node_put(fn);
		async_answer_0(callid, rc);
		return rc;

miss:
		/* No more entries: answer the client with ENOENT. */
		rc = fat_directory_close(&di);
		if (rc != EOK)
			goto err;
		rc = fat_node_put(fn);
		async_answer_0(callid, rc != EOK ? rc : ENOENT);
		*rbytes = 0;
		return rc != EOK ? rc : ENOENT;

hit:
		/* Hand the (possibly long) entry name over to the client. */
		pos = di.pos;
		rc = fat_directory_close(&di);
		if (rc != EOK)
			goto err;
		(void) async_data_read_finalize(callid, name,
		    str_size(name) + 1);
		bytes = (pos - spos) + 1;
	}

	rc = fat_node_put(fn);
	*rbytes = bytes;
	return rc;
}
1272
1273static int
1274fat_write(service_id_t service_id, fs_index_t index, aoff64_t pos,
1275 size_t *wbytes, aoff64_t *nsize)
1276{
1277 fs_node_t *fn;
1278 fat_node_t *nodep;
1279 fat_bs_t *bs;
1280 size_t bytes;
1281 block_t *b;
1282 aoff64_t boundary;
1283 int flags = BLOCK_FLAGS_NONE;
1284 int rc;
1285
1286 rc = fat_node_get(&fn, service_id, index);
1287 if (rc != EOK)
1288 return rc;
1289 if (!fn)
1290 return ENOENT;
1291 nodep = FAT_NODE(fn);
1292
1293 ipc_callid_t callid;
1294 size_t len;
1295 if (!async_data_write_receive(&callid, &len)) {
1296 (void) fat_node_put(fn);
1297 async_answer_0(callid, EINVAL);
1298 return EINVAL;
1299 }
1300
1301 bs = block_bb_get(service_id);
1302
1303 /*
1304 * In all scenarios, we will attempt to write out only one block worth
1305 * of data at maximum. There might be some more efficient approaches,
1306 * but this one greatly simplifies fat_write(). Note that we can afford
1307 * to do this because the client must be ready to handle the return
1308 * value signalizing a smaller number of bytes written.
1309 */
1310 bytes = min(len, BPS(bs) - pos % BPS(bs));
1311 if (bytes == BPS(bs))
1312 flags |= BLOCK_FLAGS_NOREAD;
1313
1314 boundary = ROUND_UP(nodep->size, BPC(bs));
1315 if (pos < boundary) {
1316 /*
1317 * This is the easier case - we are either overwriting already
1318 * existing contents or writing behind the EOF, but still within
1319 * the limits of the last cluster. The node size may grow to the
1320 * next block size boundary.
1321 */
1322 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1323 if (rc != EOK) {
1324 (void) fat_node_put(fn);
1325 async_answer_0(callid, rc);
1326 return rc;
1327 }
1328 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
1329 if (rc != EOK) {
1330 (void) fat_node_put(fn);
1331 async_answer_0(callid, rc);
1332 return rc;
1333 }
1334 (void) async_data_write_finalize(callid,
1335 b->data + pos % BPS(bs), bytes);
1336 b->dirty = true; /* need to sync block */
1337 rc = block_put(b);
1338 if (rc != EOK) {
1339 (void) fat_node_put(fn);
1340 return rc;
1341 }
1342 if (pos + bytes > nodep->size) {
1343 nodep->size = pos + bytes;
1344 nodep->dirty = true; /* need to sync node */
1345 }
1346 *wbytes = bytes;
1347 *nsize = nodep->size;
1348 rc = fat_node_put(fn);
1349 return rc;
1350 } else {
1351 /*
1352 * This is the more difficult case. We must allocate new
1353 * clusters for the node and zero them out.
1354 */
1355 unsigned nclsts;
1356 fat_cluster_t mcl, lcl;
1357
1358 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
1359 /* create an independent chain of nclsts clusters in all FATs */
1360 rc = fat_alloc_clusters(bs, service_id, nclsts, &mcl, &lcl);
1361 if (rc != EOK) {
1362 /* could not allocate a chain of nclsts clusters */
1363 (void) fat_node_put(fn);
1364 async_answer_0(callid, rc);
1365 return rc;
1366 }
1367 /* zero fill any gaps */
1368 rc = fat_fill_gap(bs, nodep, mcl, pos);
1369 if (rc != EOK) {
1370 (void) fat_free_clusters(bs, service_id, mcl);
1371 (void) fat_node_put(fn);
1372 async_answer_0(callid, rc);
1373 return rc;
1374 }
1375 rc = _fat_block_get(&b, bs, service_id, lcl, NULL,
1376 (pos / BPS(bs)) % SPC(bs), flags);
1377 if (rc != EOK) {
1378 (void) fat_free_clusters(bs, service_id, mcl);
1379 (void) fat_node_put(fn);
1380 async_answer_0(callid, rc);
1381 return rc;
1382 }
1383 (void) async_data_write_finalize(callid,
1384 b->data + pos % BPS(bs), bytes);
1385 b->dirty = true; /* need to sync block */
1386 rc = block_put(b);
1387 if (rc != EOK) {
1388 (void) fat_free_clusters(bs, service_id, mcl);
1389 (void) fat_node_put(fn);
1390 return rc;
1391 }
1392 /*
1393 * Append the cluster chain starting in mcl to the end of the
1394 * node's cluster chain.
1395 */
1396 rc = fat_append_clusters(bs, nodep, mcl, lcl);
1397 if (rc != EOK) {
1398 (void) fat_free_clusters(bs, service_id, mcl);
1399 (void) fat_node_put(fn);
1400 return rc;
1401 }
1402 *nsize = nodep->size = pos + bytes;
1403 rc = fat_node_put(fn);
1404 nodep->dirty = true; /* need to sync node */
1405 *wbytes = bytes;
1406 return rc;
1407 }
1408}
1409
1410static int
1411fat_truncate(service_id_t service_id, fs_index_t index, aoff64_t size)
1412{
1413 fs_node_t *fn;
1414 fat_node_t *nodep;
1415 fat_bs_t *bs;
1416 int rc;
1417
1418 rc = fat_node_get(&fn, service_id, index);
1419 if (rc != EOK)
1420 return rc;
1421 if (!fn)
1422 return ENOENT;
1423 nodep = FAT_NODE(fn);
1424
1425 bs = block_bb_get(service_id);
1426
1427 if (nodep->size == size) {
1428 rc = EOK;
1429 } else if (nodep->size < size) {
1430 /*
1431 * The standard says we have the freedom to grow the node.
1432 * For now, we simply return an error.
1433 */
1434 rc = EINVAL;
1435 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
1436 /*
1437 * The node will be shrunk, but no clusters will be deallocated.
1438 */
1439 nodep->size = size;
1440 nodep->dirty = true; /* need to sync node */
1441 rc = EOK;
1442 } else {
1443 /*
1444 * The node will be shrunk, clusters will be deallocated.
1445 */
1446 if (size == 0) {
1447 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1448 if (rc != EOK)
1449 goto out;
1450 } else {
1451 fat_cluster_t lastc;
1452 rc = fat_cluster_walk(bs, service_id, nodep->firstc,
1453 &lastc, NULL, (size - 1) / BPC(bs));
1454 if (rc != EOK)
1455 goto out;
1456 rc = fat_chop_clusters(bs, nodep, lastc);
1457 if (rc != EOK)
1458 goto out;
1459 }
1460 nodep->size = size;
1461 nodep->dirty = true; /* need to sync node */
1462 rc = EOK;
1463 }
1464out:
1465 fat_node_put(fn);
1466 return rc;
1467}
1468
1469static int fat_close(service_id_t service_id, fs_index_t index)
1470{
1471 return EOK;
1472}
1473
1474static int fat_destroy(service_id_t service_id, fs_index_t index)
1475{
1476 fs_node_t *fn;
1477 fat_node_t *nodep;
1478 int rc;
1479
1480 rc = fat_node_get(&fn, service_id, index);
1481 if (rc != EOK)
1482 return rc;
1483 if (!fn)
1484 return ENOENT;
1485
1486 nodep = FAT_NODE(fn);
1487 /*
1488 * We should have exactly two references. One for the above
1489 * call to fat_node_get() and one from fat_unlink().
1490 */
1491 assert(nodep->refcnt == 2);
1492
1493 rc = fat_destroy_node(fn);
1494 return rc;
1495}
1496
1497static int fat_sync(service_id_t service_id, fs_index_t index)
1498{
1499 fs_node_t *fn;
1500 int rc = fat_node_get(&fn, service_id, index);
1501 if (rc != EOK)
1502 return rc;
1503 if (!fn)
1504 return ENOENT;
1505
1506 fat_node_t *nodep = FAT_NODE(fn);
1507
1508 nodep->dirty = true;
1509 rc = fat_node_sync(nodep);
1510
1511 fat_node_put(fn);
1512 return rc;
1513}
1514
/** Dispatch table binding the VFS output protocol operations to their
 * FAT implementations. Consumed by the libfs/VFS framework. */
vfs_out_ops_t fat_ops = {
	.mounted = fat_mounted,
	.unmounted = fat_unmounted,
	.read = fat_read,
	.write = fat_write,
	.truncate = fat_truncate,
	.close = fat_close,
	.destroy = fat_destroy,
	.sync = fat_sync,
};
1525
1526/**
1527 * @}
1528 */
Note: See TracBrowser for help on using the repository browser.