source: mainline/uspace/srv/fs/fat/fat_ops.c @ f066a87

Last change on this file since f066a87 was d2c8533, checked in by Jiri Svoboda <jiri@…>, 9 years ago

File system probing groundwork. Only MFS can do it for now.

/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2011 Oleg Romanenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file fat_ops.c
 * @brief Implementation of VFS operations for the FAT file system server.
 */

#include "fat.h"
#include "fat_dentry.h"
#include "fat_fat.h"
#include "fat_directory.h"
#include "../../vfs/vfs.h"
#include <libfs.h>
#include <block.h>
#include <ipc/services.h>
#include <ipc/loc.h>
#include <macros.h>
#include <async.h>
#include <errno.h>
#include <str.h>
#include <byteorder.h>
#include <adt/hash_table.h>
#include <adt/list.h>
#include <assert.h>
#include <fibril_synch.h>
#include <align.h>
#include <malloc.h>

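/* Translate between the generic libfs node and the embedded FAT-specific node. */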
#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
#define FS_NODE(node) ((node) ? (node)->bp : NULL)

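/* Directory entries per sector (DPS) and bytes per cluster (BPC). */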
#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
#define BPC(bs) (BPS((bs)) * SPC((bs)))

/** Mutex protecting the list of cached free FAT nodes. */
static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_list);

/*
 * Forward declarations of FAT libfs operations.
 */
static int fat_root_get(fs_node_t **, service_id_t);
static int fat_match(fs_node_t **, fs_node_t *, const char *);
static int fat_node_get(fs_node_t **, service_id_t, fs_index_t);
static int fat_node_open(fs_node_t *);
static int fat_node_put(fs_node_t *);
static int fat_create_node(fs_node_t **, service_id_t, int);
static int fat_destroy_node(fs_node_t *);
static int fat_link(fs_node_t *, fs_node_t *, const char *);
static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
static int fat_has_children(bool *, fs_node_t *);
static fs_index_t fat_index_get(fs_node_t *);
static aoff64_t fat_size_get(fs_node_t *);
static unsigned fat_lnkcnt_get(fs_node_t *);
static bool fat_is_directory(fs_node_t *);
static bool fat_is_file(fs_node_t *node);
static service_id_t fat_service_get(fs_node_t *node);
static int fat_size_block(service_id_t, uint32_t *);
static int fat_total_block_count(service_id_t, uint64_t *);
static int fat_free_block_count(service_id_t, uint64_t *);

/*
 * Helper functions.
 */
static void fat_node_initialize(fat_node_t *node)
{
	fibril_mutex_initialize(&node->lock);
	node->bp = NULL;
	node->idx = NULL;
	node->type = 0;
	link_initialize(&node->ffn_link);
	node->size = 0;
	node->lnkcnt = 0;
	node->refcnt = 0;
	node->dirty = false;
	node->lastc_cached_valid = false;
	node->lastc_cached_value = 0;
	node->currc_cached_valid = false;
	node->currc_cached_bn = 0;
	node->currc_cached_value = 0;
}

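/** Write the in-core node's metadata back to its on-disk directory entry. */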
static int fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	int rc;

	assert(node->dirty);

	bs = block_bb_get(node->idx->service_id);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, node->idx->service_id, node->idx->pfc,
	    NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
	    BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));

	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}

	/* TODO: update other fields? (e.g. time fields) */

	b->dirty = true; /* need to sync block */
	rc = block_put(b);
	return rc;
}

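/** Release cached free nodes that belong to an instance being unmounted. */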
static int fat_node_fini_by_service_id(service_id_t service_id)
{
	int rc;

	/*
	 * We are called from fat_unmounted() and assume that there are already
	 * no nodes belonging to this instance with non-zero refcount. Therefore
	 * it is sufficient to clean up only the FAT free node list.
	 */

restart:
	fibril_mutex_lock(&ffn_mutex);
	list_foreach(ffn_list, ffn_link, fat_node_t, nodep) {
		if (!fibril_mutex_trylock(&nodep->lock)) {
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (!fibril_mutex_trylock(&nodep->idx->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (nodep->idx->service_id != service_id) {
			fibril_mutex_unlock(&nodep->idx->lock);
			fibril_mutex_unlock(&nodep->lock);
			continue;
		}

		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);

		/*
		 * We can unlock the node and its index structure because we are
		 * the last player on this playground and VFS is preventing new
		 * players from entering.
		 */
		fibril_mutex_unlock(&nodep->idx->lock);
		fibril_mutex_unlock(&nodep->lock);

		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			if (rc != EOK)
				return rc;
		}
		nodep->idx->nodep = NULL;
		free(nodep->bp);
		free(nodep);

		/* Need to restart because we changed ffn_list. */
		goto restart;
	}
	fibril_mutex_unlock(&ffn_mutex);

	return EOK;
}

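/** Obtain an unused in-core node, reusing a cached free node when possible. */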
static int fat_node_get_new(fat_node_t **nodepp)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_list)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(list_first(&ffn_list), fat_node_t,
		    ffn_link);
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			if (rc != EOK) {
				idxp_tmp->nodep = NULL;
				fibril_mutex_unlock(&nodep->lock);
				fibril_mutex_unlock(&idxp_tmp->lock);
				free(nodep->bp);
				free(nodep);
				return rc;
			}
		}
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return ENOMEM;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return ENOMEM;
		}
	}
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;

	*nodepp = nodep;
	return EOK;
}

/** Internal version of fat_node_get().
 *
 * @param idxp Locked index structure.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->service_id);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->service_id, idxp->pfc, NULL,
	    (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
	if (FAT_IS_FAT32(bs)) {
		nodep->firstc = uint16_t_le2host(d->firstc_lo) |
		    (uint16_t_le2host(d->firstc_hi) << 16);
	} else
		nodep->firstc = uint16_t_le2host(d->firstc);

	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;

		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint32_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->service_id,
		    nodep->firstc);
		if (rc != EOK) {
			(void) block_put(b);
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = BPS(bs) * SPC(bs) * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}

	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}

/*
 * FAT libfs operations.
 */

int fat_root_get(fs_node_t **rfn, service_id_t service_id)
{
	return fat_node_get(rfn, service_id, 0);
}

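/** Look up the directory entry named 'component' inside the directory 'pfn'. */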
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_LFN_NAME_SIZE];
	fat_dentry_t *d;
	service_id_t service_id;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	service_id = parentp->idx->service_id;
	fibril_mutex_unlock(&parentp->idx->lock);

	fat_directory_t di;
	rc = fat_directory_open(parentp, &di);
	if (rc != EOK)
		return rc;

	while (fat_directory_read(&di, name, &d) == EOK) {
		if (fat_dentry_namecmp(name, component) == 0) {
			/* hit */
			fat_node_t *nodep;
			aoff64_t o = di.pos %
			    (BPS(di.bs) / sizeof(fat_dentry_t));
			fat_idx_t *idx = fat_idx_get_by_pos(service_id,
			    parentp->firstc, di.bnum * DPS(di.bs) + o);
			if (!idx) {
				/*
				 * Can happen if memory is low or if we
				 * run out of 32-bit indices.
				 */
				rc = fat_directory_close(&di);
				return (rc == EOK) ? ENOMEM : rc;
			}
			rc = fat_node_get_core(&nodep, idx);
			fibril_mutex_unlock(&idx->lock);
			if (rc != EOK) {
				(void) fat_directory_close(&di);
				return rc;
			}
			*rfn = FS_NODE(nodep);
			rc = fat_directory_close(&di);
			if (rc != EOK)
				(void) fat_node_put(*rfn);
			return rc;
		} else {
			rc = fat_directory_next(&di);
			if (rc != EOK)
				break;
		}
	}
	(void) fat_directory_close(&di);
	*rfn = NULL;
	return EOK;
}

/** Instantiate a FAT in-core node. */
int fat_node_get(fs_node_t **rfn, service_id_t service_id, fs_index_t index)
{
	fat_node_t *nodep;
	fat_idx_t *idxp;
	int rc;

	idxp = fat_idx_get_by_index(service_id, index);
	if (!idxp) {
		*rfn = NULL;
		return EOK;
	}
	/* idxp->lock held */
	rc = fat_node_get_core(&nodep, idxp);
	fibril_mutex_unlock(&idxp->lock);
	if (rc == EOK)
		*rfn = FS_NODE(nodep);
	return rc;
}

int fat_node_open(fs_node_t *fn)
{
	/*
	 * Opening a file is stateless, nothing
	 * to be done here.
	 */
	return EOK;
}

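/** Drop a reference to a node; cache it on the free list or destroy it. */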
int fat_node_put(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	bool destroy = false;

	fibril_mutex_lock(&nodep->lock);
	if (!--nodep->refcnt) {
		if (nodep->idx) {
			fibril_mutex_lock(&ffn_mutex);
			list_append(&nodep->ffn_link, &ffn_list);
			fibril_mutex_unlock(&ffn_mutex);
		} else {
			/*
			 * The node does not have any index structure associated
			 * with itself. This can only mean that we are releasing
			 * the node after a failed attempt to allocate the index
			 * structure for it.
			 */
			destroy = true;
		}
	}
	fibril_mutex_unlock(&nodep->lock);
	if (destroy) {
		free(nodep->bp);
		free(nodep);
	}
	return EOK;
}

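/** Create a new file or directory node; the node is not linked anywhere yet. */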
int fat_create_node(fs_node_t **rfn, service_id_t service_id, int flags)
{
	fat_idx_t *idxp;
	fat_node_t *nodep;
	fat_bs_t *bs;
	fat_cluster_t mcl, lcl;
	int rc;

	bs = block_bb_get(service_id);
	if (flags & L_DIRECTORY) {
		/* allocate a cluster */
		rc = fat_alloc_clusters(bs, service_id, 1, &mcl, &lcl);
		if (rc != EOK)
			return rc;
		/* populate the new cluster with unused dentries */
		rc = fat_zero_cluster(bs, service_id, mcl);
		if (rc != EOK) {
			(void) fat_free_clusters(bs, service_id, mcl);
			return rc;
		}
	}

	rc = fat_node_get_new(&nodep);
	if (rc != EOK) {
		if (flags & L_DIRECTORY)
			(void) fat_free_clusters(bs, service_id, mcl);
		return rc;
	}
	rc = fat_idx_get_new(&idxp, service_id);
	if (rc != EOK) {
		if (flags & L_DIRECTORY)
			(void) fat_free_clusters(bs, service_id, mcl);
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}
	/* idxp->lock held */
	if (flags & L_DIRECTORY) {
		nodep->type = FAT_DIRECTORY;
		nodep->firstc = mcl;
		nodep->size = BPS(bs) * SPC(bs);
	} else {
		nodep->type = FAT_FILE;
		nodep->firstc = FAT_CLST_RES0;
		nodep->size = 0;
	}
	nodep->lnkcnt = 0; /* not linked anywhere */
	nodep->refcnt = 1;
	nodep->dirty = true;

	nodep->idx = idxp;
	idxp->nodep = nodep;

	fibril_mutex_unlock(&idxp->lock);
	*rfn = FS_NODE(nodep);
	return EOK;
}

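/** Destroy an unlinked, childless node and free any clusters it owns. */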
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	bool has_children;
	int rc;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * or its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node must not have any children.
	 */
	rc = fat_has_children(&has_children, fn);
	if (rc != EOK)
		return rc;
	assert(!has_children);

	bs = block_bb_get(nodep->idx->service_id);
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->service_id,
		    nodep->firstc);
	}

	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}

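/** Link the child node 'cfn' into the directory 'pfn' under the given name. */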
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	fat_directory_t di;
	fat_dentry_t de;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_valid_name(name))
		return ENOTSUP;

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->service_id);
	rc = fat_directory_open(parentp, &di);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}

	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except for the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(&de, 0, sizeof(fat_dentry_t));

	rc = fat_directory_write(&di, name, &de);
	if (rc != EOK) {
		(void) fat_directory_close(&di);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_directory_close(&di);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}

	fibril_mutex_unlock(&parentp->idx->lock);

	fibril_mutex_lock(&childp->idx->lock);

	if (childp->type == FAT_DIRECTORY) {
		/*
		 * If possible, create the Sub-directory Identifier Entry and
		 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
		 * These entries are not mandatory according to Standard
		 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
		 * rather a sign of our good will.
		 */
		rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			/*
			 * Rather than returning an error, simply skip the
			 * creation of these two entries.
			 */
			goto skip_dots;
		}
		d = (fat_dentry_t *) b->data;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (memcmp(d->name, FAT_NAME_DOT, FAT_NAME_LEN)) == 0) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			d->firstc = host2uint16_t_le(childp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		d++;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (memcmp(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN) == 0)) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			d->firstc = (parentp->firstc == FAT_ROOT_CLST(bs)) ?
			    host2uint16_t_le(FAT_CLST_ROOTPAR) :
			    host2uint16_t_le(parentp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		b->dirty = true; /* need to sync block */
		/*
		 * Ignore the return value as we would have fallen through on error
		 * anyway.
		 */
		(void) block_put(b);
	}
skip_dots:

	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = di.pos; /* di.pos holds absolute position of SFN entry */
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true; /* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}

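/** Remove the directory entry that links 'cfn' under the name 'nm' in 'pfn'. */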
int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	bool has_children;
	int rc;

	if (!parentp)
		return EBUSY;

	rc = fat_has_children(&has_children, cfn);
	if (rc != EOK)
		return rc;
	if (has_children)
		return ENOTEMPTY;

	fibril_mutex_lock(&parentp->lock);
	fibril_mutex_lock(&childp->lock);
	assert(childp->lnkcnt == 1);
	fibril_mutex_lock(&childp->idx->lock);

	fat_directory_t di;
	rc = fat_directory_open(parentp, &di);
	if (rc != EOK)
		goto error;
	rc = fat_directory_seek(&di, childp->idx->pdi);
	if (rc != EOK)
		goto error;
	rc = fat_directory_erase(&di);
	if (rc != EOK)
		goto error;
	rc = fat_directory_close(&di);
	if (rc != EOK)
		goto error;

	/* remove the index structure from the position hash */
	fat_idx_hashout(childp->idx);
	/* clear position information */
	childp->idx->pfc = FAT_CLST_RES0;
	childp->idx->pdi = 0;
	fibril_mutex_unlock(&childp->idx->lock);
	childp->lnkcnt = 0;
	childp->refcnt++; /* keep the node in memory until destroyed */
	childp->dirty = true;
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);

	return EOK;

error:
	(void) fat_directory_close(&di);
	fibril_mutex_unlock(&childp->idx->lock);
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);
	return rc;
}

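/** Report whether a directory node contains at least one valid entry. */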
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}

	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->service_id);

	blocks = nodep->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}

fs_index_t fat_index_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->idx->index;
}

aoff64_t fat_size_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->size;
}

unsigned fat_lnkcnt_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->lnkcnt;
}

bool fat_is_directory(fs_node_t *fn)
{
	return FAT_NODE(fn)->type == FAT_DIRECTORY;
}

bool fat_is_file(fs_node_t *fn)
{
	return FAT_NODE(fn)->type == FAT_FILE;
}

service_id_t fat_service_get(fs_node_t *node)
{
	return 0;
}

int fat_size_block(service_id_t service_id, uint32_t *size)
{
	fat_bs_t *bs;

	bs = block_bb_get(service_id);
	*size = BPC(bs);

	return EOK;
}

int fat_total_block_count(service_id_t service_id, uint64_t *count)
{
	fat_bs_t *bs;

	bs = block_bb_get(service_id);
	*count = (SPC(bs)) ? TS(bs) / SPC(bs) : 0;

	return EOK;
}

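/** Count free clusters by scanning FAT1; a "block" here means one cluster. */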
int fat_free_block_count(service_id_t service_id, uint64_t *count)
{
	fat_bs_t *bs;
	fat_cluster_t e0;
	uint64_t block_count;
	int rc;
	uint32_t cluster_no, clusters;

	block_count = 0;
	bs = block_bb_get(service_id);
	clusters = (SPC(bs)) ? TS(bs) / SPC(bs) : 0;
	for (cluster_no = 0; cluster_no < clusters; cluster_no++) {
		rc = fat_get_cluster(bs, service_id, FAT1, cluster_no, &e0);
		if (rc != EOK)
			return EIO;

		if (e0 == FAT_CLST_RES0)
			block_count++;
	}
	*count = block_count;

	return EOK;
}

/** libfs operations */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_open = fat_node_open,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file,
	.service_get = fat_service_get,
	.size_block = fat_size_block,
	.total_block_count = fat_total_block_count,
	.free_block_count = fat_free_block_count
};

/*
 * FAT VFS_OUT operations.
 */

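/* File system probing is not implemented for FAT yet; only MFS supports it at this revision. */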
static int fat_fsprobe(service_id_t service_id, vfs_fs_probe_info_t *info)
{
	return ENOTSUP;
}

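/** Mount a FAT instance: parse options, validate the boot sector and set up the root node. */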
static int
fat_mounted(service_id_t service_id, const char *opts, fs_index_t *index,
    aoff64_t *size, unsigned *linkcnt)
{
	enum cache_mode cmode = CACHE_MODE_WB;
	fat_bs_t *bs;
	fat_instance_t *instance;
	int rc;

	instance = malloc(sizeof(fat_instance_t));
	if (!instance)
		return ENOMEM;
	instance->lfn_enabled = true;

	/* Parse mount options. */
	char *mntopts = (char *) opts;
	char *opt;
	while ((opt = str_tok(mntopts, " ,", &mntopts)) != NULL) {
		if (str_cmp(opt, "wtcache") == 0)
			cmode = CACHE_MODE_WT;
		else if (str_cmp(opt, "nolfn") == 0)
			instance->lfn_enabled = false;
	}

	/* initialize libblock */
	rc = block_init(service_id, BS_SIZE);
	if (rc != EOK) {
		free(instance);
		return rc;
	}

	/* prepare the boot block */
	rc = block_bb_read(service_id, BS_BLOCK);
	if (rc != EOK) {
		free(instance);
		block_fini(service_id);
		return rc;
	}

	/* get the buffer with the boot sector */
	bs = block_bb_get(service_id);

	if (BPS(bs) != BS_SIZE) {
		free(instance);
		block_fini(service_id);
		return ENOTSUP;
	}

	/* Initialize the block cache */
	rc = block_cache_init(service_id, BPS(bs), 0 /* XXX */, cmode);
	if (rc != EOK) {
		free(instance);
		block_fini(service_id);
		return rc;
	}

	/* Do some simple sanity checks on the file system. */
	rc = fat_sanity_check(bs, service_id);
	if (rc != EOK) {
		free(instance);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		return rc;
	}

	rc = fat_idx_init_by_service_id(service_id);
	if (rc != EOK) {
		free(instance);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		return rc;
	}

	/* Initialize the root node. */
	fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
	if (!rfn) {
		free(instance);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return ENOMEM;
	}

	fs_node_initialize(rfn);
	fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
	if (!rootp) {
		free(instance);
		free(rfn);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return ENOMEM;
	}
	fat_node_initialize(rootp);

	fat_idx_t *ridxp = fat_idx_get_by_pos(service_id, FAT_CLST_ROOTPAR, 0);
	if (!ridxp) {
		free(instance);
		free(rfn);
		free(rootp);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return ENOMEM;
	}
	assert(ridxp->index == 0);
	/* ridxp->lock held */

	rootp->type = FAT_DIRECTORY;
	rootp->firstc = FAT_ROOT_CLST(bs);
	rootp->refcnt = 1;
	rootp->lnkcnt = 0; /* FS root is not linked */

	if (FAT_IS_FAT32(bs)) {
		uint32_t clusters;
		rc = fat_clusters_get(&clusters, bs, service_id, rootp->firstc);
		if (rc != EOK) {
			fibril_mutex_unlock(&ridxp->lock);
			free(instance);
			free(rfn);
			free(rootp);
			(void) block_cache_fini(service_id);
			block_fini(service_id);
			fat_idx_fini_by_service_id(service_id);
			return ENOTSUP;
		}
		rootp->size = BPS(bs) * SPC(bs) * clusters;
	} else
		rootp->size = RDE(bs) * sizeof(fat_dentry_t);

	rc = fs_instance_create(service_id, instance);
	if (rc != EOK) {
		fibril_mutex_unlock(&ridxp->lock);
		free(instance);
		free(rfn);
		free(rootp);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return rc;
	}

	rootp->idx = ridxp;
	ridxp->nodep = rootp;
	rootp->bp = rfn;
	rfn->data = rootp;

	fibril_mutex_unlock(&ridxp->lock);

	*index = ridxp->index;
	*size = rootp->size;
	*linkcnt = rootp->lnkcnt;

	return EOK;
}

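/** Invalidate the free-cluster counter in the FAT32 FS info sector. */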
static int fat_update_fat32_fsinfo(service_id_t service_id)
{
	fat_bs_t *bs;
	fat32_fsinfo_t *info;
	block_t *b;
	int rc;

	bs = block_bb_get(service_id);
	assert(FAT_IS_FAT32(bs));

	rc = block_get(&b, service_id, uint16_t_le2host(bs->fat32.fsinfo_sec),
	    BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	info = (fat32_fsinfo_t *) b->data;

	if (memcmp(info->sig1, FAT32_FSINFO_SIG1, sizeof(info->sig1)) != 0 ||
	    memcmp(info->sig2, FAT32_FSINFO_SIG2, sizeof(info->sig2)) != 0 ||
	    memcmp(info->sig3, FAT32_FSINFO_SIG3, sizeof(info->sig3)) != 0) {
		(void) block_put(b);
		return EINVAL;
	}

	/* For now, invalidate the counter. */
	info->free_clusters = host2uint32_t_le(-1);

	b->dirty = true;
	return block_put(b);
}

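/** Unmount: verify that the root node is idle, then flush and release all per-instance state. */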
static int fat_unmounted(service_id_t service_id)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	int rc;

	bs = block_bb_get(service_id);

	rc = fat_root_get(&fn, service_id);
	if (rc != EOK)
		return rc;
	nodep = FAT_NODE(fn);

	/*
	 * We expect exactly two references on the root node. One for the
	 * fat_root_get() above and one created in fat_mounted().
	 */
	if (nodep->refcnt != 2) {
		(void) fat_node_put(fn);
		return EBUSY;
	}

	if (FAT_IS_FAT32(bs)) {
		/*
		 * Attempt to update the FAT32 FS info.
		 */
		(void) fat_update_fat32_fsinfo(service_id);
	}

	/*
	 * Put the root node and force it to the FAT free node list.
	 */
	(void) fat_node_put(fn);
	(void) fat_node_put(fn);

	/*
	 * Perform cleanup of the node structures, index structures and
	 * associated data. Write back this file system's dirty blocks and
	 * stop using libblock for this instance.
	 */
	(void) fat_node_fini_by_service_id(service_id);
	fat_idx_fini_by_service_id(service_id);
	(void) block_cache_fini(service_id);
	block_fini(service_id);

	void *data;
	if (fs_instance_get(service_id, &data) == EOK) {
		fs_instance_destroy(service_id);
		free(data);
	}

	return EOK;
}

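/** Read file data (at most one block per call) or a single directory entry name. */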
static int
fat_read(service_id_t service_id, fs_index_t index, aoff64_t pos,
    size_t *rbytes)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, service_id, index);
	if (rc != EOK)
		return rc;
	if (!fn)
		return ENOENT;
	nodep = FAT_NODE(fn);

	ipc_callid_t callid;
	size_t len;
	if (!async_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		async_answer_0(callid, EINVAL);
		return EINVAL;
	}

	bs = block_bb_get(service_id);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) async_data_read_finalize(callid, NULL, 0);
		} else {
			bytes = min(len, BPS(bs) - pos % BPS(bs));
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
			    BLOCK_FLAGS_NONE);
			if (rc != EOK) {
				fat_node_put(fn);
				async_answer_0(callid, rc);
				return rc;
			}
			(void) async_data_read_finalize(callid,
			    b->data + pos % BPS(bs), bytes);
			rc = block_put(b);
			if (rc != EOK) {
				fat_node_put(fn);
				return rc;
			}
		}
	} else {
		aoff64_t spos = pos;
		char name[FAT_LFN_NAME_SIZE];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % BPS(bs) == 0);
		assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

		fat_directory_t di;
		rc = fat_directory_open(nodep, &di);
		if (rc != EOK)
			goto err;
		rc = fat_directory_seek(&di, pos);
		if (rc != EOK) {
			(void) fat_directory_close(&di);
			goto err;
		}

		rc = fat_directory_read(&di, name, &d);
		if (rc == EOK)
			goto hit;
		if (rc == ENOENT)
			goto miss;

err:
		(void) fat_node_put(fn);
		async_answer_0(callid, rc);
		return rc;

miss:
		rc = fat_directory_close(&di);
		if (rc != EOK)
			goto err;
		rc = fat_node_put(fn);
		async_answer_0(callid, rc != EOK ? rc : ENOENT);
		*rbytes = 0;
		return rc != EOK ? rc : ENOENT;

hit:
		pos = di.pos;
		rc = fat_directory_close(&di);
		if (rc != EOK)
			goto err;
		(void) async_data_read_finalize(callid, name,
		    str_size(name) + 1);
		bytes = (pos - spos) + 1;
	}

	rc = fat_node_put(fn);
	*rbytes = bytes;
	return rc;
}

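/** Write at most one block, allocating and zeroing new clusters when writing past the last cluster. */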
static int
fat_write(service_id_t service_id, fs_index_t index, aoff64_t pos,
    size_t *wbytes, aoff64_t *nsize)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	aoff64_t boundary;
	int flags = BLOCK_FLAGS_NONE;
	int rc;

	rc = fat_node_get(&fn, service_id, index);
	if (rc != EOK)
		return rc;
	if (!fn)
		return ENOENT;
	nodep = FAT_NODE(fn);

	ipc_callid_t callid;
	size_t len;
	if (!async_data_write_receive(&callid, &len)) {
		(void) fat_node_put(fn);
		async_answer_0(callid, EINVAL);
		return EINVAL;
	}

	bs = block_bb_get(service_id);

	/*
	 * In all scenarios, we will attempt to write out only one block worth
	 * of data at maximum. There might be some more efficient approaches,
	 * but this one greatly simplifies fat_write(). Note that we can afford
	 * to do this because the client must be ready to handle the return
	 * value signaling a smaller number of bytes written.
	 */
	bytes = min(len, BPS(bs) - pos % BPS(bs));
	if (bytes == BPS(bs))
		flags |= BLOCK_FLAGS_NOREAD;

	boundary = ROUND_UP(nodep->size, BPC(bs));
	if (pos < boundary) {
		/*
		 * This is the easier case - we are either overwriting already
		 * existing contents or writing behind the EOF, but still within
		 * the limits of the last cluster. The node size may grow to the
		 * next block size boundary.
		 */
		rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
		if (rc != EOK) {
			(void) fat_node_put(fn);
			async_answer_0(callid, rc);
			return rc;
		}
		rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
		if (rc != EOK) {
			(void) fat_node_put(fn);
			async_answer_0(callid, rc);
			return rc;
		}
		(void) async_data_write_finalize(callid,
		    b->data + pos % BPS(bs), bytes);
		b->dirty = true; /* need to sync block */
		rc = block_put(b);
		if (rc != EOK) {
			(void) fat_node_put(fn);
			return rc;
		}
		if (pos + bytes > nodep->size) {
			nodep->size = pos + bytes;
			nodep->dirty = true; /* need to sync node */
		}
		*wbytes = bytes;
		*nsize = nodep->size;
		rc = fat_node_put(fn);
		return rc;
	} else {
		/*
		 * This is the more difficult case. We must allocate new
		 * clusters for the node and zero them out.
		 */
		unsigned nclsts;
		fat_cluster_t mcl, lcl;

		nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
		/* create an independent chain of nclsts clusters in all FATs */
		rc = fat_alloc_clusters(bs, service_id, nclsts, &mcl, &lcl);
		if (rc != EOK) {
			/* could not allocate a chain of nclsts clusters */
			(void) fat_node_put(fn);
			async_answer_0(callid, rc);
			return rc;
		}
		/* zero fill any gaps */
		rc = fat_fill_gap(bs, nodep, mcl, pos);
		if (rc != EOK) {
			(void) fat_free_clusters(bs, service_id, mcl);
			(void) fat_node_put(fn);
			async_answer_0(callid, rc);
			return rc;
		}
		rc = _fat_block_get(&b, bs, service_id, lcl, NULL,
		    (pos / BPS(bs)) % SPC(bs), flags);
		if (rc != EOK) {
			(void) fat_free_clusters(bs, service_id, mcl);
			(void) fat_node_put(fn);
			async_answer_0(callid, rc);
			return rc;
		}
		(void) async_data_write_finalize(callid,
		    b->data + pos % BPS(bs), bytes);
		b->dirty = true; /* need to sync block */
		rc = block_put(b);
		if (rc != EOK) {
			(void) fat_free_clusters(bs, service_id, mcl);
			(void) fat_node_put(fn);
			return rc;
		}
		/*
		 * Append the cluster chain starting in mcl to the end of the
		 * node's cluster chain.
		 */
		rc = fat_append_clusters(bs, nodep, mcl, lcl);
		if (rc != EOK) {
			(void) fat_free_clusters(bs, service_id, mcl);
			(void) fat_node_put(fn);
			return rc;
		}
		*nsize = nodep->size = pos + bytes;
		nodep->dirty = true; /* need to sync node */
		rc = fat_node_put(fn);
		*wbytes = bytes;
		return rc;
	}
}

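/** Truncate a node to the given size; growing a file this way is not supported. */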
static int
fat_truncate(service_id_t service_id, fs_index_t index, aoff64_t size)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	int rc;

	rc = fat_node_get(&fn, service_id, index);
	if (rc != EOK)
		return rc;
	if (!fn)
		return ENOENT;
	nodep = FAT_NODE(fn);

	bs = block_bb_get(service_id);

	if (nodep->size == size) {
		rc = EOK;
	} else if (nodep->size < size) {
		/*
		 * The standard says we have the freedom to grow the node.
		 * For now, we simply return an error.
		 */
		rc = EINVAL;
	} else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
		/*
		 * The node will be shrunk, but no clusters will be deallocated.
		 */
		nodep->size = size;
		nodep->dirty = true; /* need to sync node */
		rc = EOK;
	} else {
		/*
		 * The node will be shrunk, clusters will be deallocated.
		 */
		if (size == 0) {
			rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
			if (rc != EOK)
				goto out;
		} else {
			fat_cluster_t lastc;
			rc = fat_cluster_walk(bs, service_id, nodep->firstc,
			    &lastc, NULL, (size - 1) / BPC(bs));
			if (rc != EOK)
				goto out;
			rc = fat_chop_clusters(bs, nodep, lastc);
			if (rc != EOK)
				goto out;
		}
		nodep->size = size;
		nodep->dirty = true; /* need to sync node */
		rc = EOK;
	}
out:
	fat_node_put(fn);
	return rc;
}

static int fat_close(service_id_t service_id, fs_index_t index)
{
	return EOK;
}

static int fat_destroy(service_id_t service_id, fs_index_t index)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	rc = fat_node_get(&fn, service_id, index);
	if (rc != EOK)
		return rc;
	if (!fn)
		return ENOENT;

	nodep = FAT_NODE(fn);
	/*
	 * We should have exactly two references. One for the above
	 * call to fat_node_get() and one from fat_unlink().
	 */
	assert(nodep->refcnt == 2);

	rc = fat_destroy_node(fn);
	return rc;
}

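/** Force the node's on-disk directory entry to be rewritten from the in-core state. */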
static int fat_sync(service_id_t service_id, fs_index_t index)
{
	fs_node_t *fn;
	int rc = fat_node_get(&fn, service_id, index);
	if (rc != EOK)
		return rc;
	if (!fn)
		return ENOENT;

	fat_node_t *nodep = FAT_NODE(fn);

	nodep->dirty = true;
	rc = fat_node_sync(nodep);

	fat_node_put(fn);
	return rc;
}

vfs_out_ops_t fat_ops = {
	.fsprobe = fat_fsprobe,
	.mounted = fat_mounted,
	.unmounted = fat_unmounted,
	.read = fat_read,
	.write = fat_write,
	.truncate = fat_truncate,
	.close = fat_close,
	.destroy = fat_destroy,
	.sync = fat_sync,
};

/**
 * @}
 */