source: mainline/uspace/srv/fs/fat/fat_ops.c@ c9e954c

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since c9e954c was 758f8d5, checked in by Maurizio Lombardi <m.lombardi85@…>, 13 years ago

merge the df branch

  • Property mode set to 100644
File size: 35.5 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * Copyright (c) 2011 Oleg Romanenko
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup fs
31 * @{
32 */
33
34/**
35 * @file fat_ops.c
36 * @brief Implementation of VFS operations for the FAT file system server.
37 */
38
39#include "fat.h"
40#include "fat_dentry.h"
41#include "fat_fat.h"
42#include "fat_directory.h"
43#include "../../vfs/vfs.h"
44#include <libfs.h>
45#include <block.h>
46#include <ipc/services.h>
47#include <ipc/loc.h>
48#include <macros.h>
49#include <async.h>
50#include <errno.h>
51#include <str.h>
52#include <byteorder.h>
53#include <adt/hash_table.h>
54#include <adt/list.h>
55#include <assert.h>
56#include <fibril_synch.h>
57#include <sys/mman.h>
58#include <align.h>
59#include <malloc.h>
60#include <str.h>
61
62#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
63#define FS_NODE(node) ((node) ? (node)->bp : NULL)
64
65#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
66#define BPC(bs) (BPS((bs)) * SPC((bs)))
67
68/** Mutex protecting the list of cached free FAT nodes. */
69static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
70
71/** List of cached free FAT nodes. */
72static LIST_INITIALIZE(ffn_list);
73
74/*
75 * Forward declarations of FAT libfs operations.
76 */
77static int fat_root_get(fs_node_t **, service_id_t);
78static int fat_match(fs_node_t **, fs_node_t *, const char *);
79static int fat_node_get(fs_node_t **, service_id_t, fs_index_t);
80static int fat_node_open(fs_node_t *);
81static int fat_node_put(fs_node_t *);
82static int fat_create_node(fs_node_t **, service_id_t, int);
83static int fat_destroy_node(fs_node_t *);
84static int fat_link(fs_node_t *, fs_node_t *, const char *);
85static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
86static int fat_has_children(bool *, fs_node_t *);
87static fs_index_t fat_index_get(fs_node_t *);
88static aoff64_t fat_size_get(fs_node_t *);
89static unsigned fat_lnkcnt_get(fs_node_t *);
90static bool fat_is_directory(fs_node_t *);
91static bool fat_is_file(fs_node_t *node);
92static service_id_t fat_service_get(fs_node_t *node);
93static int fat_size_block(service_id_t, uint32_t *);
94static int fat_total_block_count(service_id_t, uint64_t *);
95static int fat_free_block_count(service_id_t, uint64_t *);
96
97/*
98 * Helper functions.
99 */
100static void fat_node_initialize(fat_node_t *node)
101{
102 fibril_mutex_initialize(&node->lock);
103 node->bp = NULL;
104 node->idx = NULL;
105 node->type = 0;
106 link_initialize(&node->ffn_link);
107 node->size = 0;
108 node->lnkcnt = 0;
109 node->refcnt = 0;
110 node->dirty = false;
111 node->lastc_cached_valid = false;
112 node->lastc_cached_value = 0;
113 node->currc_cached_valid = false;
114 node->currc_cached_bn = 0;
115 node->currc_cached_value = 0;
116}
117
/** Write a dirty in-core node back to its on-disk directory entry.
 *
 * @param node	Node to synchronize; must be marked dirty.
 *
 * @return	EOK on success or an error code from the block layer.
 */
static int fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	int rc;

	assert(node->dirty);

	bs = block_bb_get(node->idx->service_id);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, node->idx->service_id, node->idx->pfc,
	    NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
	    BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	/* Locate this node's dentry within the block. */
	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));

	/*
	 * NOTE(review): only the low 16 bits of firstc are stored here;
	 * firstc_hi is not updated, although fat_node_get_core() reads it
	 * on FAT32 — verify this is correct for FAT32 volumes.
	 */
	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}

	/* TODO: update other fields? (e.g time fields) */

	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	return rc;
}
151
/** Free all cached free nodes belonging to an unmounted instance.
 *
 * Walks the global free node list, syncing dirty nodes and freeing every
 * node whose index structure refers to @a service_id.
 *
 * @param service_id	File system instance being torn down.
 *
 * @return	EOK on success or an error code from fat_node_sync().
 */
static int fat_node_fini_by_service_id(service_id_t service_id)
{
	int rc;

	/*
	 * We are called from fat_unmounted() and assume that there are already
	 * no nodes belonging to this instance with non-zero refcount. Therefore
	 * it is sufficient to clean up only the FAT free node list.
	 */

restart:
	fibril_mutex_lock(&ffn_mutex);
	list_foreach(ffn_list, ffn_link, fat_node_t, nodep) {
		/*
		 * Trylock to respect the node-before-list lock order; on
		 * contention drop everything and start the walk over.
		 */
		if (!fibril_mutex_trylock(&nodep->lock)) {
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (!fibril_mutex_trylock(&nodep->idx->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (nodep->idx->service_id != service_id) {
			/* Node belongs to a different instance; skip it. */
			fibril_mutex_unlock(&nodep->idx->lock);
			fibril_mutex_unlock(&nodep->lock);
			continue;
		}

		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);

		/*
		 * We can unlock the node and its index structure because we are
		 * the last player on this playground and VFS is preventing new
		 * players from entering.
		 */
		fibril_mutex_unlock(&nodep->idx->lock);
		fibril_mutex_unlock(&nodep->lock);

		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			if (rc != EOK)
				return rc;
		}
		nodep->idx->nodep = NULL;
		free(nodep->bp);
		free(nodep);

		/* Need to restart because we changed ffn_list. */
		goto restart;
	}
	fibril_mutex_unlock(&ffn_mutex);

	return EOK;
}
207
/** Obtain a fresh node structure, recycling a cached free node if possible.
 *
 * On success the returned node is initialized and linked with a new
 * fs_node_t, but not yet associated with any index structure.
 *
 * @param nodepp	Where to store the pointer to the new node.
 *
 * @return	EOK on success, ENOMEM on allocation failure, or an error
 *		code from fat_node_sync().
 */
static int fat_node_get_new(fat_node_t **nodepp)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_list)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(list_first(&ffn_list), fat_node_t,
		    ffn_link);
		/* Trylocks avoid deadlock; on contention allocate instead. */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			/* Flush the recycled node's dentry before reuse. */
			rc = fat_node_sync(nodep);
			if (rc != EOK) {
				idxp_tmp->nodep = NULL;
				fibril_mutex_unlock(&nodep->lock);
				fibril_mutex_unlock(&idxp_tmp->lock);
				free(nodep->bp);
				free(nodep);
				return rc;
			}
		}
		/* Detach the recycled node from its old index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return ENOMEM;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return ENOMEM;
		}
	}
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;

	*nodepp = nodep;
	return EOK;
}
265
/** Internal version of fat_node_get().
 *
 * Returns the in-core node for @a idxp, instantiating it from the on-disk
 * dentry if it is not already in memory.
 *
 * @param nodepp	Where to store the pointer to the node.
 * @param idxp		Locked index structure.
 *
 * @return	EOK on success or an error code.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			/* First reference: take the node off the free list. */
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->service_id);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->service_id, idxp->pfc, NULL,
	    (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
	if (FAT_IS_FAT32(bs)) {
		/* FAT32 splits the first cluster into two 16-bit halves. */
		nodep->firstc = uint16_t_le2host(d->firstc_lo) |
		    (uint16_t_le2host(d->firstc_hi) << 16);
	} else
		nodep->firstc = uint16_t_le2host(d->firstc);

	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;

		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint32_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->service_id,
		    nodep->firstc);
		if (rc != EOK) {
			(void) block_put(b);
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = BPS(bs) * SPC(bs) * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}

	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}
364
365/*
366 * FAT libfs operations.
367 */
368
/** Return the root node of a FAT instance.
 *
 * The root node always has index 0 (established in fat_mounted()).
 */
int fat_root_get(fs_node_t **rfn, service_id_t service_id)
{
	return fat_node_get(rfn, service_id, 0);
}
373
/** Look up a directory entry by name.
 *
 * @param rfn		On success *rfn is the matching child node, or NULL
 *			when the component does not exist.
 * @param pfn		Parent directory node.
 * @param component	Name to look up.
 *
 * @return	EOK on success (including a miss, signalled by *rfn == NULL)
 *		or an error code.
 */
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_LFN_NAME_SIZE];
	fat_dentry_t *d;
	service_id_t service_id;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	service_id = parentp->idx->service_id;
	fibril_mutex_unlock(&parentp->idx->lock);

	fat_directory_t di;
	rc = fat_directory_open(parentp, &di);
	if (rc != EOK)
		return rc;

	while (fat_directory_read(&di, name, &d) == EOK) {
		if (fat_dentry_namecmp(name, component) == 0) {
			/* hit */
			fat_node_t *nodep;
			/* Offset of the dentry within its block. */
			aoff64_t o = di.pos %
			    (BPS(di.bs) / sizeof(fat_dentry_t));
			fat_idx_t *idx = fat_idx_get_by_pos(service_id,
			    parentp->firstc, di.bnum * DPS(di.bs) + o);
			if (!idx) {
				/*
				 * Can happen if memory is low or if we
				 * run out of 32-bit indices.
				 */
				rc = fat_directory_close(&di);
				return (rc == EOK) ? ENOMEM : rc;
			}
			rc = fat_node_get_core(&nodep, idx);
			fibril_mutex_unlock(&idx->lock);
			if (rc != EOK) {
				(void) fat_directory_close(&di);
				return rc;
			}
			*rfn = FS_NODE(nodep);
			rc = fat_directory_close(&di);
			if (rc != EOK)
				(void) fat_node_put(*rfn);
			return rc;
		} else {
			rc = fat_directory_next(&di);
			if (rc != EOK)
				break;
		}
	}
	(void) fat_directory_close(&di);
	*rfn = NULL;
	return EOK;
}
428
/** Instantiate a FAT in-core node.
 *
 * @param rfn		On success *rfn is the node, or NULL when no index
 *			structure exists for @a index.
 * @param service_id	File system instance.
 * @param index		VFS index of the node.
 *
 * @return	EOK on success or an error code.
 */
int fat_node_get(fs_node_t **rfn, service_id_t service_id, fs_index_t index)
{
	fat_node_t *nodep;
	fat_idx_t *idxp;
	int rc;

	idxp = fat_idx_get_by_index(service_id, index);
	if (!idxp) {
		/* Unknown index: report a miss, not an error. */
		*rfn = NULL;
		return EOK;
	}
	/* idxp->lock held */
	rc = fat_node_get_core(&nodep, idxp);
	fibril_mutex_unlock(&idxp->lock);
	if (rc == EOK)
		*rfn = FS_NODE(nodep);
	return rc;
}
448
/** Open a node (no-op for FAT). */
int fat_node_open(fs_node_t *fn)
{
	/*
	 * Opening a file is stateless, nothing
	 * to be done here.
	 */
	return EOK;
}
457
/** Drop a reference to a node.
 *
 * When the last reference is dropped, the node is either cached on the
 * free node list (if it has an index structure) or freed outright.
 *
 * @param fn	Node to release.
 *
 * @return	Always EOK.
 */
int fat_node_put(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	bool destroy = false;

	fibril_mutex_lock(&nodep->lock);
	if (!--nodep->refcnt) {
		if (nodep->idx) {
			/* Keep the node cached for later reuse. */
			fibril_mutex_lock(&ffn_mutex);
			list_append(&nodep->ffn_link, &ffn_list);
			fibril_mutex_unlock(&ffn_mutex);
		} else {
			/*
			 * The node does not have any index structure associated
			 * with itself. This can only mean that we are releasing
			 * the node after a failed attempt to allocate the index
			 * structure for it.
			 */
			destroy = true;
		}
	}
	fibril_mutex_unlock(&nodep->lock);
	if (destroy) {
		/* Free outside the node lock. */
		free(nodep->bp);
		free(nodep);
	}
	return EOK;
}
486
487int fat_create_node(fs_node_t **rfn, service_id_t service_id, int flags)
488{
489 fat_idx_t *idxp;
490 fat_node_t *nodep;
491 fat_bs_t *bs;
492 fat_cluster_t mcl, lcl;
493 int rc;
494
495 bs = block_bb_get(service_id);
496 if (flags & L_DIRECTORY) {
497 /* allocate a cluster */
498 rc = fat_alloc_clusters(bs, service_id, 1, &mcl, &lcl);
499 if (rc != EOK)
500 return rc;
501 /* populate the new cluster with unused dentries */
502 rc = fat_zero_cluster(bs, service_id, mcl);
503 if (rc != EOK) {
504 (void) fat_free_clusters(bs, service_id, mcl);
505 return rc;
506 }
507 }
508
509 rc = fat_node_get_new(&nodep);
510 if (rc != EOK) {
511 (void) fat_free_clusters(bs, service_id, mcl);
512 return rc;
513 }
514 rc = fat_idx_get_new(&idxp, service_id);
515 if (rc != EOK) {
516 (void) fat_free_clusters(bs, service_id, mcl);
517 (void) fat_node_put(FS_NODE(nodep));
518 return rc;
519 }
520 /* idxp->lock held */
521 if (flags & L_DIRECTORY) {
522 nodep->type = FAT_DIRECTORY;
523 nodep->firstc = mcl;
524 nodep->size = BPS(bs) * SPC(bs);
525 } else {
526 nodep->type = FAT_FILE;
527 nodep->firstc = FAT_CLST_RES0;
528 nodep->size = 0;
529 }
530 nodep->lnkcnt = 0; /* not linked anywhere */
531 nodep->refcnt = 1;
532 nodep->dirty = true;
533
534 nodep->idx = idxp;
535 idxp->nodep = nodep;
536
537 fibril_mutex_unlock(&idxp->lock);
538 *rfn = FS_NODE(nodep);
539 return EOK;
540}
541
/** Destroy an unlinked, childless node and free its clusters.
 *
 * @param fn	Node to destroy; must have lnkcnt == 0 and no children.
 *
 * @return	EOK on success or an error code from fat_has_children()
 *		or fat_free_clusters().
 */
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	bool has_children;
	int rc;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	rc = fat_has_children(&has_children, fn);
	if (rc != EOK)
		return rc;
	assert(!has_children);

	bs = block_bb_get(nodep->idx->service_id);
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->service_id,
		    nodep->firstc);
	}

	/* Tear down the node even if cluster freeing failed; rc is kept. */
	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}
578
/** Link a child node into a parent directory under a given name.
 *
 * FAT supports at most one hard link per node; linking an already-linked
 * child fails with EMLINK. For directories, a best-effort attempt is made
 * to create the "." and ".." entries.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node to link (lnkcnt must be 0).
 * @param name	Name for the new directory entry.
 *
 * @return	EOK on success, EMLINK/ENOTSUP or an error code.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	fat_directory_t di;
	fat_dentry_t de;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_valid_name(name))
		return ENOTSUP;

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->service_id);
	rc = fat_directory_open(parentp, &di);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}

	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except of the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(&de, 0, sizeof(fat_dentry_t));

	rc = fat_directory_write(&di, name, &de);
	if (rc != EOK) {
		(void) fat_directory_close(&di);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_directory_close(&di);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}

	fibril_mutex_unlock(&parentp->idx->lock);

	fibril_mutex_lock(&childp->idx->lock);

	if (childp->type == FAT_DIRECTORY) {
		/*
		 * If possible, create the Sub-directory Identifier Entry and
		 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
		 * These entries are not mandatory according to Standard
		 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
		 * rather a sign of our good will.
		 */
		rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			/*
			 * Rather than returning an error, simply skip the
			 * creation of these two entries.
			 */
			goto skip_dots;
		}
		d = (fat_dentry_t *) b->data;
		/* Create "." only if the slot is unused or already holds it. */
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (memcmp(d->name, FAT_NAME_DOT, FAT_NAME_LEN)) == 0) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			d->firstc = host2uint16_t_le(childp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		d++;
		/* Likewise for "..", pointing back at the parent. */
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (memcmp(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN) == 0)) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			d->firstc = (parentp->firstc == FAT_ROOT_CLST(bs)) ?
			    host2uint16_t_le(FAT_CLST_ROOTPAR) :
			    host2uint16_t_le(parentp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		b->dirty = true;		/* need to sync block */
		/*
		 * Ignore the return value as we would have fallen through on error
		 * anyway.
		 */
		(void) block_put(b);
	}
skip_dots:

	/* Record where the child's dentry lives within the parent. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = di.pos;	/* di.pos holds absolute position of SFN entry */
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
699
/** Remove a child's directory entry from its parent.
 *
 * The child must not have children of its own. The child node stays in
 * memory (its refcnt is bumped) until it is destroyed.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node to unlink.
 * @param nm	Name of the entry (unused; the position comes from the
 *		child's index structure).
 *
 * @return	EOK on success, EBUSY/ENOTEMPTY or an error code.
 */
int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	bool has_children;
	int rc;

	if (!parentp)
		return EBUSY;

	rc = fat_has_children(&has_children, cfn);
	if (rc != EOK)
		return rc;
	if (has_children)
		return ENOTEMPTY;

	fibril_mutex_lock(&parentp->lock);
	fibril_mutex_lock(&childp->lock);
	assert(childp->lnkcnt == 1);
	fibril_mutex_lock(&childp->idx->lock);

	fat_directory_t di;
	rc = fat_directory_open(parentp, &di);
	if (rc != EOK)
		goto error;
	/* Seek to the child's dentry and erase it. */
	rc = fat_directory_seek(&di, childp->idx->pdi);
	if (rc != EOK)
		goto error;
	rc = fat_directory_erase(&di);
	if (rc != EOK)
		goto error;
	rc = fat_directory_close(&di);
	if (rc != EOK)
		goto error;

	/* remove the index structure from the position hash */
	fat_idx_hashout(childp->idx);
	/* clear position information */
	childp->idx->pfc = FAT_CLST_RES0;
	childp->idx->pdi = 0;
	fibril_mutex_unlock(&childp->idx->lock);
	childp->lnkcnt = 0;
	childp->refcnt++;	/* keep the node in memory until destroyed */
	childp->dirty = true;
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);

	return EOK;

error:
	(void) fat_directory_close(&di);
	fibril_mutex_unlock(&childp->idx->lock);
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);
	return rc;
}
756
/** Determine whether a directory contains any valid entries.
 *
 * Files trivially have no children. For directories, blocks are scanned
 * until a valid dentry (children) or the last-entry marker (no children)
 * is found.
 *
 * @param has_children	Where to store the result.
 * @param fn		Node to examine.
 *
 * @return	EOK on success or an error code from the block layer.
 */
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}

	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->service_id);

	blocks = nodep->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* Terminator: nothing valid past this point. */
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}
814
815
816fs_index_t fat_index_get(fs_node_t *fn)
817{
818 return FAT_NODE(fn)->idx->index;
819}
820
821aoff64_t fat_size_get(fs_node_t *fn)
822{
823 return FAT_NODE(fn)->size;
824}
825
826unsigned fat_lnkcnt_get(fs_node_t *fn)
827{
828 return FAT_NODE(fn)->lnkcnt;
829}
830
831bool fat_is_directory(fs_node_t *fn)
832{
833 return FAT_NODE(fn)->type == FAT_DIRECTORY;
834}
835
836bool fat_is_file(fs_node_t *fn)
837{
838 return FAT_NODE(fn)->type == FAT_FILE;
839}
840
/** Return the service ID associated with a node.
 *
 * NOTE(review): always returns 0 and ignores @a node — presumably FAT does
 * not expose per-node services; confirm callers expect 0 here.
 */
service_id_t fat_service_get(fs_node_t *node)
{
	return 0;
}
845
846int fat_size_block(service_id_t service_id, uint32_t *size)
847{
848 fat_bs_t *bs;
849
850 bs = block_bb_get(service_id);
851 *size = BPC(bs);
852
853 return EOK;
854}
855
856int fat_total_block_count(service_id_t service_id, uint64_t *count)
857{
858 fat_bs_t *bs;
859
860 bs = block_bb_get(service_id);
861 *count = (SPC(bs)) ? TS(bs) / SPC(bs) : 0;
862
863 return EOK;
864}
865
/** Count the free clusters on the volume by scanning the first FAT.
 *
 * @param service_id	File system instance.
 * @param count		Where to store the number of free clusters.
 *
 * @return	EOK on success, EIO on a FAT read failure.
 */
int fat_free_block_count(service_id_t service_id, uint64_t *count)
{
	fat_bs_t *bs;
	fat_cluster_t e0;
	uint64_t block_count;
	int rc;
	uint32_t cluster_no, clusters;

	block_count = 0;
	bs = block_bb_get(service_id);
	clusters = (SPC(bs)) ? TS(bs) / SPC(bs) : 0;
	for (cluster_no = 0; cluster_no < clusters; cluster_no++) {
		rc = fat_get_cluster(bs, service_id, FAT1, cluster_no, &e0);
		if (rc != EOK)
			return EIO;

		/* A zero FAT entry marks an unallocated cluster. */
		if (e0 == FAT_CLST_RES0)
			block_count++;
	}
	*count = block_count;

	return EOK;
}
889
/** libfs operations
 *
 * Operation table handed to libfs; maps the generic VFS operations onto
 * the FAT-specific implementations defined above.
 */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_open = fat_node_open,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file,
	.service_get = fat_service_get,
	.size_block = fat_size_block,
	.total_block_count = fat_total_block_count,
	.free_block_count = fat_free_block_count
};
912
913/*
914 * FAT VFS_OUT operations.
915 */
916
/** Mount a FAT file system instance.
 *
 * Parses mount options ("wtcache" selects write-through caching, "nolfn"
 * disables long file names), initializes the block layer, sanity-checks the
 * boot sector and constructs the root node.
 *
 * @param service_id	Block service to mount.
 * @param opts		Comma/space-separated mount options.
 * @param index		Where to store the root node's index.
 * @param size		Where to store the root node's size.
 * @param linkcnt	Where to store the root node's link count.
 *
 * @return	EOK on success or an error code.
 */
static int
fat_mounted(service_id_t service_id, const char *opts, fs_index_t *index,
    aoff64_t *size, unsigned *linkcnt)
{
	enum cache_mode cmode = CACHE_MODE_WB;
	fat_bs_t *bs;
	fat_instance_t *instance;
	int rc;

	instance = malloc(sizeof(fat_instance_t));
	if (!instance)
		return ENOMEM;
	instance->lfn_enabled = true;

	/* Parse mount options. */
	char *mntopts = (char *) opts;
	char *saveptr;
	char *opt;
	while ((opt = strtok_r(mntopts, " ,", &saveptr)) != NULL) {
		if (str_cmp(opt, "wtcache") == 0)
			cmode = CACHE_MODE_WT;
		else if (str_cmp(opt, "nolfn") == 0)
			instance->lfn_enabled = false;
		mntopts = NULL;	/* strtok_r continues from saveptr */
	}

	/* initialize libblock */
	rc = block_init(EXCHANGE_SERIALIZE, service_id, BS_SIZE);
	if (rc != EOK) {
		free(instance);
		return rc;
	}

	/* prepare the boot block */
	rc = block_bb_read(service_id, BS_BLOCK);
	if (rc != EOK) {
		free(instance);
		block_fini(service_id);
		return rc;
	}

	/* get the buffer with the boot sector */
	bs = block_bb_get(service_id);

	if (BPS(bs) != BS_SIZE) {
		/* Only 512-byte sectors are supported. */
		free(instance);
		block_fini(service_id);
		return ENOTSUP;
	}

	/* Initialize the block cache */
	rc = block_cache_init(service_id, BPS(bs), 0 /* XXX */, cmode);
	if (rc != EOK) {
		free(instance);
		block_fini(service_id);
		return rc;
	}

	/* Do some simple sanity checks on the file system. */
	rc = fat_sanity_check(bs, service_id);
	if (rc != EOK) {
		free(instance);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		return rc;
	}

	rc = fat_idx_init_by_service_id(service_id);
	if (rc != EOK) {
		free(instance);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		return rc;
	}

	/* Initialize the root node. */
	fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
	if (!rfn) {
		free(instance);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return ENOMEM;
	}

	fs_node_initialize(rfn);
	fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
	if (!rootp) {
		free(instance);
		free(rfn);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return ENOMEM;
	}
	fat_node_initialize(rootp);

	/* The root gets the fixed position (FAT_CLST_ROOTPAR, 0). */
	fat_idx_t *ridxp = fat_idx_get_by_pos(service_id, FAT_CLST_ROOTPAR, 0);
	if (!ridxp) {
		free(instance);
		free(rfn);
		free(rootp);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return ENOMEM;
	}
	assert(ridxp->index == 0);
	/* ridxp->lock held */

	rootp->type = FAT_DIRECTORY;
	rootp->firstc = FAT_ROOT_CLST(bs);
	rootp->refcnt = 1;
	rootp->lnkcnt = 0;	/* FS root is not linked */

	if (FAT_IS_FAT32(bs)) {
		/* FAT32 keeps the root in a regular cluster chain. */
		uint32_t clusters;
		rc = fat_clusters_get(&clusters, bs, service_id, rootp->firstc);
		if (rc != EOK) {
			fibril_mutex_unlock(&ridxp->lock);
			free(instance);
			free(rfn);
			free(rootp);
			(void) block_cache_fini(service_id);
			block_fini(service_id);
			fat_idx_fini_by_service_id(service_id);
			return ENOTSUP;
		}
		rootp->size = BPS(bs) * SPC(bs) * clusters;
	} else
		rootp->size = RDE(bs) * sizeof(fat_dentry_t);

	rc = fs_instance_create(service_id, instance);
	if (rc != EOK) {
		fibril_mutex_unlock(&ridxp->lock);
		free(instance);
		free(rfn);
		free(rootp);
		(void) block_cache_fini(service_id);
		block_fini(service_id);
		fat_idx_fini_by_service_id(service_id);
		return rc;
	}

	rootp->idx = ridxp;
	ridxp->nodep = rootp;
	rootp->bp = rfn;
	rfn->data = rootp;

	fibril_mutex_unlock(&ridxp->lock);

	*index = ridxp->index;
	*size = rootp->size;
	*linkcnt = rootp->lnkcnt;

	return EOK;
}
1074
1075static int fat_update_fat32_fsinfo(service_id_t service_id)
1076{
1077 fat_bs_t *bs;
1078 fat32_fsinfo_t *info;
1079 block_t *b;
1080 int rc;
1081
1082 bs = block_bb_get(service_id);
1083 assert(FAT_IS_FAT32(bs));
1084
1085 rc = block_get(&b, service_id, uint16_t_le2host(bs->fat32.fsinfo_sec),
1086 BLOCK_FLAGS_NONE);
1087 if (rc != EOK)
1088 return rc;
1089
1090 info = (fat32_fsinfo_t *) b->data;
1091
1092 if (memcmp(info->sig1, FAT32_FSINFO_SIG1, sizeof(info->sig1)) != 0 ||
1093 memcmp(info->sig2, FAT32_FSINFO_SIG2, sizeof(info->sig2)) != 0 ||
1094 memcmp(info->sig3, FAT32_FSINFO_SIG3, sizeof(info->sig3)) != 0) {
1095 (void) block_put(b);
1096 return EINVAL;
1097 }
1098
1099 /* For now, invalidate the counter. */
1100 info->free_clusters = host2uint16_t_le(-1);
1101
1102 b->dirty = true;
1103 return block_put(b);
1104}
1105
/** Unmount a FAT instance.
 *
 * Refuses to unmount while any node other than the root is referenced.
 * Flushes and frees all cached nodes and index structures, then shuts
 * down the block layer for this instance.
 *
 * @param service_id	Instance to unmount.
 *
 * @return	EOK on success, EBUSY if the instance is in use.
 */
static int fat_unmounted(service_id_t service_id)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	int rc;

	bs = block_bb_get(service_id);

	rc = fat_root_get(&fn, service_id);
	if (rc != EOK)
		return rc;
	nodep = FAT_NODE(fn);

	/*
	 * We expect exactly two references on the root node. One for the
	 * fat_root_get() above and one created in fat_mounted().
	 */
	if (nodep->refcnt != 2) {
		(void) fat_node_put(fn);
		return EBUSY;
	}

	if (FAT_IS_FAT32(bs)) {
		/*
		 * Attempt to update the FAT32 FS info.
		 */
		(void) fat_update_fat32_fsinfo(service_id);
	}

	/*
	 * Put the root node and force it to the FAT free node list.
	 */
	(void) fat_node_put(fn);
	(void) fat_node_put(fn);

	/*
	 * Perform cleanup of the node structures, index structures and
	 * associated data. Write back this file system's dirty blocks and
	 * stop using libblock for this instance.
	 */
	(void) fat_node_fini_by_service_id(service_id);
	fat_idx_fini_by_service_id(service_id);
	(void) block_cache_fini(service_id);
	block_fini(service_id);

	/* Release the per-instance data registered in fat_mounted(). */
	void *data;
	if (fs_instance_get(service_id, &data) == EOK) {
		fs_instance_destroy(service_id);
		free(data);
	}

	return EOK;
}
1160
/** Service a VFS read request.
 *
 * For regular files at most one block is returned per call. For
 * directories one entry name is returned and *rbytes encodes how far the
 * directory position advanced.
 *
 * @param service_id	File system instance.
 * @param index		Index of the node to read.
 * @param pos		Read position (byte offset for files, entry
 *			position for directories).
 * @param rbytes	Where to store the number of bytes/positions read.
 *
 * @return	EOK on success, ENOENT past the last directory entry, or an
 *		error code.
 */
static int
fat_read(service_id_t service_id, fs_index_t index, aoff64_t pos,
    size_t *rbytes)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, service_id, index);
	if (rc != EOK)
		return rc;
	if (!fn)
		return ENOENT;
	nodep = FAT_NODE(fn);

	/* Receive the client's data buffer description. */
	ipc_callid_t callid;
	size_t len;
	if (!async_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		async_answer_0(callid, EINVAL);
		return EINVAL;
	}

	bs = block_bb_get(service_id);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) async_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the block boundary and to EOF. */
			bytes = min(len, BPS(bs) - pos % BPS(bs));
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
			    BLOCK_FLAGS_NONE);
			if (rc != EOK) {
				fat_node_put(fn);
				async_answer_0(callid, rc);
				return rc;
			}
			(void) async_data_read_finalize(callid,
			    b->data + pos % BPS(bs), bytes);
			rc = block_put(b);
			if (rc != EOK) {
				fat_node_put(fn);
				return rc;
			}
		}
	} else {
		aoff64_t spos = pos;
		char name[FAT_LFN_NAME_SIZE];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % BPS(bs) == 0);
		assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

		fat_directory_t di;
		rc = fat_directory_open(nodep, &di);
		if (rc != EOK)
			goto err;
		rc = fat_directory_seek(&di, pos);
		if (rc != EOK) {
			(void) fat_directory_close(&di);
			goto err;
		}

		rc = fat_directory_read(&di, name, &d);
		if (rc == EOK)
			goto hit;
		if (rc == ENOENT)
			goto miss;

err:
		/* Generic failure: release the node, answer the client. */
		(void) fat_node_put(fn);
		async_answer_0(callid, rc);
		return rc;

miss:
		/* Past the last entry: report ENOENT with zero bytes. */
		rc = fat_directory_close(&di);
		if (rc != EOK)
			goto err;
		rc = fat_node_put(fn);
		async_answer_0(callid, rc != EOK ? rc : ENOENT);
		*rbytes = 0;
		return rc != EOK ? rc : ENOENT;

hit:
		/* Found an entry: hand its name to the client. */
		pos = di.pos;
		rc = fat_directory_close(&di);
		if (rc != EOK)
			goto err;
		(void) async_data_read_finalize(callid, name,
		    str_size(name) + 1);
		bytes = (pos - spos) + 1;
	}

	rc = fat_node_put(fn);
	*rbytes = bytes;
	return rc;
}
1270
1271static int
1272fat_write(service_id_t service_id, fs_index_t index, aoff64_t pos,
1273 size_t *wbytes, aoff64_t *nsize)
1274{
1275 fs_node_t *fn;
1276 fat_node_t *nodep;
1277 fat_bs_t *bs;
1278 size_t bytes;
1279 block_t *b;
1280 aoff64_t boundary;
1281 int flags = BLOCK_FLAGS_NONE;
1282 int rc;
1283
1284 rc = fat_node_get(&fn, service_id, index);
1285 if (rc != EOK)
1286 return rc;
1287 if (!fn)
1288 return ENOENT;
1289 nodep = FAT_NODE(fn);
1290
1291 ipc_callid_t callid;
1292 size_t len;
1293 if (!async_data_write_receive(&callid, &len)) {
1294 (void) fat_node_put(fn);
1295 async_answer_0(callid, EINVAL);
1296 return EINVAL;
1297 }
1298
1299 bs = block_bb_get(service_id);
1300
1301 /*
1302 * In all scenarios, we will attempt to write out only one block worth
1303 * of data at maximum. There might be some more efficient approaches,
1304 * but this one greatly simplifies fat_write(). Note that we can afford
1305 * to do this because the client must be ready to handle the return
1306 * value signalizing a smaller number of bytes written.
1307 */
1308 bytes = min(len, BPS(bs) - pos % BPS(bs));
1309 if (bytes == BPS(bs))
1310 flags |= BLOCK_FLAGS_NOREAD;
1311
1312 boundary = ROUND_UP(nodep->size, BPC(bs));
1313 if (pos < boundary) {
1314 /*
1315 * This is the easier case - we are either overwriting already
1316 * existing contents or writing behind the EOF, but still within
1317 * the limits of the last cluster. The node size may grow to the
1318 * next block size boundary.
1319 */
1320 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1321 if (rc != EOK) {
1322 (void) fat_node_put(fn);
1323 async_answer_0(callid, rc);
1324 return rc;
1325 }
1326 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
1327 if (rc != EOK) {
1328 (void) fat_node_put(fn);
1329 async_answer_0(callid, rc);
1330 return rc;
1331 }
1332 (void) async_data_write_finalize(callid,
1333 b->data + pos % BPS(bs), bytes);
1334 b->dirty = true; /* need to sync block */
1335 rc = block_put(b);
1336 if (rc != EOK) {
1337 (void) fat_node_put(fn);
1338 return rc;
1339 }
1340 if (pos + bytes > nodep->size) {
1341 nodep->size = pos + bytes;
1342 nodep->dirty = true; /* need to sync node */
1343 }
1344 *wbytes = bytes;
1345 *nsize = nodep->size;
1346 rc = fat_node_put(fn);
1347 return rc;
1348 } else {
1349 /*
1350 * This is the more difficult case. We must allocate new
1351 * clusters for the node and zero them out.
1352 */
1353 unsigned nclsts;
1354 fat_cluster_t mcl, lcl;
1355
1356 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
1357 /* create an independent chain of nclsts clusters in all FATs */
1358 rc = fat_alloc_clusters(bs, service_id, nclsts, &mcl, &lcl);
1359 if (rc != EOK) {
1360 /* could not allocate a chain of nclsts clusters */
1361 (void) fat_node_put(fn);
1362 async_answer_0(callid, rc);
1363 return rc;
1364 }
1365 /* zero fill any gaps */
1366 rc = fat_fill_gap(bs, nodep, mcl, pos);
1367 if (rc != EOK) {
1368 (void) fat_free_clusters(bs, service_id, mcl);
1369 (void) fat_node_put(fn);
1370 async_answer_0(callid, rc);
1371 return rc;
1372 }
1373 rc = _fat_block_get(&b, bs, service_id, lcl, NULL,
1374 (pos / BPS(bs)) % SPC(bs), flags);
1375 if (rc != EOK) {
1376 (void) fat_free_clusters(bs, service_id, mcl);
1377 (void) fat_node_put(fn);
1378 async_answer_0(callid, rc);
1379 return rc;
1380 }
1381 (void) async_data_write_finalize(callid,
1382 b->data + pos % BPS(bs), bytes);
1383 b->dirty = true; /* need to sync block */
1384 rc = block_put(b);
1385 if (rc != EOK) {
1386 (void) fat_free_clusters(bs, service_id, mcl);
1387 (void) fat_node_put(fn);
1388 return rc;
1389 }
1390 /*
1391 * Append the cluster chain starting in mcl to the end of the
1392 * node's cluster chain.
1393 */
1394 rc = fat_append_clusters(bs, nodep, mcl, lcl);
1395 if (rc != EOK) {
1396 (void) fat_free_clusters(bs, service_id, mcl);
1397 (void) fat_node_put(fn);
1398 return rc;
1399 }
1400 *nsize = nodep->size = pos + bytes;
1401 rc = fat_node_put(fn);
1402 nodep->dirty = true; /* need to sync node */
1403 *wbytes = bytes;
1404 return rc;
1405 }
1406}
1407
1408static int
1409fat_truncate(service_id_t service_id, fs_index_t index, aoff64_t size)
1410{
1411 fs_node_t *fn;
1412 fat_node_t *nodep;
1413 fat_bs_t *bs;
1414 int rc;
1415
1416 rc = fat_node_get(&fn, service_id, index);
1417 if (rc != EOK)
1418 return rc;
1419 if (!fn)
1420 return ENOENT;
1421 nodep = FAT_NODE(fn);
1422
1423 bs = block_bb_get(service_id);
1424
1425 if (nodep->size == size) {
1426 rc = EOK;
1427 } else if (nodep->size < size) {
1428 /*
1429 * The standard says we have the freedom to grow the node.
1430 * For now, we simply return an error.
1431 */
1432 rc = EINVAL;
1433 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
1434 /*
1435 * The node will be shrunk, but no clusters will be deallocated.
1436 */
1437 nodep->size = size;
1438 nodep->dirty = true; /* need to sync node */
1439 rc = EOK;
1440 } else {
1441 /*
1442 * The node will be shrunk, clusters will be deallocated.
1443 */
1444 if (size == 0) {
1445 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1446 if (rc != EOK)
1447 goto out;
1448 } else {
1449 fat_cluster_t lastc;
1450 rc = fat_cluster_walk(bs, service_id, nodep->firstc,
1451 &lastc, NULL, (size - 1) / BPC(bs));
1452 if (rc != EOK)
1453 goto out;
1454 rc = fat_chop_clusters(bs, nodep, lastc);
1455 if (rc != EOK)
1456 goto out;
1457 }
1458 nodep->size = size;
1459 nodep->dirty = true; /* need to sync node */
1460 rc = EOK;
1461 }
1462out:
1463 fat_node_put(fn);
1464 return rc;
1465}
1466
1467static int fat_close(service_id_t service_id, fs_index_t index)
1468{
1469 return EOK;
1470}
1471
1472static int fat_destroy(service_id_t service_id, fs_index_t index)
1473{
1474 fs_node_t *fn;
1475 fat_node_t *nodep;
1476 int rc;
1477
1478 rc = fat_node_get(&fn, service_id, index);
1479 if (rc != EOK)
1480 return rc;
1481 if (!fn)
1482 return ENOENT;
1483
1484 nodep = FAT_NODE(fn);
1485 /*
1486 * We should have exactly two references. One for the above
1487 * call to fat_node_get() and one from fat_unlink().
1488 */
1489 assert(nodep->refcnt == 2);
1490
1491 rc = fat_destroy_node(fn);
1492 return rc;
1493}
1494
1495static int fat_sync(service_id_t service_id, fs_index_t index)
1496{
1497 fs_node_t *fn;
1498 int rc = fat_node_get(&fn, service_id, index);
1499 if (rc != EOK)
1500 return rc;
1501 if (!fn)
1502 return ENOENT;
1503
1504 fat_node_t *nodep = FAT_NODE(fn);
1505
1506 nodep->dirty = true;
1507 rc = fat_node_sync(nodep);
1508
1509 fat_node_put(fn);
1510 return rc;
1511}
1512
/** Dispatch table of VFS output operations implemented by the FAT server. */
vfs_out_ops_t fat_ops = {
	.mounted = fat_mounted,
	.unmounted = fat_unmounted,
	.read = fat_read,
	.write = fat_write,
	.truncate = fat_truncate,
	.close = fat_close,
	.destroy = fat_destroy,
	.sync = fat_sync,
};
1523
1524/**
1525 * @}
1526 */
Note: See TracBrowser for help on using the repository browser.