source: mainline/uspace/srv/fs/fat/fat_ops.c@ b9060a83

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since b9060a83 was 010b52d8, checked in by Oleg Romanenko <romanenko.oleg@…>, 14 years ago

Minor fix in fat_match

  • Property mode set to 100644
File size: 37.2 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "fat_directory.h"
42#include "../../vfs/vfs.h"
43#include <libfs.h>
44#include <libblock.h>
45#include <ipc/services.h>
46#include <ipc/devmap.h>
47#include <macros.h>
48#include <async.h>
49#include <errno.h>
50#include <str.h>
51#include <byteorder.h>
52#include <adt/hash_table.h>
53#include <adt/list.h>
54#include <assert.h>
55#include <fibril_synch.h>
56#include <sys/mman.h>
57#include <align.h>
58#include <malloc.h>
59#include <str.h>
60
61#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
62#define FS_NODE(node) ((node) ? (node)->bp : NULL)
63
64#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
65#define BPC(bs) (BPS((bs)) * SPC((bs)))
66
67/** Mutex protecting the list of cached free FAT nodes. */
68static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
69
70/** List of cached free FAT nodes. */
71static LIST_INITIALIZE(ffn_head);
72
73/*
74 * Forward declarations of FAT libfs operations.
75 */
76static int fat_root_get(fs_node_t **, devmap_handle_t);
77static int fat_match(fs_node_t **, fs_node_t *, const char *);
78static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);
79static int fat_node_open(fs_node_t *);
80static int fat_node_put(fs_node_t *);
81static int fat_create_node(fs_node_t **, devmap_handle_t, int);
82static int fat_destroy_node(fs_node_t *);
83static int fat_link(fs_node_t *, fs_node_t *, const char *);
84static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
85static int fat_has_children(bool *, fs_node_t *);
86static fs_index_t fat_index_get(fs_node_t *);
87static aoff64_t fat_size_get(fs_node_t *);
88static unsigned fat_lnkcnt_get(fs_node_t *);
89static char fat_plb_get_char(unsigned);
90static bool fat_is_directory(fs_node_t *);
91static bool fat_is_file(fs_node_t *node);
92static devmap_handle_t fat_device_get(fs_node_t *node);
93
94/*
95 * Helper functions.
96 */
/** Reset an in-core FAT node to a pristine, unattached state.
 *
 * Used both for freshly malloc'ed nodes and for nodes recycled from the
 * free list. After this call the node holds no resources, is linked to no
 * index structure and both cluster caches are invalid.
 *
 * @param node Node to (re)initialize.
 */
static void fat_node_initialize(fat_node_t *node)
{
    fibril_mutex_initialize(&node->lock);
    node->bp = NULL;
    node->idx = NULL;
    node->type = 0;
    link_initialize(&node->ffn_link);
    node->size = 0;
    node->lnkcnt = 0;
    node->refcnt = 0;
    node->dirty = false;
    /* Invalidate the cached last-cluster and current-cluster hints. */
    node->lastc_cached_valid = false;
    node->lastc_cached_value = FAT32_CLST_LAST1;
    node->currc_cached_valid = false;
    node->currc_cached_bn = 0;
    node->currc_cached_value = FAT32_CLST_LAST1;
}
114
/** Write a dirty node's on-disk directory entry back to the device.
 *
 * Locates the block holding the node's dentry (via the index structure's
 * parent-first-cluster/dentry-index pair), updates the first cluster and
 * the size or attribute field, and marks the block dirty.
 *
 * @param node Node to synchronize; node->dirty must be true.
 * @return EOK on success or an error code from the block layer.
 */
static int fat_node_sync(fat_node_t *node)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    int rc;

    assert(node->dirty);

    bs = block_bb_get(node->idx->devmap_handle);

    /* Read the block that contains the dentry of interest. */
    rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
        NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
        BLOCK_FLAGS_NONE);
    if (rc != EOK)
        return rc;

    d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));

    /*
     * NOTE(review): only the low 16 bits of firstc are stored here, while
     * fat_node_get_core() also reads d->firstc_hi on FAT32 — confirm
     * whether the FAT32 high word needs to be updated as well.
     */
    d->firstc = host2uint16_t_le(node->firstc);
    if (node->type == FAT_FILE) {
        d->size = host2uint32_t_le(node->size);
    } else if (node->type == FAT_DIRECTORY) {
        d->attr = FAT_ATTR_SUBDIR;
    }

    /* TODO: update other fields? (e.g time fields) */

    b->dirty = true;        /* need to sync block */
    rc = block_put(b);
    return rc;
}
148
/** Release all cached free nodes that belong to one file system instance.
 *
 * Dirty nodes are synced to disk before being freed. On any sync error the
 * walk is aborted and the error is propagated.
 *
 * @param devmap_handle Instance whose cached free nodes are to be released.
 * @return EOK on success or an error from fat_node_sync().
 */
static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle)
{
    link_t *lnk;
    fat_node_t *nodep;
    int rc;

    /*
     * We are called from fat_unmounted() and assume that there are already
     * no nodes belonging to this instance with non-zero refcount. Therefore
     * it is sufficient to clean up only the FAT free node list.
     */

restart:
    fibril_mutex_lock(&ffn_mutex);
    for (lnk = ffn_head.next; lnk != &ffn_head; lnk = lnk->next) {
        nodep = list_get_instance(lnk, fat_node_t, ffn_link);
        /*
         * Trylocks avoid lock-order inversion against paths that take the
         * node lock before ffn_mutex; on contention we drop everything and
         * rescan the list from the beginning.
         */
        if (!fibril_mutex_trylock(&nodep->lock)) {
            fibril_mutex_unlock(&ffn_mutex);
            goto restart;
        }
        if (!fibril_mutex_trylock(&nodep->idx->lock)) {
            fibril_mutex_unlock(&nodep->lock);
            fibril_mutex_unlock(&ffn_mutex);
            goto restart;
        }
        if (nodep->idx->devmap_handle != devmap_handle) {
            /* Node belongs to a different instance; leave it alone. */
            fibril_mutex_unlock(&nodep->idx->lock);
            fibril_mutex_unlock(&nodep->lock);
            continue;
        }

        list_remove(&nodep->ffn_link);
        fibril_mutex_unlock(&ffn_mutex);

        /*
         * We can unlock the node and its index structure because we are
         * the last player on this playground and VFS is preventing new
         * players from entering.
         */
        fibril_mutex_unlock(&nodep->idx->lock);
        fibril_mutex_unlock(&nodep->lock);

        /* Flush pending dentry changes before freeing the node. */
        if (nodep->dirty) {
            rc = fat_node_sync(nodep);
            if (rc != EOK)
                return rc;
        }
        nodep->idx->nodep = NULL;
        free(nodep->bp);
        free(nodep);

        /* Need to restart because we changed the ffn_head list. */
        goto restart;
    }
    fibril_mutex_unlock(&ffn_mutex);

    return EOK;
}
207
/** Obtain an initialized fat_node_t, recycling a cached free node if possible.
 *
 * If the free list holds a node whose locks can be acquired without
 * blocking, that node is detached from its old index structure (after
 * syncing it if dirty) and reused; otherwise a fresh fs_node_t/fat_node_t
 * pair is allocated.
 *
 * @param nodepp Output: initialized node with its fs_node_t wrapper wired up.
 * @return EOK, ENOMEM on allocation failure, or a sync error.
 */
static int fat_node_get_new(fat_node_t **nodepp)
{
    fs_node_t *fn;
    fat_node_t *nodep;
    int rc;

    fibril_mutex_lock(&ffn_mutex);
    if (!list_empty(&ffn_head)) {
        /* Try to use a cached free node structure. */
        fat_idx_t *idxp_tmp;
        nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
        /* On any lock contention, fall back to a fresh allocation. */
        if (!fibril_mutex_trylock(&nodep->lock))
            goto skip_cache;
        idxp_tmp = nodep->idx;
        if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
            fibril_mutex_unlock(&nodep->lock);
            goto skip_cache;
        }
        list_remove(&nodep->ffn_link);
        fibril_mutex_unlock(&ffn_mutex);
        /* A recycled node may still carry unsynced dentry changes. */
        if (nodep->dirty) {
            rc = fat_node_sync(nodep);
            if (rc != EOK) {
                idxp_tmp->nodep = NULL;
                fibril_mutex_unlock(&nodep->lock);
                fibril_mutex_unlock(&idxp_tmp->lock);
                free(nodep->bp);
                free(nodep);
                return rc;
            }
        }
        /* Detach the node from its previous index structure. */
        idxp_tmp->nodep = NULL;
        fibril_mutex_unlock(&nodep->lock);
        fibril_mutex_unlock(&idxp_tmp->lock);
        fn = FS_NODE(nodep);
    } else {
skip_cache:
        /* Try to allocate a new node structure. */
        fibril_mutex_unlock(&ffn_mutex);
        fn = (fs_node_t *)malloc(sizeof(fs_node_t));
        if (!fn)
            return ENOMEM;
        nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
        if (!nodep) {
            free(fn);
            return ENOMEM;
        }
    }
    fat_node_initialize(nodep);
    fs_node_initialize(fn);
    fn->data = nodep;
    nodep->bp = fn;

    *nodepp = nodep;
    return EOK;
}
264
/** Internal version of fat_node_get().
 *
 * If the index structure already has an in-core node, a new reference to it
 * is returned. Otherwise the node is instantiated from its on-disk dentry:
 * the first cluster is decoded (including the high word on FAT32), and the
 * size is either read from the dentry (files) or computed by walking the
 * FAT (directories, whose dentry size field is undefined).
 *
 * @param nodepp Output: referenced in-core node.
 * @param idxp   Locked index structure.
 * @return EOK on success or an error from the block/FAT layers.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    fat_node_t *nodep = NULL;
    int rc;

    if (idxp->nodep) {
        /*
         * We are lucky.
         * The node is already instantiated in memory.
         */
        fibril_mutex_lock(&idxp->nodep->lock);
        if (!idxp->nodep->refcnt++) {
            /* First reference: take the node off the free list. */
            fibril_mutex_lock(&ffn_mutex);
            list_remove(&idxp->nodep->ffn_link);
            fibril_mutex_unlock(&ffn_mutex);
        }
        fibril_mutex_unlock(&idxp->nodep->lock);
        *nodepp = idxp->nodep;
        return EOK;
    }

    /*
     * We must instantiate the node from the file system.
     */

    assert(idxp->pfc);

    rc = fat_node_get_new(&nodep);
    if (rc != EOK)
        return rc;

    bs = block_bb_get(idxp->devmap_handle);

    /* Read the block that contains the dentry of interest. */
    rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL,
        (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
    if (rc != EOK) {
        (void) fat_node_put(FS_NODE(nodep));
        return rc;
    }

    d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
    if (FAT_IS_FAT32(bs)) {
        /* FAT32 splits the first cluster into low and high 16-bit words. */
        nodep->firstc = uint16_t_le2host(d->firstc_lo) |
            (uint16_t_le2host(d->firstc_hi) << 16);
    }
    else
        nodep->firstc = uint16_t_le2host(d->firstc);

    if (d->attr & FAT_ATTR_SUBDIR) {
        /*
         * The only directory which does not have this bit set is the
         * root directory itself. The root directory node is handled
         * and initialized elsewhere.
         */
        nodep->type = FAT_DIRECTORY;

        /*
         * Unfortunately, the 'size' field of the FAT dentry is not
         * defined for the directory entry type. We must determine the
         * size of the directory by walking the FAT.
         */
        /* TODO uint16_t clusters to uint32_t */
        uint16_t clusters;
        rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle, nodep->firstc);
        if (rc != EOK) {
            (void) block_put(b);
            (void) fat_node_put(FS_NODE(nodep));
            return rc;
        }
        nodep->size = BPS(bs) * SPC(bs) * clusters;
    } else {
        nodep->type = FAT_FILE;
        nodep->size = uint32_t_le2host(d->size);
    }

    nodep->lnkcnt = 1;
    nodep->refcnt = 1;

    rc = block_put(b);
    if (rc != EOK) {
        (void) fat_node_put(FS_NODE(nodep));
        return rc;
    }

    /* Link the idx structure with the node structure. */
    nodep->idx = idxp;
    idxp->nodep = nodep;

    *nodepp = nodep;
    return EOK;
}
364
365/*
366 * FAT libfs operations.
367 */
368
/** Obtain the root node of a FAT instance (libfs callback).
 *
 * The root node always has index 0 (see fat_mounted()).
 */
int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)
{
    return fat_node_get(rfn, devmap_handle, 0);
}
373
/** Look up a directory entry by name (libfs callback).
 *
 * Iterates over the parent directory's entries (long file names included,
 * via the fat_directory iterator) and, on a name match, instantiates the
 * corresponding node.
 *
 * @param rfn       Output: matching node, or NULL if the name is not found
 *                  (which is still EOK).
 * @param pfn       Parent directory node.
 * @param component Name to look up.
 * @return EOK on hit or clean miss; ENOMEM or another error otherwise.
 */
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
    fat_node_t *parentp = FAT_NODE(pfn);
    char name[FAT_LFN_NAME_SIZE];
    fat_dentry_t *d;
    devmap_handle_t devmap_handle;
    int rc;

    fibril_mutex_lock(&parentp->idx->lock);
    devmap_handle = parentp->idx->devmap_handle;
    fibril_mutex_unlock(&parentp->idx->lock);

    /* NOTE(review): fat_directory_open() result is ignored — confirm it
     * cannot fail for an already-instantiated directory node. */
    fat_directory_t di;
    fat_directory_open(parentp, &di);

    while (fat_directory_read(&di, name, &d) == EOK) {
        if (fat_dentry_namecmp(name, component) == 0) {
            /* hit */
            fat_node_t *nodep;
            /* Dentry offset within its block; di.pos is one past the hit. */
            aoff64_t o = (di.pos-1) % (BPS(di.bs) / sizeof(fat_dentry_t));
            fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,
                parentp->firstc, di.bnum * DPS(di.bs) + o);
            if (!idx) {
                /*
                 * Can happen if memory is low or if we
                 * run out of 32-bit indices.
                 */
                rc = fat_directory_close(&di);
                return (rc == EOK) ? ENOMEM : rc;
            }
            rc = fat_node_get_core(&nodep, idx);
            fibril_mutex_unlock(&idx->lock);
            if (rc != EOK) {
                (void) fat_directory_close(&di);
                return rc;
            }
            *rfn = FS_NODE(nodep);
            rc = fat_directory_close(&di);
            if (rc != EOK)
                (void) fat_node_put(*rfn);
            return rc;
        }
    }
    (void) fat_directory_close(&di);
    *rfn = NULL;
    return EOK;
}
421
/** Instantiate a FAT in-core node (libfs callback).
 *
 * @param rfn           Output: node for the given index, or NULL with EOK
 *                      if no index structure exists for it.
 * @param devmap_handle Device the node lives on.
 * @param index         File system index of the node.
 * @return EOK on success or an error from fat_node_get_core().
 */
int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)
{
    fat_node_t *nodep;
    fat_idx_t *idxp;
    int rc;

    idxp = fat_idx_get_by_index(devmap_handle, index);
    if (!idxp) {
        *rfn = NULL;
        return EOK;
    }
    /* idxp->lock held */
    rc = fat_node_get_core(&nodep, idxp);
    fibril_mutex_unlock(&idxp->lock);
    if (rc == EOK)
        *rfn = FS_NODE(nodep);
    return rc;
}
441
442int fat_node_open(fs_node_t *fn)
443{
444 /*
445 * Opening a file is stateless, nothing
446 * to be done here.
447 */
448 return EOK;
449}
450
/** Drop one reference to an in-core node (libfs callback).
 *
 * When the last reference is dropped, the node is either parked on the
 * free node list (if it has an index structure) or freed outright (it can
 * lack an index only after a failed index allocation).
 *
 * @param fn Node to release.
 * @return Always EOK.
 */
int fat_node_put(fs_node_t *fn)
{
    fat_node_t *nodep = FAT_NODE(fn);
    bool destroy = false;

    fibril_mutex_lock(&nodep->lock);
    if (!--nodep->refcnt) {
        if (nodep->idx) {
            /* Cache the unreferenced node for later reuse. */
            fibril_mutex_lock(&ffn_mutex);
            list_append(&nodep->ffn_link, &ffn_head);
            fibril_mutex_unlock(&ffn_mutex);
        } else {
            /*
             * The node does not have any index structure associated
             * with itself. This can only mean that we are releasing
             * the node after a failed attempt to allocate the index
             * structure for it.
             */
            destroy = true;
        }
    }
    fibril_mutex_unlock(&nodep->lock);
    if (destroy) {
        /* Free outside the node lock; nobody else can reach it now. */
        free(nodep->bp);
        free(nodep);
    }
    return EOK;
}
479
480int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags)
481{
482 fat_idx_t *idxp;
483 fat_node_t *nodep;
484 fat_bs_t *bs;
485 fat_cluster_t mcl, lcl;
486 int rc;
487
488 bs = block_bb_get(devmap_handle);
489 if (flags & L_DIRECTORY) {
490 /* allocate a cluster */
491 rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl);
492 if (rc != EOK)
493 return rc;
494 /* populate the new cluster with unused dentries */
495 rc = fat_zero_cluster(bs, devmap_handle, mcl);
496 if (rc != EOK) {
497 (void) fat_free_clusters(bs, devmap_handle, mcl);
498 return rc;
499 }
500 }
501
502 rc = fat_node_get_new(&nodep);
503 if (rc != EOK) {
504 (void) fat_free_clusters(bs, devmap_handle, mcl);
505 return rc;
506 }
507 rc = fat_idx_get_new(&idxp, devmap_handle);
508 if (rc != EOK) {
509 (void) fat_free_clusters(bs, devmap_handle, mcl);
510 (void) fat_node_put(FS_NODE(nodep));
511 return rc;
512 }
513 /* idxp->lock held */
514 if (flags & L_DIRECTORY) {
515 nodep->type = FAT_DIRECTORY;
516 nodep->firstc = mcl;
517 nodep->size = BPS(bs) * SPC(bs);
518 } else {
519 nodep->type = FAT_FILE;
520 nodep->firstc = FAT_CLST_RES0;
521 nodep->size = 0;
522 }
523 nodep->lnkcnt = 0; /* not linked anywhere */
524 nodep->refcnt = 1;
525 nodep->dirty = true;
526
527 nodep->idx = idxp;
528 idxp->nodep = nodep;
529
530 fibril_mutex_unlock(&idxp->lock);
531 *rfn = FS_NODE(nodep);
532 return EOK;
533}
534
/** Destroy an unlinked, childless node and free its clusters (libfs callback).
 *
 * @param fn Node to destroy; must have lnkcnt == 0 and no children.
 * @return EOK, or an error from fat_has_children()/fat_free_clusters().
 */
int fat_destroy_node(fs_node_t *fn)
{
    fat_node_t *nodep = FAT_NODE(fn);
    fat_bs_t *bs;
    bool has_children;
    int rc;

    /*
     * The node is not reachable from the file system. This means that the
     * link count should be zero and that the index structure cannot be
     * found in the position hash. Obviously, we don't need to lock the node
     * nor its index structure.
     */
    assert(nodep->lnkcnt == 0);

    /*
     * The node may not have any children.
     */
    rc = fat_has_children(&has_children, fn);
    if (rc != EOK)
        return rc;
    assert(!has_children);

    bs = block_bb_get(nodep->idx->devmap_handle);
    if (nodep->firstc != FAT_CLST_RES0) {
        assert(nodep->size);
        /* Free all clusters allocated to the node. */
        rc = fat_free_clusters(bs, nodep->idx->devmap_handle,
            nodep->firstc);
    }
    /* If there were no clusters, rc is still EOK from fat_has_children(). */

    fat_idx_destroy(nodep->idx);
    free(nodep->bp);
    free(nodep);
    return rc;
}
571
/** Link a child node into a parent directory under a given name.
 *
 * Finds a free dentry slot in the parent (growing the parent by one
 * cluster if necessary), writes the name into it, optionally creates the
 * "." and ".." entries for a new directory, and records the child's
 * position in its index structure. FAT supports no hard links, so a child
 * with lnkcnt == 1 is rejected with EMLINK.
 *
 * @param pfn  Parent directory node.
 * @param cfn  Child node to link (lnkcnt must be 0).
 * @param name Name to link the child under (8.3-verifiable).
 * @return EOK, EMLINK, ENOTSUP (bad name), ENOSPC (fixed root full) or a
 *         block/cluster layer error.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
    fat_node_t *parentp = FAT_NODE(pfn);
    fat_node_t *childp = FAT_NODE(cfn);
    fat_dentry_t *d;
    fat_bs_t *bs;
    block_t *b;
    unsigned i, j;
    unsigned blocks;
    fat_cluster_t mcl, lcl;
    int rc;

    fibril_mutex_lock(&childp->lock);
    if (childp->lnkcnt == 1) {
        /*
         * On FAT, we don't support multiple hard links.
         */
        fibril_mutex_unlock(&childp->lock);
        return EMLINK;
    }
    assert(childp->lnkcnt == 0);
    fibril_mutex_unlock(&childp->lock);

    if (!fat_dentry_name_verify(name)) {
        /*
         * Attempt to create unsupported name.
         */
        return ENOTSUP;
    }

    /*
     * Get us an unused parent node's dentry or grow the parent and allocate
     * a new one.
     */

    fibril_mutex_lock(&parentp->idx->lock);
    bs = block_bb_get(parentp->idx->devmap_handle);

    blocks = parentp->size / BPS(bs);

    /* Scan the parent's existing blocks for a free dentry slot. */
    for (i = 0; i < blocks; i++) {
        rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
        if (rc != EOK) {
            fibril_mutex_unlock(&parentp->idx->lock);
            return rc;
        }
        for (j = 0; j < DPS(bs); j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_LFN:
            case FAT_DENTRY_SKIP:
            case FAT_DENTRY_VALID:
                /* skipping used and meta entries */
                continue;
            case FAT_DENTRY_FREE:
            case FAT_DENTRY_LAST:
                /* found an empty slot; b stays referenced */
                goto hit;
            }
        }
        rc = block_put(b);
        if (rc != EOK) {
            fibril_mutex_unlock(&parentp->idx->lock);
            return rc;
        }
    }
    /* No slot found: the new dentry will be the first in a new cluster. */
    j = 0;

    /*
     * We need to grow the parent in order to create a new unused dentry.
     */
    if (!FAT_IS_FAT32(bs) && parentp->firstc == FAT_CLST_ROOT) {
        /* Can't grow the root directory. */
        fibril_mutex_unlock(&parentp->idx->lock);
        return ENOSPC;
    }
    rc = fat_alloc_clusters(bs, parentp->idx->devmap_handle, 1, &mcl, &lcl);
    if (rc != EOK) {
        fibril_mutex_unlock(&parentp->idx->lock);
        return rc;
    }
    rc = fat_zero_cluster(bs, parentp->idx->devmap_handle, mcl);
    if (rc != EOK) {
        (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
        fibril_mutex_unlock(&parentp->idx->lock);
        return rc;
    }
    rc = fat_append_clusters(bs, parentp, mcl, lcl);
    if (rc != EOK) {
        (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
        fibril_mutex_unlock(&parentp->idx->lock);
        return rc;
    }
    parentp->size += BPS(bs) * SPC(bs);
    parentp->dirty = true;      /* need to sync node */
    /* i == blocks here, i.e. the first block of the appended cluster. */
    rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
    if (rc != EOK) {
        fibril_mutex_unlock(&parentp->idx->lock);
        return rc;
    }
    d = (fat_dentry_t *)b->data;

hit:
    /*
     * At this point we only establish the link between the parent and the
     * child. The dentry, except of the name and the extension, will remain
     * uninitialized until the corresponding node is synced. Thus the valid
     * dentry data is kept in the child node structure.
     */
    memset(d, 0, sizeof(fat_dentry_t));
    fat_dentry_name_set(d, name);
    b->dirty = true;        /* need to sync block */
    rc = block_put(b);
    fibril_mutex_unlock(&parentp->idx->lock);
    if (rc != EOK)
        return rc;

    fibril_mutex_lock(&childp->idx->lock);

    if (childp->type == FAT_DIRECTORY) {
        /*
         * If possible, create the Sub-directory Identifier Entry and
         * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
         * These entries are not mandatory according to Standard
         * ECMA-107 and HelenOS VFS does not use them anyway, so this is
         * rather a sign of our good will.
         */
        rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
        if (rc != EOK) {
            /*
             * Rather than returning an error, simply skip the
             * creation of these two entries.
             */
            goto skip_dots;
        }
        d = (fat_dentry_t *) b->data;
        if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
            (bcmp(d->name, FAT_NAME_DOT, FAT_NAME_LEN)) == 0) {
            memset(d, 0, sizeof(fat_dentry_t));
            memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
            memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
            d->attr = FAT_ATTR_SUBDIR;
            d->firstc = host2uint16_t_le(childp->firstc);
            /* TODO: initialize also the date/time members. */
        }
        d++;
        if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
            (bcmp(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN) == 0)) {
            memset(d, 0, sizeof(fat_dentry_t));
            memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
            memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
            d->attr = FAT_ATTR_SUBDIR;
            /* ".." in a child of the root points at the root marker. */
            d->firstc = (parentp->firstc == FAT_ROOT_CLST(bs)) ?
                host2uint16_t_le(FAT_CLST_ROOTPAR) :
                host2uint16_t_le(parentp->firstc);
            /* TODO: initialize also the date/time members. */
        }
        b->dirty = true;        /* need to sync block */
        /*
         * Ignore the return value as we would have fallen through on error
         * anyway.
         */
        (void) block_put(b);
    }
skip_dots:

    /* Record where the child's dentry lives inside the parent. */
    childp->idx->pfc = parentp->firstc;
    childp->idx->pdi = i * DPS(bs) + j;
    fibril_mutex_unlock(&childp->idx->lock);

    fibril_mutex_lock(&childp->lock);
    childp->lnkcnt = 1;
    childp->dirty = true;       /* need to sync node */
    fibril_mutex_unlock(&childp->lock);

    /*
     * Hash in the index structure into the position hash.
     */
    fat_idx_hashin(childp->idx);

    return EOK;
}
754
755int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
756{
757 fat_node_t *parentp = FAT_NODE(pfn);
758 fat_node_t *childp = FAT_NODE(cfn);
759 fat_bs_t *bs;
760 fat_dentry_t *d;
761 block_t *b;
762 bool has_children;
763 int rc;
764
765 if (!parentp)
766 return EBUSY;
767
768 rc = fat_has_children(&has_children, cfn);
769 if (rc != EOK)
770 return rc;
771 if (has_children)
772 return ENOTEMPTY;
773
774 fibril_mutex_lock(&parentp->lock);
775 fibril_mutex_lock(&childp->lock);
776 assert(childp->lnkcnt == 1);
777 fibril_mutex_lock(&childp->idx->lock);
778 bs = block_bb_get(childp->idx->devmap_handle);
779
780 rc = _fat_block_get(&b, bs, childp->idx->devmap_handle, childp->idx->pfc,
781 NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
782 BLOCK_FLAGS_NONE);
783 if (rc != EOK)
784 goto error;
785 d = (fat_dentry_t *)b->data +
786 (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
787 /* mark the dentry as not-currently-used */
788 d->name[0] = FAT_DENTRY_ERASED;
789 b->dirty = true; /* need to sync block */
790 rc = block_put(b);
791 if (rc != EOK)
792 goto error;
793
794 /* remove the index structure from the position hash */
795 fat_idx_hashout(childp->idx);
796 /* clear position information */
797 childp->idx->pfc = FAT_CLST_RES0;
798 childp->idx->pdi = 0;
799 fibril_mutex_unlock(&childp->idx->lock);
800 childp->lnkcnt = 0;
801 childp->refcnt++; /* keep the node in memory until destroyed */
802 childp->dirty = true;
803 fibril_mutex_unlock(&childp->lock);
804 fibril_mutex_unlock(&parentp->lock);
805
806 return EOK;
807
808error:
809 fibril_mutex_unlock(&parentp->idx->lock);
810 fibril_mutex_unlock(&childp->lock);
811 fibril_mutex_unlock(&childp->idx->lock);
812 return rc;
813}
814
/** Determine whether a directory node contains any live entries.
 *
 * Walks the directory's blocks until a valid entry (child exists) or the
 * last-entry marker (no children) is found. Non-directories trivially have
 * no children.
 *
 * @param has_children Output flag.
 * @param fn           Node to inspect.
 * @return EOK or an error from the block layer.
 */
int fat_has_children(bool *has_children, fs_node_t *fn)
{
    fat_bs_t *bs;
    fat_node_t *nodep = FAT_NODE(fn);
    unsigned blocks;
    block_t *b;
    unsigned i, j;
    int rc;

    if (nodep->type != FAT_DIRECTORY) {
        *has_children = false;
        return EOK;
    }

    fibril_mutex_lock(&nodep->idx->lock);
    bs = block_bb_get(nodep->idx->devmap_handle);

    blocks = nodep->size / BPS(bs);

    for (i = 0; i < blocks; i++) {
        fat_dentry_t *d;

        rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
        if (rc != EOK) {
            fibril_mutex_unlock(&nodep->idx->lock);
            return rc;
        }
        for (j = 0; j < DPS(bs); j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
            case FAT_DENTRY_FREE:
                continue;
            case FAT_DENTRY_LAST:
                /* Marker: no further entries; directory is empty. */
                rc = block_put(b);
                fibril_mutex_unlock(&nodep->idx->lock);
                *has_children = false;
                return rc;
            default:
            case FAT_DENTRY_VALID:
                /* Any other classification counts as a live entry. */
                rc = block_put(b);
                fibril_mutex_unlock(&nodep->idx->lock);
                *has_children = true;
                return rc;
            }
        }
        rc = block_put(b);
        if (rc != EOK) {
            fibril_mutex_unlock(&nodep->idx->lock);
            return rc;
        }
    }

    fibril_mutex_unlock(&nodep->idx->lock);
    *has_children = false;
    return EOK;
}
872
873
/** Return the file system index of a node (libfs callback). */
fs_index_t fat_index_get(fs_node_t *fn)
{
    return FAT_NODE(fn)->idx->index;
}
878
/** Return the size of a node in bytes (libfs callback). */
aoff64_t fat_size_get(fs_node_t *fn)
{
    return FAT_NODE(fn)->size;
}
883
/** Return the link count of a node (libfs callback); 0 or 1 on FAT. */
unsigned fat_lnkcnt_get(fs_node_t *fn)
{
    return FAT_NODE(fn)->lnkcnt;
}
888
/** Return one character from the shared path lookup buffer (libfs callback).
 *
 * The PLB is a circular buffer, hence the modulo by PLB_SIZE.
 */
char fat_plb_get_char(unsigned pos)
{
    return fat_reg.plb_ro[pos % PLB_SIZE];
}
893
/** Tell whether a node is a directory (libfs callback). */
bool fat_is_directory(fs_node_t *fn)
{
    return FAT_NODE(fn)->type == FAT_DIRECTORY;
}
898
/** Tell whether a node is a regular file (libfs callback). */
bool fat_is_file(fs_node_t *fn)
{
    return FAT_NODE(fn)->type == FAT_FILE;
}
903
/** Return the device a node resides on (libfs callback).
 *
 * Stub: FAT does not implement this and always reports 0.
 */
devmap_handle_t fat_device_get(fs_node_t *node)
{
    return 0;
}
908
/** Dispatch table wiring the generic libfs operations to FAT callbacks. */
libfs_ops_t fat_libfs_ops = {
    .root_get = fat_root_get,
    .match = fat_match,
    .node_get = fat_node_get,
    .node_open = fat_node_open,
    .node_put = fat_node_put,
    .create = fat_create_node,
    .destroy = fat_destroy_node,
    .link = fat_link,
    .unlink = fat_unlink,
    .has_children = fat_has_children,
    .index_get = fat_index_get,
    .size_get = fat_size_get,
    .lnkcnt_get = fat_lnkcnt_get,
    .plb_get_char = fat_plb_get_char,
    .is_directory = fat_is_directory,
    .is_file = fat_is_file,
    .device_get = fat_device_get
};
929
930/*
931 * VFS operations.
932 */
933
/** Handle the VFS "mounted" request: bring up a FAT instance on a device.
 *
 * Accepts the mount options, initializes libblock and the block cache,
 * sanity-checks the boot sector, sets up index structures and instantiates
 * the root node. Answers with the root's index, size and link count.
 *
 * @param rid     IPC request id to answer.
 * @param request IPC call carrying the devmap handle in ARG1.
 */
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
    enum cache_mode cmode;
    fat_bs_t *bs;

    /* Accept the mount options */
    char *opts;
    int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);

    if (rc != EOK) {
        async_answer_0(rid, rc);
        return;
    }

    /* Check for option enabling write through. */
    if (str_cmp(opts, "wtcache") == 0)
        cmode = CACHE_MODE_WT;
    else
        cmode = CACHE_MODE_WB;

    free(opts);

    /* initialize libblock */
    rc = block_init(devmap_handle, BS_SIZE);
    if (rc != EOK) {
        async_answer_0(rid, rc);
        return;
    }

    /* prepare the boot block */
    rc = block_bb_read(devmap_handle, BS_BLOCK);
    if (rc != EOK) {
        block_fini(devmap_handle);
        async_answer_0(rid, rc);
        return;
    }

    /* get the buffer with the boot sector */
    bs = block_bb_get(devmap_handle);

    /* Only 512-byte-sector volumes are supported (BS_SIZE). */
    if (BPS(bs) != BS_SIZE) {
        block_fini(devmap_handle);
        async_answer_0(rid, ENOTSUP);
        return;
    }

    /* Initialize the block cache */
    rc = block_cache_init(devmap_handle, BPS(bs), 0 /* XXX */, cmode);
    if (rc != EOK) {
        block_fini(devmap_handle);
        async_answer_0(rid, rc);
        return;
    }

    /* Do some simple sanity checks on the file system. */
    rc = fat_sanity_check(bs, devmap_handle);
    if (rc != EOK) {
        (void) block_cache_fini(devmap_handle);
        block_fini(devmap_handle);
        async_answer_0(rid, rc);
        return;
    }

    rc = fat_idx_init_by_devmap_handle(devmap_handle);
    if (rc != EOK) {
        (void) block_cache_fini(devmap_handle);
        block_fini(devmap_handle);
        async_answer_0(rid, rc);
        return;
    }

    /* Initialize the root node. */
    fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
    if (!rfn) {
        (void) block_cache_fini(devmap_handle);
        block_fini(devmap_handle);
        fat_idx_fini_by_devmap_handle(devmap_handle);
        async_answer_0(rid, ENOMEM);
        return;
    }

    fs_node_initialize(rfn);
    fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
    if (!rootp) {
        free(rfn);
        (void) block_cache_fini(devmap_handle);
        block_fini(devmap_handle);
        fat_idx_fini_by_devmap_handle(devmap_handle);
        async_answer_0(rid, ENOMEM);
        return;
    }
    fat_node_initialize(rootp);

    /* The root node gets the reserved position (ROOTPAR, 0). */
    fat_idx_t *ridxp = fat_idx_get_by_pos(devmap_handle, FAT_CLST_ROOTPAR, 0);
    if (!ridxp) {
        free(rfn);
        free(rootp);
        (void) block_cache_fini(devmap_handle);
        block_fini(devmap_handle);
        fat_idx_fini_by_devmap_handle(devmap_handle);
        async_answer_0(rid, ENOMEM);
        return;
    }
    assert(ridxp->index == 0);
    /* ridxp->lock held */

    rootp->type = FAT_DIRECTORY;
    rootp->firstc = FAT_ROOT_CLST(bs);
    rootp->refcnt = 1;
    rootp->lnkcnt = 0;  /* FS root is not linked */

    if (FAT_IS_FAT32(bs)) {
        /* FAT32: root is a normal cluster chain; size by walking the FAT. */
        uint16_t clusters;
        rc = fat_clusters_get(&clusters, bs, devmap_handle, rootp->firstc);
        if (rc != EOK) {
            free(rfn);
            free(rootp);
            free(ridxp); /* TODO: Is it right way to free ridxp? */
            (void) block_cache_fini(devmap_handle);
            block_fini(devmap_handle);
            fat_idx_fini_by_devmap_handle(devmap_handle);
            /* NOTE(review): rc is discarded and ENOTSUP answered — confirm
             * whether rc should be propagated instead. */
            async_answer_0(rid, ENOTSUP);
            return;
        }
        rootp->size = BPS(bs) * SPC(bs) * clusters;
    } else
        /* FAT12/16: fixed-size root directory area. */
        rootp->size = RDE(bs) * sizeof(fat_dentry_t);

    rootp->idx = ridxp;
    ridxp->nodep = rootp;
    rootp->bp = rfn;
    rfn->data = rootp;

    fibril_mutex_unlock(&ridxp->lock);

    async_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}
1072
/** Handle the VFS "mount" request by delegating to the generic libfs code. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
    libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1077
/** Handle the VFS "unmounted" request: tear down a FAT instance.
 *
 * Refuses with EBUSY unless the root node holds exactly the two expected
 * references; otherwise releases the root, frees cached nodes and index
 * structures, flushes the block cache and shuts down libblock.
 *
 * @param rid     IPC request id to answer.
 * @param request IPC call carrying the devmap handle in ARG1.
 */
void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
{
    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
    fs_node_t *fn;
    fat_node_t *nodep;
    int rc;

    rc = fat_root_get(&fn, devmap_handle);
    if (rc != EOK) {
        async_answer_0(rid, rc);
        return;
    }
    nodep = FAT_NODE(fn);

    /*
     * We expect exactly two references on the root node. One for the
     * fat_root_get() above and one created in fat_mounted().
     */
    if (nodep->refcnt != 2) {
        (void) fat_node_put(fn);
        async_answer_0(rid, EBUSY);
        return;
    }

    /*
     * Put the root node and force it to the FAT free node list.
     */
    (void) fat_node_put(fn);
    (void) fat_node_put(fn);

    /*
     * Perform cleanup of the node structures, index structures and
     * associated data. Write back this file system's dirty blocks and
     * stop using libblock for this instance.
     */
    (void) fat_node_fini_by_devmap_handle(devmap_handle);
    fat_idx_fini_by_devmap_handle(devmap_handle);
    (void) block_cache_fini(devmap_handle);
    block_fini(devmap_handle);

    async_answer_0(rid, EOK);
}
1120
/** Handle the VFS "unmount" request by delegating to the generic libfs code. */
void fat_unmount(ipc_callid_t rid, ipc_call_t *request)
{
    libfs_unmount(&fat_libfs_ops, rid, request);
}
1125
/** Handle the VFS "lookup" request by delegating to the generic libfs code. */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
    libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1130
/** Handle the VFS_OUT_READ request.
 *
 * For a regular file, at most one block worth of data is read and the client
 * may receive fewer bytes than requested. For a directory, @a pos is a
 * directory position and a single entry name is returned per call; the end
 * of the directory is signalled with ENOENT.
 *
 * @param rid		Request ID to answer.
 * @param request	Request carrying the devmap handle, node index and the
 *			64-bit read position (merged from two IPC arguments).
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
	aoff64_t pos =
	    (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, devmap_handle, index);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		async_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	/* Accept the data transfer phase of the two-phase read protocol. */
	ipc_callid_t callid;
	size_t len;
	if (!async_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(devmap_handle);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) async_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp the transfer to the block boundary and EOF. */
			bytes = min(len, BPS(bs) - pos % BPS(bs));
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
			    BLOCK_FLAGS_NONE);
			if (rc != EOK) {
				fat_node_put(fn);
				async_answer_0(callid, rc);
				async_answer_0(rid, rc);
				return;
			}
			(void) async_data_read_finalize(callid,
			    b->data + pos % BPS(bs), bytes);
			rc = block_put(b);
			if (rc != EOK) {
				fat_node_put(fn);
				async_answer_0(rid, rc);
				return;
			}
		}
	} else {
		aoff64_t spos = pos;	/* remember start pos to compute bytes */
		char name[FAT_LFN_NAME_SIZE];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % BPS(bs) == 0);
		assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

		fat_directory_t di;
		fat_directory_open(nodep, &di);
		di.pos = pos;

		/* Fetch the next directory entry at the requested position. */
		rc = fat_directory_read(&di, name, &d);
		if (rc == EOK) goto hit;
		if (rc == ENOENT) goto miss;

err:
		/* Propagate the error to both the data call and the request. */
		(void) fat_node_put(fn);
		async_answer_0(callid, rc);
		async_answer_0(rid, rc);
		return;

miss:
		/* No more entries: signal the end of directory with ENOENT. */
		rc = fat_directory_close(&di);
		if (rc!=EOK)
			goto err;
		rc = fat_node_put(fn);
		async_answer_0(callid, rc != EOK ? rc : ENOENT);
		async_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
		return;

hit:
		/* Entry found: hand its name over to the client. */
		pos = di.pos;
		rc = fat_directory_close(&di);
		if (rc!=EOK)
			goto err;
		(void) async_data_read_finalize(callid, name, str_size(name) + 1);
		bytes = (pos - spos);	/* directory positions consumed */
	}

	rc = fat_node_put(fn);
	async_answer_1(rid, rc, (sysarg_t)bytes);
}
1240
1241void fat_write(ipc_callid_t rid, ipc_call_t *request)
1242{
1243 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
1244 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1245 aoff64_t pos =
1246 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
1247 fs_node_t *fn;
1248 fat_node_t *nodep;
1249 fat_bs_t *bs;
1250 size_t bytes, size;
1251 block_t *b;
1252 aoff64_t boundary;
1253 int flags = BLOCK_FLAGS_NONE;
1254 int rc;
1255
1256 rc = fat_node_get(&fn, devmap_handle, index);
1257 if (rc != EOK) {
1258 async_answer_0(rid, rc);
1259 return;
1260 }
1261 if (!fn) {
1262 async_answer_0(rid, ENOENT);
1263 return;
1264 }
1265 nodep = FAT_NODE(fn);
1266
1267 ipc_callid_t callid;
1268 size_t len;
1269 if (!async_data_write_receive(&callid, &len)) {
1270 (void) fat_node_put(fn);
1271 async_answer_0(callid, EINVAL);
1272 async_answer_0(rid, EINVAL);
1273 return;
1274 }
1275
1276 bs = block_bb_get(devmap_handle);
1277
1278 /*
1279 * In all scenarios, we will attempt to write out only one block worth
1280 * of data at maximum. There might be some more efficient approaches,
1281 * but this one greatly simplifies fat_write(). Note that we can afford
1282 * to do this because the client must be ready to handle the return
1283 * value signalizing a smaller number of bytes written.
1284 */
1285 bytes = min(len, BPS(bs) - pos % BPS(bs));
1286 if (bytes == BPS(bs))
1287 flags |= BLOCK_FLAGS_NOREAD;
1288
1289 boundary = ROUND_UP(nodep->size, BPC(bs));
1290 if (pos < boundary) {
1291 /*
1292 * This is the easier case - we are either overwriting already
1293 * existing contents or writing behind the EOF, but still within
1294 * the limits of the last cluster. The node size may grow to the
1295 * next block size boundary.
1296 */
1297 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1298 if (rc != EOK) {
1299 (void) fat_node_put(fn);
1300 async_answer_0(callid, rc);
1301 async_answer_0(rid, rc);
1302 return;
1303 }
1304 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
1305 if (rc != EOK) {
1306 (void) fat_node_put(fn);
1307 async_answer_0(callid, rc);
1308 async_answer_0(rid, rc);
1309 return;
1310 }
1311 (void) async_data_write_finalize(callid,
1312 b->data + pos % BPS(bs), bytes);
1313 b->dirty = true; /* need to sync block */
1314 rc = block_put(b);
1315 if (rc != EOK) {
1316 (void) fat_node_put(fn);
1317 async_answer_0(rid, rc);
1318 return;
1319 }
1320 if (pos + bytes > nodep->size) {
1321 nodep->size = pos + bytes;
1322 nodep->dirty = true; /* need to sync node */
1323 }
1324 size = nodep->size;
1325 rc = fat_node_put(fn);
1326 async_answer_2(rid, rc, bytes, nodep->size);
1327 return;
1328 } else {
1329 /*
1330 * This is the more difficult case. We must allocate new
1331 * clusters for the node and zero them out.
1332 */
1333 unsigned nclsts;
1334 fat_cluster_t mcl, lcl;
1335
1336 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
1337 /* create an independent chain of nclsts clusters in all FATs */
1338 rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);
1339 if (rc != EOK) {
1340 /* could not allocate a chain of nclsts clusters */
1341 (void) fat_node_put(fn);
1342 async_answer_0(callid, rc);
1343 async_answer_0(rid, rc);
1344 return;
1345 }
1346 /* zero fill any gaps */
1347 rc = fat_fill_gap(bs, nodep, mcl, pos);
1348 if (rc != EOK) {
1349 (void) fat_free_clusters(bs, devmap_handle, mcl);
1350 (void) fat_node_put(fn);
1351 async_answer_0(callid, rc);
1352 async_answer_0(rid, rc);
1353 return;
1354 }
1355 rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL,
1356 (pos / BPS(bs)) % SPC(bs), flags);
1357 if (rc != EOK) {
1358 (void) fat_free_clusters(bs, devmap_handle, mcl);
1359 (void) fat_node_put(fn);
1360 async_answer_0(callid, rc);
1361 async_answer_0(rid, rc);
1362 return;
1363 }
1364 (void) async_data_write_finalize(callid,
1365 b->data + pos % BPS(bs), bytes);
1366 b->dirty = true; /* need to sync block */
1367 rc = block_put(b);
1368 if (rc != EOK) {
1369 (void) fat_free_clusters(bs, devmap_handle, mcl);
1370 (void) fat_node_put(fn);
1371 async_answer_0(rid, rc);
1372 return;
1373 }
1374 /*
1375 * Append the cluster chain starting in mcl to the end of the
1376 * node's cluster chain.
1377 */
1378 rc = fat_append_clusters(bs, nodep, mcl, lcl);
1379 if (rc != EOK) {
1380 (void) fat_free_clusters(bs, devmap_handle, mcl);
1381 (void) fat_node_put(fn);
1382 async_answer_0(rid, rc);
1383 return;
1384 }
1385 nodep->size = size = pos + bytes;
1386 nodep->dirty = true; /* need to sync node */
1387 rc = fat_node_put(fn);
1388 async_answer_2(rid, rc, bytes, size);
1389 return;
1390 }
1391}
1392
/** Handle the VFS_OUT_TRUNCATE request.
 *
 * Shrinks a node to the requested size, deallocating clusters beyond the
 * new end of file where necessary. Growing a node via truncate is not
 * supported and yields EINVAL.
 *
 * @param rid		Request ID to answer.
 * @param request	Request carrying the devmap handle, node index and the
 *			64-bit new size (merged from two IPC arguments).
 */
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
	devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
	aoff64_t size =
	    (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	int rc;

	rc = fat_node_get(&fn, devmap_handle, index);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		async_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	bs = block_bb_get(devmap_handle);

	if (nodep->size == size) {
		/* Size is unchanged - nothing to do. */
		rc = EOK;
	} else if (nodep->size < size) {
		/*
		 * The standard says we have the freedom to grow the node.
		 * For now, we simply return an error.
		 */
		rc = EINVAL;
	} else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
		/*
		 * The node will be shrunk, but no clusters will be deallocated.
		 */
		nodep->size = size;
		nodep->dirty = true;		/* need to sync node */
		rc = EOK;
	} else {
		/*
		 * The node will be shrunk, clusters will be deallocated.
		 */
		if (size == 0) {
			/* Truncating to zero: drop the whole cluster chain. */
			rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
			if (rc != EOK)
				goto out;
		} else {
			/* Locate the new last cluster and cut the chain there. */
			fat_cluster_t lastc;
			rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
			    &lastc, NULL, (size - 1) / BPC(bs));
			if (rc != EOK)
				goto out;
			rc = fat_chop_clusters(bs, nodep, lastc);
			if (rc != EOK)
				goto out;
		}
		nodep->size = size;
		nodep->dirty = true;		/* need to sync node */
		rc = EOK;
	}
out:
	fat_node_put(fn);
	async_answer_0(rid, rc);
	return;
}
1459
/** Handle the VFS_OUT_CLOSE request.
 *
 * FAT keeps no per-open state, so closing is always a successful no-op.
 */
void fat_close(ipc_callid_t rid, ipc_call_t *request)
{
	async_answer_0(rid, EOK);
}
1464
1465void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1466{
1467 devmap_handle_t devmap_handle = (devmap_handle_t)IPC_GET_ARG1(*request);
1468 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1469 fs_node_t *fn;
1470 fat_node_t *nodep;
1471 int rc;
1472
1473 rc = fat_node_get(&fn, devmap_handle, index);
1474 if (rc != EOK) {
1475 async_answer_0(rid, rc);
1476 return;
1477 }
1478 if (!fn) {
1479 async_answer_0(rid, ENOENT);
1480 return;
1481 }
1482
1483 nodep = FAT_NODE(fn);
1484 /*
1485 * We should have exactly two references. One for the above
1486 * call to fat_node_get() and one from fat_unlink().
1487 */
1488 assert(nodep->refcnt == 2);
1489
1490 rc = fat_destroy_node(fn);
1491 async_answer_0(rid, rc);
1492}
1493
/** Handle the VFS_OUT_OPEN_NODE request by delegating to libfs. */
void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1498
/** Handle the VFS_OUT_STAT request by delegating to libfs. */
void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1503
1504void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1505{
1506 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
1507 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1508
1509 fs_node_t *fn;
1510 int rc = fat_node_get(&fn, devmap_handle, index);
1511 if (rc != EOK) {
1512 async_answer_0(rid, rc);
1513 return;
1514 }
1515 if (!fn) {
1516 async_answer_0(rid, ENOENT);
1517 return;
1518 }
1519
1520 fat_node_t *nodep = FAT_NODE(fn);
1521
1522 nodep->dirty = true;
1523 rc = fat_node_sync(nodep);
1524
1525 fat_node_put(fn);
1526 async_answer_0(rid, rc);
1527}
1528
1529/**
1530 * @}
1531 */
Note: See TracBrowser for help on using the repository browser.