source: mainline/uspace/srv/fs/fat/fat_ops.c@ 4b4668e

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 4b4668e was 4b4668e, checked in by Jakub Jermar <jakub@…>, 16 years ago

Make fat_link() return an error code instead of hitting an assertion on an I/O
error.

  • Property mode set to 100644
File size: 31.4 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/ipc.h>
45#include <ipc/services.h>
46#include <ipc/devmap.h>
47#include <async.h>
48#include <errno.h>
49#include <string.h>
50#include <byteorder.h>
51#include <adt/hash_table.h>
52#include <adt/list.h>
53#include <assert.h>
54#include <fibril_sync.h>
55#include <sys/mman.h>
56#include <align.h>
57
58#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
59#define FS_NODE(node) ((node) ? (node)->bp : NULL)
60
61/** Mutex protecting the list of cached free FAT nodes. */
62static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
63
64/** List of cached free FAT nodes. */
65static LIST_INITIALIZE(ffn_head);
66
67static void fat_node_initialize(fat_node_t *node)
68{
69 fibril_mutex_initialize(&node->lock);
70 node->bp = NULL;
71 node->idx = NULL;
72 node->type = 0;
73 link_initialize(&node->ffn_link);
74 node->size = 0;
75 node->lnkcnt = 0;
76 node->refcnt = 0;
77 node->dirty = false;
78}
79
/** Write the dentry metadata of a dirty in-core node back to its on-disk
 * directory entry.
 *
 * Only the first-cluster number, the file size (for files) and the
 * sub-directory attribute (for directories) are written; see the TODO below
 * for fields that are not yet synchronized.
 *
 * @param node	In-core node to synchronize. Must be marked dirty.
 *
 * @return	EOK on success or an error code from the block layer.
 */
static int fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;
	unsigned dps;
	int rc;
	
	assert(node->dirty);

	bs = block_bb_get(node->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);
	
	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
	    (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	/* Locate the dentry within the block. */
	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}
	
	/* TODO: update other fields? (e.g time fields) */
	
	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	return rc;
}
116
/** Obtain a fresh FAT in-core node.
 *
 * First tries to recycle a node from the free-node cache (syncing it to disk
 * if dirty); if the cache is empty or its head node is busy, falls back to
 * allocating new fs_node_t and fat_node_t structures from the heap.
 *
 * @return	Initialized node with its fs_node_t back-pointer wired up,
 *		or NULL on memory shortage.
 */
static fat_node_t *fat_node_get_new(void)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		/*
		 * Use trylock to avoid deadlock: the usual lock order is
		 * node -> ffn_mutex elsewhere, so we may not block here.
		 */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			/*
			 * NOTE(review): asserting on an I/O error; consider
			 * propagating the error instead (cf. fat_link()).
			 */
			assert(rc == EOK);
		}
		/* Detach the recycled node from its old index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return NULL;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return NULL;
		}
	}
	/* Reset both structures and link them together. */
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;
	
	return nodep;
}
165
/** Internal version of fat_node_get().
 *
 * Returns the in-core node for the given index structure, instantiating it
 * from the on-disk dentry if it is not already in memory.
 *
 * @param idxp	Locked index structure.
 *
 * @return	Referenced in-core node or NULL on memory shortage.
 */
static fat_node_t *fat_node_get_core(fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	unsigned bps;
	unsigned spc;
	unsigned dps;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		/* Taking the first reference removes it from the free list. */
		if (!idxp->nodep->refcnt++)
			list_remove(&idxp->nodep->ffn_link);
		fibril_mutex_unlock(&idxp->nodep->lock);
		return idxp->nodep;
	}

	/*
	 * We must instantiate the node from the file system.
	 */
	
	assert(idxp->pfc);

	nodep = fat_node_get_new();
	if (!nodep)
		return NULL;

	bs = block_bb_get(idxp->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
	    (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
	/* NOTE(review): asserts on I/O error; consider propagating. */
	assert(rc == EOK);

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
	if (d->attr & FAT_ATTR_SUBDIR) {
		/* 
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint16_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
		    uint16_t_le2host(d->firstc));
		assert(rc == EOK);
		nodep->size = bps * spc * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	assert(rc == EOK);

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	return nodep;
}
248
249/*
250 * Forward declarations of FAT libfs operations.
251 */
252static fs_node_t *fat_node_get(dev_handle_t, fs_index_t);
253static void fat_node_put(fs_node_t *);
254static fs_node_t *fat_create_node(dev_handle_t, int);
255static int fat_destroy_node(fs_node_t *);
256static int fat_link(fs_node_t *, fs_node_t *, const char *);
257static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
258static fs_node_t *fat_match(fs_node_t *, const char *);
259static fs_index_t fat_index_get(fs_node_t *);
260static size_t fat_size_get(fs_node_t *);
261static unsigned fat_lnkcnt_get(fs_node_t *);
262static bool fat_has_children(fs_node_t *);
263static fs_node_t *fat_root_get(dev_handle_t);
264static char fat_plb_get_char(unsigned);
265static bool fat_is_directory(fs_node_t *);
266static bool fat_is_file(fs_node_t *node);
267
268/*
269 * FAT libfs operations.
270 */
271
272/** Instantiate a FAT in-core node. */
273fs_node_t *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
274{
275 fat_node_t *nodep;
276 fat_idx_t *idxp;
277
278 idxp = fat_idx_get_by_index(dev_handle, index);
279 if (!idxp)
280 return NULL;
281 /* idxp->lock held */
282 nodep = fat_node_get_core(idxp);
283 fibril_mutex_unlock(&idxp->lock);
284 return FS_NODE(nodep);
285}
286
/** Drop one reference to an in-core node.
 *
 * When the last reference goes away, the node is either parked on the
 * free-node cache (normal case) or destroyed outright if it never acquired
 * an index structure.
 *
 * @param fn	Node to release.
 */
void fat_node_put(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	bool destroy = false;

	fibril_mutex_lock(&nodep->lock);
	if (!--nodep->refcnt) {
		if (nodep->idx) {
			fibril_mutex_lock(&ffn_mutex);
			list_append(&nodep->ffn_link, &ffn_head);
			fibril_mutex_unlock(&ffn_mutex);
		} else {
			/*
			 * The node does not have any index structure associated
			 * with itself. This can only mean that we are releasing
			 * the node after a failed attempt to allocate the index
			 * structure for it.
			 */
			destroy = true;
		}
	}
	fibril_mutex_unlock(&nodep->lock);
	/* Free only after dropping the lock that lives inside the node. */
	if (destroy) {
		free(nodep->bp);
		free(nodep);
	}
}
314
315fs_node_t *fat_create_node(dev_handle_t dev_handle, int flags)
316{
317 fat_idx_t *idxp;
318 fat_node_t *nodep;
319 fat_bs_t *bs;
320 fat_cluster_t mcl, lcl;
321 uint16_t bps;
322 int rc;
323
324 bs = block_bb_get(dev_handle);
325 bps = uint16_t_le2host(bs->bps);
326 if (flags & L_DIRECTORY) {
327 /* allocate a cluster */
328 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
329 if (rc != EOK)
330 return NULL;
331 }
332
333 nodep = fat_node_get_new();
334 if (!nodep) {
335 (void) fat_free_clusters(bs, dev_handle, mcl);
336 return NULL;
337 }
338 idxp = fat_idx_get_new(dev_handle);
339 if (!idxp) {
340 (void) fat_free_clusters(bs, dev_handle, mcl);
341 fat_node_put(FS_NODE(nodep));
342 return NULL;
343 }
344 /* idxp->lock held */
345 if (flags & L_DIRECTORY) {
346 /* Populate the new cluster with unused dentries. */
347 rc = fat_zero_cluster(bs, dev_handle, mcl);
348 assert(rc == EOK);
349 nodep->type = FAT_DIRECTORY;
350 nodep->firstc = mcl;
351 nodep->size = bps * bs->spc;
352 } else {
353 nodep->type = FAT_FILE;
354 nodep->firstc = FAT_CLST_RES0;
355 nodep->size = 0;
356 }
357 nodep->lnkcnt = 0; /* not linked anywhere */
358 nodep->refcnt = 1;
359 nodep->dirty = true;
360
361 nodep->idx = idxp;
362 idxp->nodep = nodep;
363
364 fibril_mutex_unlock(&idxp->lock);
365 return FS_NODE(nodep);
366}
367
/** Destroy an unlinked, childless node and release its on-disk clusters.
 *
 * @param fn	Node to destroy; must have lnkcnt == 0 and no children.
 *
 * @return	EOK on success or an error code from cluster deallocation.
 */
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	int rc = EOK;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	assert(fat_has_children(fn) == false);

	bs = block_bb_get(nodep->idx->dev_handle);
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->dev_handle,
		    nodep->firstc);
	}

	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}
400
/** Link a child node into a parent directory under the given name.
 *
 * Finds (or creates, by growing the parent) a free dentry, writes the name
 * into it, optionally creates the "." and ".." entries for directories, and
 * hashes the child's index structure into the position hash.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node to link; must have lnkcnt == 0.
 * @param name	Name for the new link (must be a valid FAT 8.3 name).
 *
 * @return	EOK on success, EMLINK if the child is already linked,
 *		ENOTSUP for unrepresentable names, ENOSPC if the root
 *		directory is full, or an I/O error code.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	unsigned i, j;
	uint16_t bps;
	unsigned dps;
	unsigned blocks;
	fat_cluster_t mcl, lcl;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get us an unused parent node's dentry or grow the parent and allocate
	 * a new one.
	 */
	
	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	blocks = parentp->size / bps;

	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot; b stays referenced */
				goto hit;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
	}
	/* No free slot found; the new dentry will be the first (i, j=0). */
	j = 0;
	
	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	if (parentp->firstc == FAT_CLST_ROOT) {
		/* Can't grow the root directory. */
		fibril_mutex_unlock(&parentp->idx->lock);
		return ENOSPC;
	}
	rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	/*
	 * NOTE(review): if either of the next two calls fails, the cluster
	 * allocated above is leaked -- consider freeing mcl on those paths.
	 */
	rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_append_clusters(bs, parentp, mcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	parentp->size += bps * bs->spc;
	parentp->dirty = true;		/* need to sync node */
	/* i == blocks here, i.e. the first block of the appended cluster. */
	rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	d = (fat_dentry_t *)b->data;

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except for the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	fibril_mutex_unlock(&parentp->idx->lock);
	if (rc != EOK)
		return rc;

	fibril_mutex_lock(&childp->idx->lock);
	
	/*
	 * If possible, create the Sub-directory Identifier Entry and the
	 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
	 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
	 * not use them anyway, so this is rather a sign of our good will.
	 */
	rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		/*
		 * Rather than returning an error, simply skip the creation of
		 * these two entries.
		 */
		goto skip_dots;
	}
	d = (fat_dentry_t *)b->data;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    str_cmp(d->name, FAT_NAME_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		str_cpy(d->name, 8, FAT_NAME_DOT);
		str_cpy(d->ext, 3, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		d->firstc = host2uint16_t_le(childp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	d++;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
		str_cpy(d->ext, 3, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		/* ".." in a first-level directory points at the root (RES0). */
		d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
		    host2uint16_t_le(FAT_CLST_RES0) :
		    host2uint16_t_le(parentp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	b->dirty = true;		/* need to sync block */
	/*
	 * Ignore the return value as we would have fallen through on error
	 * anyway.
	 */
	(void) block_put(b);
skip_dots:

	/* Record the child's position within the parent. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * dps + j;
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
581
582int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
583{
584 fat_node_t *parentp = FAT_NODE(pfn);
585 fat_node_t *childp = FAT_NODE(cfn);
586 fat_bs_t *bs;
587 fat_dentry_t *d;
588 uint16_t bps;
589 block_t *b;
590 int rc;
591
592 if (!parentp)
593 return EBUSY;
594
595 if (fat_has_children(cfn))
596 return ENOTEMPTY;
597
598 fibril_mutex_lock(&parentp->lock);
599 fibril_mutex_lock(&childp->lock);
600 assert(childp->lnkcnt == 1);
601 fibril_mutex_lock(&childp->idx->lock);
602 bs = block_bb_get(childp->idx->dev_handle);
603 bps = uint16_t_le2host(bs->bps);
604
605 rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
606 (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
607 BLOCK_FLAGS_NONE);
608 assert(rc == EOK);
609 d = (fat_dentry_t *)b->data +
610 (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
611 /* mark the dentry as not-currently-used */
612 d->name[0] = FAT_DENTRY_ERASED;
613 b->dirty = true; /* need to sync block */
614 rc = block_put(b);
615 assert(rc == EOK);
616
617 /* remove the index structure from the position hash */
618 fat_idx_hashout(childp->idx);
619 /* clear position information */
620 childp->idx->pfc = FAT_CLST_RES0;
621 childp->idx->pdi = 0;
622 fibril_mutex_unlock(&childp->idx->lock);
623 childp->lnkcnt = 0;
624 childp->dirty = true;
625 fibril_mutex_unlock(&childp->lock);
626 fibril_mutex_unlock(&parentp->lock);
627
628 return EOK;
629}
630
/** Look up a directory entry by name within a parent directory.
 *
 * @param pfn		Parent directory node.
 * @param component	Name to look for.
 *
 * @return	Matching node, or NULL when not found or on memory shortage.
 */
fs_node_t *fat_match(fs_node_t *pfn, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	fat_dentry_t *d;
	block_t *b;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);
	blocks = parentp->size / bps;
	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		/* NOTE(review): asserts on I/O error; consider propagating. */
		assert(rc == EOK);
		for (j = 0; j < dps; j++) { 
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* Past the last used entry -- no match. */
				rc = block_put(b);
				assert(rc == EOK);
				fibril_mutex_unlock(&parentp->idx->lock);
				return NULL;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				fat_node_t *nodep;
				/*
				 * Assume tree hierarchy for locking. We
				 * already have the parent and now we are going
				 * to lock the child. Never lock in the opposite
				 * order.
				 */
				fat_idx_t *idx = fat_idx_get_by_pos(
				    parentp->idx->dev_handle, parentp->firstc,
				    i * dps + j);
				fibril_mutex_unlock(&parentp->idx->lock);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					rc = block_put(b);
					assert(rc == EOK);
					return NULL;
				}
				nodep = fat_node_get_core(idx);
				fibril_mutex_unlock(&idx->lock);
				rc = block_put(b);
				assert(rc == EOK);
				return FS_NODE(nodep);
			}
		}
		rc = block_put(b);
		assert(rc == EOK);
	}

	fibril_mutex_unlock(&parentp->idx->lock);
	return NULL;
}
704
705fs_index_t fat_index_get(fs_node_t *fn)
706{
707 return FAT_NODE(fn)->idx->index;
708}
709
710size_t fat_size_get(fs_node_t *fn)
711{
712 return FAT_NODE(fn)->size;
713}
714
715unsigned fat_lnkcnt_get(fs_node_t *fn)
716{
717 return FAT_NODE(fn)->lnkcnt;
718}
719
720bool fat_has_children(fs_node_t *fn)
721{
722 fat_bs_t *bs;
723 fat_node_t *nodep = FAT_NODE(fn);
724 unsigned bps;
725 unsigned dps;
726 unsigned blocks;
727 block_t *b;
728 unsigned i, j;
729 int rc;
730
731 if (nodep->type != FAT_DIRECTORY)
732 return false;
733
734 fibril_mutex_lock(&nodep->idx->lock);
735 bs = block_bb_get(nodep->idx->dev_handle);
736 bps = uint16_t_le2host(bs->bps);
737 dps = bps / sizeof(fat_dentry_t);
738
739 blocks = nodep->size / bps;
740
741 for (i = 0; i < blocks; i++) {
742 fat_dentry_t *d;
743
744 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
745 assert(rc == EOK);
746 for (j = 0; j < dps; j++) {
747 d = ((fat_dentry_t *)b->data) + j;
748 switch (fat_classify_dentry(d)) {
749 case FAT_DENTRY_SKIP:
750 case FAT_DENTRY_FREE:
751 continue;
752 case FAT_DENTRY_LAST:
753 rc = block_put(b);
754 assert(rc == EOK);
755 fibril_mutex_unlock(&nodep->idx->lock);
756 return false;
757 default:
758 case FAT_DENTRY_VALID:
759 rc = block_put(b);
760 assert(rc == EOK);
761 fibril_mutex_unlock(&nodep->idx->lock);
762 return true;
763 }
764 rc = block_put(b);
765 assert(rc == EOK);
766 fibril_mutex_unlock(&nodep->idx->lock);
767 return true;
768 }
769 rc = block_put(b);
770 assert(rc == EOK);
771 }
772
773 fibril_mutex_unlock(&nodep->idx->lock);
774 return false;
775}
776
777fs_node_t *fat_root_get(dev_handle_t dev_handle)
778{
779 return fat_node_get(dev_handle, 0);
780}
781
782char fat_plb_get_char(unsigned pos)
783{
784 return fat_reg.plb_ro[pos % PLB_SIZE];
785}
786
787bool fat_is_directory(fs_node_t *fn)
788{
789 return FAT_NODE(fn)->type == FAT_DIRECTORY;
790}
791
792bool fat_is_file(fs_node_t *fn)
793{
794 return FAT_NODE(fn)->type == FAT_FILE;
795}
796
/** libfs operations vector exported by the FAT server to libfs. */
libfs_ops_t fat_libfs_ops = {
	.match = fat_match,
	.node_get = fat_node_get,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.has_children = fat_has_children,
	.root_get = fat_root_get,
	.plb_get_char =	fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file
};
815
816/*
817 * VFS operations.
818 */
819
820void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
821{
822 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
823 enum cache_mode cmode;
824 fat_bs_t *bs;
825 uint16_t bps;
826 uint16_t rde;
827 int rc;
828
829 /* accept the mount options */
830 ipc_callid_t callid;
831 size_t size;
832 if (!ipc_data_write_receive(&callid, &size)) {
833 ipc_answer_0(callid, EINVAL);
834 ipc_answer_0(rid, EINVAL);
835 return;
836 }
837 char *opts = malloc(size + 1);
838 if (!opts) {
839 ipc_answer_0(callid, ENOMEM);
840 ipc_answer_0(rid, ENOMEM);
841 return;
842 }
843 ipcarg_t retval = ipc_data_write_finalize(callid, opts, size);
844 if (retval != EOK) {
845 ipc_answer_0(rid, retval);
846 free(opts);
847 return;
848 }
849 opts[size] = '\0';
850
851 /* Check for option enabling write through. */
852 if (str_cmp(opts, "wtcache") == 0)
853 cmode = CACHE_MODE_WT;
854 else
855 cmode = CACHE_MODE_WB;
856
857 /* initialize libblock */
858 rc = block_init(dev_handle, BS_SIZE);
859 if (rc != EOK) {
860 ipc_answer_0(rid, rc);
861 return;
862 }
863
864 /* prepare the boot block */
865 rc = block_bb_read(dev_handle, BS_BLOCK);
866 if (rc != EOK) {
867 block_fini(dev_handle);
868 ipc_answer_0(rid, rc);
869 return;
870 }
871
872 /* get the buffer with the boot sector */
873 bs = block_bb_get(dev_handle);
874
875 /* Read the number of root directory entries. */
876 bps = uint16_t_le2host(bs->bps);
877 rde = uint16_t_le2host(bs->root_ent_max);
878
879 if (bps != BS_SIZE) {
880 block_fini(dev_handle);
881 ipc_answer_0(rid, ENOTSUP);
882 return;
883 }
884
885 /* Initialize the block cache */
886 rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
887 if (rc != EOK) {
888 block_fini(dev_handle);
889 ipc_answer_0(rid, rc);
890 return;
891 }
892
893 rc = fat_idx_init_by_dev_handle(dev_handle);
894 if (rc != EOK) {
895 block_fini(dev_handle);
896 ipc_answer_0(rid, rc);
897 return;
898 }
899
900 /* Initialize the root node. */
901 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
902 if (!rfn) {
903 block_fini(dev_handle);
904 fat_idx_fini_by_dev_handle(dev_handle);
905 ipc_answer_0(rid, ENOMEM);
906 return;
907 }
908 fs_node_initialize(rfn);
909 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
910 if (!rootp) {
911 free(rfn);
912 block_fini(dev_handle);
913 fat_idx_fini_by_dev_handle(dev_handle);
914 ipc_answer_0(rid, ENOMEM);
915 return;
916 }
917 fat_node_initialize(rootp);
918
919 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
920 if (!ridxp) {
921 free(rfn);
922 free(rootp);
923 block_fini(dev_handle);
924 fat_idx_fini_by_dev_handle(dev_handle);
925 ipc_answer_0(rid, ENOMEM);
926 return;
927 }
928 assert(ridxp->index == 0);
929 /* ridxp->lock held */
930
931 rootp->type = FAT_DIRECTORY;
932 rootp->firstc = FAT_CLST_ROOT;
933 rootp->refcnt = 1;
934 rootp->lnkcnt = 0; /* FS root is not linked */
935 rootp->size = rde * sizeof(fat_dentry_t);
936 rootp->idx = ridxp;
937 ridxp->nodep = rootp;
938 rootp->bp = rfn;
939 rfn->data = rootp;
940
941 fibril_mutex_unlock(&ridxp->lock);
942
943 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
944}
945
/** Handle VFS_MOUNT by delegating to the generic libfs implementation. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
950
/** Handle VFS_LOOKUP by delegating to the generic libfs implementation. */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
955
956void fat_read(ipc_callid_t rid, ipc_call_t *request)
957{
958 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
959 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
960 off_t pos = (off_t)IPC_GET_ARG3(*request);
961 fs_node_t *fn = fat_node_get(dev_handle, index);
962 fat_node_t *nodep;
963 fat_bs_t *bs;
964 uint16_t bps;
965 size_t bytes;
966 block_t *b;
967 int rc;
968
969 if (!fn) {
970 ipc_answer_0(rid, ENOENT);
971 return;
972 }
973 nodep = FAT_NODE(fn);
974
975 ipc_callid_t callid;
976 size_t len;
977 if (!ipc_data_read_receive(&callid, &len)) {
978 fat_node_put(fn);
979 ipc_answer_0(callid, EINVAL);
980 ipc_answer_0(rid, EINVAL);
981 return;
982 }
983
984 bs = block_bb_get(dev_handle);
985 bps = uint16_t_le2host(bs->bps);
986
987 if (nodep->type == FAT_FILE) {
988 /*
989 * Our strategy for regular file reads is to read one block at
990 * most and make use of the possibility to return less data than
991 * requested. This keeps the code very simple.
992 */
993 if (pos >= nodep->size) {
994 /* reading beyond the EOF */
995 bytes = 0;
996 (void) ipc_data_read_finalize(callid, NULL, 0);
997 } else {
998 bytes = min(len, bps - pos % bps);
999 bytes = min(bytes, nodep->size - pos);
1000 rc = fat_block_get(&b, bs, nodep, pos / bps,
1001 BLOCK_FLAGS_NONE);
1002 assert(rc == EOK);
1003 (void) ipc_data_read_finalize(callid, b->data + pos % bps,
1004 bytes);
1005 rc = block_put(b);
1006 assert(rc == EOK);
1007 }
1008 } else {
1009 unsigned bnum;
1010 off_t spos = pos;
1011 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
1012 fat_dentry_t *d;
1013
1014 assert(nodep->type == FAT_DIRECTORY);
1015 assert(nodep->size % bps == 0);
1016 assert(bps % sizeof(fat_dentry_t) == 0);
1017
1018 /*
1019 * Our strategy for readdir() is to use the position pointer as
1020 * an index into the array of all dentries. On entry, it points
1021 * to the first unread dentry. If we skip any dentries, we bump
1022 * the position pointer accordingly.
1023 */
1024 bnum = (pos * sizeof(fat_dentry_t)) / bps;
1025 while (bnum < nodep->size / bps) {
1026 off_t o;
1027
1028 rc = fat_block_get(&b, bs, nodep, bnum,
1029 BLOCK_FLAGS_NONE);
1030 assert(rc == EOK);
1031 for (o = pos % (bps / sizeof(fat_dentry_t));
1032 o < bps / sizeof(fat_dentry_t);
1033 o++, pos++) {
1034 d = ((fat_dentry_t *)b->data) + o;
1035 switch (fat_classify_dentry(d)) {
1036 case FAT_DENTRY_SKIP:
1037 case FAT_DENTRY_FREE:
1038 continue;
1039 case FAT_DENTRY_LAST:
1040 rc = block_put(b);
1041 assert(rc == EOK);
1042 goto miss;
1043 default:
1044 case FAT_DENTRY_VALID:
1045 fat_dentry_name_get(d, name);
1046 rc == block_put(b);
1047 assert(rc == EOK);
1048 goto hit;
1049 }
1050 }
1051 rc = block_put(b);
1052 assert(rc == EOK);
1053 bnum++;
1054 }
1055miss:
1056 fat_node_put(fn);
1057 ipc_answer_0(callid, ENOENT);
1058 ipc_answer_1(rid, ENOENT, 0);
1059 return;
1060hit:
1061 (void) ipc_data_read_finalize(callid, name, str_size(name) + 1);
1062 bytes = (pos - spos) + 1;
1063 }
1064
1065 fat_node_put(fn);
1066 ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
1067}
1068
/** Handle the VFS_WRITE request.
 *
 * Writes at most one block worth of data per call; the client is expected to
 * cope with short writes. Two cases are distinguished: writing within the
 * clusters already allocated to the node (possibly growing the size up to the
 * cluster boundary), and writing past the last allocated cluster, which
 * requires allocating and zeroing new clusters first.
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_WRITE request.
 */
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn = fat_node_get(dev_handle, index);
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	uint16_t bps;
	unsigned spc;
	unsigned bpc;		/* bytes per cluster */
	off_t boundary;
	int flags = BLOCK_FLAGS_NONE;
	int rc;
	
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);
	
	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_write_receive(&callid, &len)) {
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	/*
	 * In all scenarios, we will attempt to write out only one block worth
	 * of data at maximum. There might be some more efficient approaches,
	 * but this one greatly simplifies fat_write(). Note that we can afford
	 * to do this because the client must be ready to handle the return
	 * value signalizing a smaller number of bytes written.
	 */
	bytes = min(len, bps - pos % bps);
	/* Whole-block writes need not read the old contents first. */
	if (bytes == bps)
		flags |= BLOCK_FLAGS_NOREAD;
	
	boundary = ROUND_UP(nodep->size, bpc);
	if (pos < boundary) {
		/*
		 * This is the easier case - we are either overwriting already
		 * existing contents or writing behind the EOF, but still within
		 * the limits of the last cluster. The node size may grow to the
		 * next block size boundary.
		 */
		rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
		/* NOTE(review): asserts on I/O error; consider propagating. */
		assert(rc == EOK);
		rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
		assert(rc == EOK);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		rc = block_put(b);
		assert(rc == EOK);
		if (pos + bytes > nodep->size) {
			nodep->size = pos + bytes;
			nodep->dirty = true;	/* need to sync node */
		}
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(fn);
		return;
	} else {
		/*
		 * This is the more difficult case. We must allocate new
		 * clusters for the node and zero them out.
		 */
		int status;
		unsigned nclsts;
		fat_cluster_t mcl, lcl;
 
		nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
		/* create an independent chain of nclsts clusters in all FATs */
		status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
		if (status != EOK) {
			/* could not allocate a chain of nclsts clusters */
			fat_node_put(fn);
			ipc_answer_0(callid, status);
			ipc_answer_0(rid, status);
			return;
		}
		/* zero fill any gaps */
		rc = fat_fill_gap(bs, nodep, mcl, pos);
		assert(rc == EOK);
		rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
		    flags);
		assert(rc == EOK);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		rc = block_put(b);
		assert(rc == EOK);
		/*
		 * Append the cluster chain starting in mcl to the end of the
		 * node's cluster chain.
		 */
		rc = fat_append_clusters(bs, nodep, mcl);
		assert(rc == EOK);
		nodep->size = pos + bytes;
		nodep->dirty = true;		/* need to sync node */
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(fn);
		return;
	}
}
1184
/** Handle the VFS_TRUNCATE request.
 *
 * Shrinks a node to the requested size, deallocating clusters past the new
 * end where necessary. Growing a node this way is not supported (EINVAL).
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_TRUNCATE request.
 */
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	/* NOTE(review): cast via off_t then stored in size_t -- confirm. */
	size_t size = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn = fat_node_get(dev_handle, index);
	fat_node_t *nodep;
	fat_bs_t *bs;
	uint16_t bps;
	uint8_t spc;
	unsigned bpc;	/* bytes per cluster */
	int rc;

	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	if (nodep->size == size) {
		rc = EOK;
	} else if (nodep->size < size) {
		/*
		 * The standard says we have the freedom to grow the node.
		 * For now, we simply return an error.
		 */
		rc = EINVAL;
	} else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
		/*
		 * The node will be shrunk, but no clusters will be deallocated.
		 */
		nodep->size = size;
		nodep->dirty = true;	/* need to sync node */
		rc = EOK;	
	} else {
		/*
		 * The node will be shrunk, clusters will be deallocated.
		 */
		if (size == 0) {
			rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
			if (rc != EOK)
				goto out;
		} else {
			fat_cluster_t lastc;
			/* Find the last cluster to keep, then chop after it. */
			rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
			    &lastc, NULL, (size - 1) / bpc);
			if (rc != EOK)
				goto out;
			rc = fat_chop_clusters(bs, nodep, lastc);
			if (rc != EOK)
				goto out;
		}
		nodep->size = size;
		nodep->dirty = true;	/* need to sync node */
		rc = EOK;	
	}
out:
	fat_node_put(fn);
	ipc_answer_0(rid, rc);
	return;
}
1251
/** Handle VFS_CLOSE; nothing to do on close, so simply answer EOK. */
void fat_close(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_answer_0(rid, EOK);
}
1256
/** Handle VFS_DESTROY: destroy the node identified by the request. */
void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	int rc;

	fs_node_t *fn = fat_node_get(dev_handle, index);
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	rc = fat_destroy_node(fn);
	ipc_answer_0(rid, rc);
}
1272
/** Handle VFS_OPEN_NODE by delegating to the generic libfs implementation. */
void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1277
/** Handle VFS_STAT by delegating to the generic libfs implementation. */
void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1282
/** Handle VFS_SYNC; the write-back cache is not flushed yet (dummy). */
void fat_sync(ipc_callid_t rid, ipc_call_t *request)
{
	/* Dummy implementation */
	ipc_answer_0(rid, EOK);
}
1288
1289/**
1290 * @}
1291 */
Note: See TracBrowser for help on using the repository browser.