source: mainline/uspace/srv/fs/fat/fat_ops.c@ 00a9625

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 00a9625 was 00a9625, checked in by Jakub Jermar <jakub@…>, 16 years ago

fat_create_node() should mark new zeroed blocks dirty.

  • Property mode set to 100644
File size: 29.9 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/ipc.h>
45#include <ipc/services.h>
46#include <ipc/devmap.h>
47#include <async.h>
48#include <errno.h>
49#include <string.h>
50#include <byteorder.h>
51#include <adt/hash_table.h>
52#include <adt/list.h>
53#include <assert.h>
54#include <fibril_sync.h>
55#include <sys/mman.h>
56#include <align.h>
57
58#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
59#define FS_NODE(node) ((node) ? (node)->bp : NULL)
60
61/** Mutex protecting the list of cached free FAT nodes. */
62static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
63
64/** List of cached free FAT nodes. */
65static LIST_INITIALIZE(ffn_head);
66
67static void fat_node_initialize(fat_node_t *node)
68{
69 fibril_mutex_initialize(&node->lock);
70 node->bp = NULL;
71 node->idx = NULL;
72 node->type = 0;
73 link_initialize(&node->ffn_link);
74 node->size = 0;
75 node->lnkcnt = 0;
76 node->refcnt = 0;
77 node->dirty = false;
78}
79
/** Write the on-disk dentry of a dirty in-core node back to its block.
 *
 * Only the first-cluster number, the file size (for files) and the
 * subdirectory attribute (for directories) are written back; time fields
 * are left untouched for now (see TODO below).
 *
 * @param node	Dirty in-core node; node->idx must describe the position
 *		(pfc, pdi) of the node's dentry within the parent directory.
 */
static void fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;
	unsigned dps;
	
	assert(node->dirty);

	bs = block_bb_get(node->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);
	
	/* Read the block that contains the dentry of interest. */
	b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
	    (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);

	/* Locate the dentry within the block. */
	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}
	
	/* TODO: update other fields? (e.g time fields) */
	
	b->dirty = true;		/* need to sync block */
	block_put(b);
}
112
/** Obtain an unused in-core FAT node structure.
 *
 * First tries to recycle a node from the list of cached free nodes (syncing
 * it to disk first if it is dirty and detaching it from its old index
 * structure). If the cache cannot be used, a fresh fs_node_t/fat_node_t
 * pair is allocated.
 *
 * @return	Initialized fat_node_t with its bp member linked to the
 *		associated fs_node_t, or NULL on memory shortage.
 */
static fat_node_t *fat_node_get_new(void)
{
	fs_node_t *fn;
	fat_node_t *nodep;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		/*
		 * Trylocks only: we must not block on the node or index lock
		 * while holding ffn_mutex; on contention fall back to a fresh
		 * allocation.
		 */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty)
			fat_node_sync(nodep);
		/* Detach the recycled node from its old index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return NULL;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return NULL;
		}
	}
	/* Reset both structures regardless of where they came from. */
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;
	
	return nodep;
}
158
/** Internal version of fat_node_get().
 *
 * Returns the in-core node associated with the given index structure,
 * instantiating it from the on-disk dentry if necessary.
 *
 * @param idxp	Locked index structure.
 *
 * @return	In-core node with its reference count bumped, or NULL on
 *		memory shortage.
 */
static fat_node_t *fat_node_get_core(fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	unsigned bps;
	unsigned spc;
	unsigned dps;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		/* First new reference takes the node off the free list. */
		if (!idxp->nodep->refcnt++)
			list_remove(&idxp->nodep->ffn_link);
		fibril_mutex_unlock(&idxp->nodep->lock);
		return idxp->nodep;
	}

	/*
	 * We must instantiate the node from the file system.
	 */
	
	assert(idxp->pfc);

	nodep = fat_node_get_new();
	if (!nodep)
		return NULL;

	bs = block_bb_get(idxp->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
	    (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
	assert(b);

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
		    uint16_t_le2host(d->firstc));
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	block_put(b);

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	return nodep;
}
236
237/*
238 * Forward declarations of FAT libfs operations.
239 */
240static fs_node_t *fat_node_get(dev_handle_t, fs_index_t);
241static void fat_node_put(fs_node_t *);
242static fs_node_t *fat_create_node(dev_handle_t, int);
243static int fat_destroy_node(fs_node_t *);
244static int fat_link(fs_node_t *, fs_node_t *, const char *);
245static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
246static fs_node_t *fat_match(fs_node_t *, const char *);
247static fs_index_t fat_index_get(fs_node_t *);
248static size_t fat_size_get(fs_node_t *);
249static unsigned fat_lnkcnt_get(fs_node_t *);
250static bool fat_has_children(fs_node_t *);
251static fs_node_t *fat_root_get(dev_handle_t);
252static char fat_plb_get_char(unsigned);
253static bool fat_is_directory(fs_node_t *);
254static bool fat_is_file(fs_node_t *node);
255
256/*
257 * FAT libfs operations.
258 */
259
260/** Instantiate a FAT in-core node. */
261fs_node_t *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
262{
263 fat_node_t *nodep;
264 fat_idx_t *idxp;
265
266 idxp = fat_idx_get_by_index(dev_handle, index);
267 if (!idxp)
268 return NULL;
269 /* idxp->lock held */
270 nodep = fat_node_get_core(idxp);
271 fibril_mutex_unlock(&idxp->lock);
272 return FS_NODE(nodep);
273}
274
/** Drop one reference to an in-core node.
 *
 * When the last reference is dropped, the node is either parked on the list
 * of cached free nodes (normal case) or destroyed outright (when it has no
 * index structure, i.e. after a failed index allocation).
 *
 * @param fn	FS node whose reference is being dropped.
 */
void fat_node_put(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	bool destroy = false;

	fibril_mutex_lock(&nodep->lock);
	if (!--nodep->refcnt) {
		if (nodep->idx) {
			fibril_mutex_lock(&ffn_mutex);
			list_append(&nodep->ffn_link, &ffn_head);
			fibril_mutex_unlock(&ffn_mutex);
		} else {
			/*
			 * The node does not have any index structure associated
			 * with itself. This can only mean that we are releasing
			 * the node after a failed attempt to allocate the index
			 * structure for it.
			 */
			destroy = true;
		}
	}
	fibril_mutex_unlock(&nodep->lock);
	/* Free outside of the node lock. */
	if (destroy) {
		free(nodep->bp);
		free(nodep);
	}
}
302
303fs_node_t *fat_create_node(dev_handle_t dev_handle, int flags)
304{
305 fat_idx_t *idxp;
306 fat_node_t *nodep;
307 fat_bs_t *bs;
308 fat_cluster_t mcl, lcl;
309 uint16_t bps;
310 int rc;
311
312 bs = block_bb_get(dev_handle);
313 bps = uint16_t_le2host(bs->bps);
314 if (flags & L_DIRECTORY) {
315 /* allocate a cluster */
316 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
317 if (rc != EOK)
318 return NULL;
319 }
320
321 nodep = fat_node_get_new();
322 if (!nodep) {
323 fat_free_clusters(bs, dev_handle, mcl);
324 return NULL;
325 }
326 idxp = fat_idx_get_new(dev_handle);
327 if (!idxp) {
328 fat_free_clusters(bs, dev_handle, mcl);
329 fat_node_put(FS_NODE(nodep));
330 return NULL;
331 }
332 /* idxp->lock held */
333 if (flags & L_DIRECTORY) {
334 int i;
335 block_t *b;
336
337 /*
338 * Populate the new cluster with unused dentries.
339 */
340 for (i = 0; i < bs->spc; i++) {
341 b = _fat_block_get(bs, dev_handle, mcl, i,
342 BLOCK_FLAGS_NOREAD);
343 /* mark all dentries as never-used */
344 memset(b->data, 0, bps);
345 b->dirty = true;
346 block_put(b);
347 }
348 nodep->type = FAT_DIRECTORY;
349 nodep->firstc = mcl;
350 nodep->size = bps * bs->spc;
351 } else {
352 nodep->type = FAT_FILE;
353 nodep->firstc = FAT_CLST_RES0;
354 nodep->size = 0;
355 }
356 nodep->lnkcnt = 0; /* not linked anywhere */
357 nodep->refcnt = 1;
358 nodep->dirty = true;
359
360 nodep->idx = idxp;
361 idxp->nodep = nodep;
362
363 fibril_mutex_unlock(&idxp->lock);
364 return FS_NODE(nodep);
365}
366
/** Destroy an unlinked FAT node and free its on-disk resources.
 *
 * The node must be unreachable from the file system (lnkcnt == 0) and, if
 * it is a directory, it must be empty.
 *
 * @param fn	FS node to destroy.
 * @return	EOK on success.
 */
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	assert(fat_has_children(fn) == false);

	bs = block_bb_get(nodep->idx->dev_handle);
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc);
	}

	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return EOK;
}
397
/** Link a child node into a parent directory under the given name.
 *
 * Finds (or creates, by growing the parent) an unused dentry in the parent,
 * writes the name into it, optionally creates the "." and ".." entries in
 * the child, and finally updates the child's index structure and link count.
 *
 * @param pfn	FS node of the parent directory.
 * @param cfn	FS node of the child to be linked (lnkcnt must be 0).
 * @param name	Name under which the child is to be linked.
 *
 * @return	EOK on success, EMLINK if the child is already linked,
 *		ENOTSUP for names not expressible in FAT 8.3 format,
 *		ENOSPC when the root directory is full, or an error code
 *		from cluster allocation.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	unsigned i, j;
	uint16_t bps;
	unsigned dps;
	unsigned blocks;
	fat_cluster_t mcl, lcl;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get us an unused parent node's dentry or grow the parent and allocate
	 * a new one.
	 */
	
	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	blocks = parentp->size / bps;

	/* Scan the parent's dentries for an empty slot. */
	for (i = 0; i < blocks; i++) {
		b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot; b stays held */
				goto hit;
			}
		}
		block_put(b);
	}
	/* Grow path uses the first dentry of the new block. */
	j = 0;
	
	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	if (parentp->idx->pfc == FAT_CLST_ROOT) {
		/* Can't grow the root directory. */
		fibril_mutex_unlock(&parentp->idx->lock);
		return ENOSPC;
	}
	rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	fat_append_clusters(bs, parentp, mcl);
	/* i == blocks here, i.e. the first block of the appended cluster. */
	b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NOREAD);
	d = (fat_dentry_t *)b->data;
	/*
	 * Clear all dentries in the block except for the first one (the first
	 * dentry will be cleared in the next step).
	 */
	memset(d + 1, 0, bps - sizeof(fat_dentry_t));

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child.  The dentry, except of the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;		/* need to sync block */
	block_put(b);
	fibril_mutex_unlock(&parentp->idx->lock);

	fibril_mutex_lock(&childp->idx->lock);
	
	/*
	 * If possible, create the Sub-directory Identifier Entry and the
	 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
	 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
	 * not use them anyway, so this is rather a sign of our good will.
	 *
	 * NOTE(review): this block also runs for regular files (childp may not
	 * be a directory); presumably fat_block_get() copes with cluster-less
	 * files here — verify.
	 */
	b = fat_block_get(bs, childp, 0, BLOCK_FLAGS_NONE);
	d = (fat_dentry_t *)b->data;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    str_cmp(d->name, FAT_NAME_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		str_cpy(d->name, 8, FAT_NAME_DOT);
		str_cpy(d->ext, 3, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		d->firstc = host2uint16_t_le(childp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	d++;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
		str_cpy(d->ext, 3, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
		    host2uint16_t_le(FAT_CLST_RES0) :
		    host2uint16_t_le(parentp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	b->dirty = true;		/* need to sync block */
	block_put(b);

	/* Record the child's dentry position in its index structure. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * dps + j;
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
546
/** Unlink a child node from its parent directory.
 *
 * Marks the child's on-disk dentry as erased, removes the child's index
 * structure from the position hash and resets its position information,
 * and drops the child's link count to zero.
 *
 * @param pfn	FS node of the parent directory (must not be NULL).
 * @param cfn	FS node of the child to unlink (lnkcnt must be 1).
 * @param nm	Name of the child (unused here; position comes from the
 *		child's index structure).
 *
 * @return	EOK on success, EBUSY when there is no parent, ENOTEMPTY
 *		when the child is a non-empty directory.
 */
int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;
	block_t *b;

	if (!parentp)
		return EBUSY;
	
	if (fat_has_children(cfn))
		return ENOTEMPTY;

	fibril_mutex_lock(&parentp->lock);
	fibril_mutex_lock(&childp->lock);
	assert(childp->lnkcnt == 1);
	fibril_mutex_lock(&childp->idx->lock);
	bs = block_bb_get(childp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);

	/* Fetch the block holding the child's dentry. */
	b = _fat_block_get(bs, childp->idx->dev_handle, childp->idx->pfc,
	    (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
	    BLOCK_FLAGS_NONE);
	d = (fat_dentry_t *)b->data +
	    (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
	/* mark the dentry as not-currently-used */
	d->name[0] = FAT_DENTRY_ERASED;
	b->dirty = true;		/* need to sync block */
	block_put(b);

	/* remove the index structure from the position hash */
	fat_idx_hashout(childp->idx);
	/* clear position information */
	childp->idx->pfc = FAT_CLST_RES0;
	childp->idx->pdi = 0;
	fibril_mutex_unlock(&childp->idx->lock);
	childp->lnkcnt = 0;
	childp->dirty = true;
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);

	return EOK;
}
592
/** Look up a directory entry by name.
 *
 * Scans the parent directory's dentries for one whose 8.3 name matches
 * the given component and instantiates the corresponding in-core node.
 *
 * @param pfn		FS node of the parent directory.
 * @param component	Name to look up.
 *
 * @return		FS node of the matching child, or NULL when not
 *			found or on memory shortage.
 */
fs_node_t *fat_match(fs_node_t *pfn, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	fat_dentry_t *d;
	block_t *b;

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);
	blocks = parentp->size / bps;
	for (i = 0; i < blocks; i++) {
		b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* Terminator: the name is not present. */
				block_put(b);
				fibril_mutex_unlock(&parentp->idx->lock);
				return NULL;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				fat_node_t *nodep;
				/*
				 * Assume tree hierarchy for locking.  We
				 * already have the parent and now we are going
				 * to lock the child.  Never lock in the oposite
				 * order.
				 */
				fat_idx_t *idx = fat_idx_get_by_pos(
				    parentp->idx->dev_handle, parentp->firstc,
				    i * dps + j);
				fibril_mutex_unlock(&parentp->idx->lock);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					block_put(b);
					return NULL;
				}
				nodep = fat_node_get_core(idx);
				fibril_mutex_unlock(&idx->lock);
				block_put(b);
				return FS_NODE(nodep);
			}
		}
		block_put(b);
	}

	fibril_mutex_unlock(&parentp->idx->lock);
	return NULL;
}
660
661fs_index_t fat_index_get(fs_node_t *fn)
662{
663 return FAT_NODE(fn)->idx->index;
664}
665
666size_t fat_size_get(fs_node_t *fn)
667{
668 return FAT_NODE(fn)->size;
669}
670
671unsigned fat_lnkcnt_get(fs_node_t *fn)
672{
673 return FAT_NODE(fn)->lnkcnt;
674}
675
676bool fat_has_children(fs_node_t *fn)
677{
678 fat_bs_t *bs;
679 fat_node_t *nodep = FAT_NODE(fn);
680 unsigned bps;
681 unsigned dps;
682 unsigned blocks;
683 block_t *b;
684 unsigned i, j;
685
686 if (nodep->type != FAT_DIRECTORY)
687 return false;
688
689 fibril_mutex_lock(&nodep->idx->lock);
690 bs = block_bb_get(nodep->idx->dev_handle);
691 bps = uint16_t_le2host(bs->bps);
692 dps = bps / sizeof(fat_dentry_t);
693
694 blocks = nodep->size / bps;
695
696 for (i = 0; i < blocks; i++) {
697 fat_dentry_t *d;
698
699 b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE);
700 for (j = 0; j < dps; j++) {
701 d = ((fat_dentry_t *)b->data) + j;
702 switch (fat_classify_dentry(d)) {
703 case FAT_DENTRY_SKIP:
704 case FAT_DENTRY_FREE:
705 continue;
706 case FAT_DENTRY_LAST:
707 block_put(b);
708 fibril_mutex_unlock(&nodep->idx->lock);
709 return false;
710 default:
711 case FAT_DENTRY_VALID:
712 block_put(b);
713 fibril_mutex_unlock(&nodep->idx->lock);
714 return true;
715 }
716 block_put(b);
717 fibril_mutex_unlock(&nodep->idx->lock);
718 return true;
719 }
720 block_put(b);
721 }
722
723 fibril_mutex_unlock(&nodep->idx->lock);
724 return false;
725}
726
/** Return the root node of the file system; the root always has index 0. */
fs_node_t *fat_root_get(dev_handle_t dev_handle)
{
	return fat_node_get(dev_handle, 0);
}
731
732char fat_plb_get_char(unsigned pos)
733{
734 return fat_reg.plb_ro[pos % PLB_SIZE];
735}
736
737bool fat_is_directory(fs_node_t *fn)
738{
739 return FAT_NODE(fn)->type == FAT_DIRECTORY;
740}
741
742bool fat_is_file(fs_node_t *fn)
743{
744 return FAT_NODE(fn)->type == FAT_FILE;
745}
746
/** libfs operations table registered with the libfs framework. */
libfs_ops_t fat_libfs_ops = {
	.match = fat_match,
	.node_get = fat_node_get,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.has_children = fat_has_children,
	.root_get = fat_root_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file
};
765
766/*
767 * VFS operations.
768 */
769
770void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
771{
772 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
773 enum cache_mode cmode;
774 fat_bs_t *bs;
775 uint16_t bps;
776 uint16_t rde;
777 int rc;
778
779 /* accept the mount options */
780 ipc_callid_t callid;
781 size_t size;
782 if (!ipc_data_write_receive(&callid, &size)) {
783 ipc_answer_0(callid, EINVAL);
784 ipc_answer_0(rid, EINVAL);
785 return;
786 }
787 char *opts = malloc(size + 1);
788 if (!opts) {
789 ipc_answer_0(callid, ENOMEM);
790 ipc_answer_0(rid, ENOMEM);
791 return;
792 }
793 ipcarg_t retval = ipc_data_write_finalize(callid, opts, size);
794 if (retval != EOK) {
795 ipc_answer_0(rid, retval);
796 free(opts);
797 return;
798 }
799 opts[size] = '\0';
800
801 /* Check for option enabling write through. */
802 if (str_cmp(opts, "wtcache") == 0)
803 cmode = CACHE_MODE_WT;
804 else
805 cmode = CACHE_MODE_WB;
806
807 /* initialize libblock */
808 rc = block_init(dev_handle, BS_SIZE);
809 if (rc != EOK) {
810 ipc_answer_0(rid, rc);
811 return;
812 }
813
814 /* prepare the boot block */
815 rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
816 if (rc != EOK) {
817 block_fini(dev_handle);
818 ipc_answer_0(rid, rc);
819 return;
820 }
821
822 /* get the buffer with the boot sector */
823 bs = block_bb_get(dev_handle);
824
825 /* Read the number of root directory entries. */
826 bps = uint16_t_le2host(bs->bps);
827 rde = uint16_t_le2host(bs->root_ent_max);
828
829 if (bps != BS_SIZE) {
830 block_fini(dev_handle);
831 ipc_answer_0(rid, ENOTSUP);
832 return;
833 }
834
835 /* Initialize the block cache */
836 rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
837 if (rc != EOK) {
838 block_fini(dev_handle);
839 ipc_answer_0(rid, rc);
840 return;
841 }
842
843 rc = fat_idx_init_by_dev_handle(dev_handle);
844 if (rc != EOK) {
845 block_fini(dev_handle);
846 ipc_answer_0(rid, rc);
847 return;
848 }
849
850 /* Initialize the root node. */
851 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
852 if (!rfn) {
853 block_fini(dev_handle);
854 fat_idx_fini_by_dev_handle(dev_handle);
855 ipc_answer_0(rid, ENOMEM);
856 return;
857 }
858 fs_node_initialize(rfn);
859 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
860 if (!rootp) {
861 free(rfn);
862 block_fini(dev_handle);
863 fat_idx_fini_by_dev_handle(dev_handle);
864 ipc_answer_0(rid, ENOMEM);
865 return;
866 }
867 fat_node_initialize(rootp);
868
869 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
870 if (!ridxp) {
871 free(rfn);
872 free(rootp);
873 block_fini(dev_handle);
874 fat_idx_fini_by_dev_handle(dev_handle);
875 ipc_answer_0(rid, ENOMEM);
876 return;
877 }
878 assert(ridxp->index == 0);
879 /* ridxp->lock held */
880
881 rootp->type = FAT_DIRECTORY;
882 rootp->firstc = FAT_CLST_ROOT;
883 rootp->refcnt = 1;
884 rootp->lnkcnt = 0; /* FS root is not linked */
885 rootp->size = rde * sizeof(fat_dentry_t);
886 rootp->idx = ridxp;
887 ridxp->nodep = rootp;
888 rootp->bp = rfn;
889 rfn->data = rootp;
890
891 fibril_mutex_unlock(&ridxp->lock);
892
893 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
894}
895
/** Handle the VFS_MOUNT request by delegating to libfs. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
900
/** Handle the VFS_LOOKUP request by delegating to libfs. */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
905
/** Handle the VFS_READ request.
 *
 * For regular files at most one block worth of data is returned per call
 * (the client must cope with short reads). For directories the position is
 * interpreted as a dentry index and exactly one directory entry name is
 * returned per call; skipped dentries bump the position accordingly.
 *
 * @param rid		IPC request id to answer.
 * @param request	IPC request with device handle, index and position.
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn = fat_node_get(dev_handle, index);
	fat_node_t *nodep;
	fat_bs_t *bs;
	uint16_t bps;
	size_t bytes;
	block_t *b;

	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) ipc_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the block boundary and to the EOF. */
			bytes = min(len, bps - pos % bps);
			bytes = min(bytes, nodep->size - pos);
			b = fat_block_get(bs, nodep, pos / bps,
			    BLOCK_FLAGS_NONE);
			(void) ipc_data_read_finalize(callid, b->data + pos % bps,
			    bytes);
			block_put(b);
		}
	} else {
		unsigned bnum;
		off_t spos = pos;
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % bps == 0);
		assert(bps % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / bps;
		while (bnum < nodep->size / bps) {
			off_t o;

			b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
			for (o = pos % (bps / sizeof(fat_dentry_t));
			    o < bps / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
				case FAT_DENTRY_FREE:
					continue;
				case FAT_DENTRY_LAST:
					/* End of directory. */
					block_put(b);
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					fat_dentry_name_get(d, name);
					block_put(b);
					goto hit;
				}
			}
			block_put(b);
			bnum++;
		}
miss:
		fat_node_put(fn);
		ipc_answer_0(callid, ENOENT);
		ipc_answer_1(rid, ENOENT, 0);
		return;
hit:
		(void) ipc_data_read_finalize(callid, name, str_size(name) + 1);
		/* Report how far the position pointer advanced. */
		bytes = (pos - spos) + 1;
	}

	fat_node_put(fn);
	ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}
1010
/** Handle the VFS_WRITE request.
 *
 * At most one block worth of data is written per call (the client must be
 * prepared for short writes). Writes within the space already covered by
 * the node's last cluster only fill gaps and update the size; writes past
 * that boundary first allocate and append new clusters.
 *
 * @param rid		IPC request id to answer.
 * @param request	IPC request with device handle, index and position.
 */
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn = fat_node_get(dev_handle, index);
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	uint16_t bps;
	unsigned spc;
	unsigned bpc;		/* bytes per cluster */
	off_t boundary;
	int flags = BLOCK_FLAGS_NONE;
	
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);
	
	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_write_receive(&callid, &len)) {
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	/*
	 * In all scenarios, we will attempt to write out only one block worth
	 * of data at maximum. There might be some more efficient approaches,
	 * but this one greatly simplifies fat_write(). Note that we can afford
	 * to do this because the client must be ready to handle the return
	 * value signalizing a smaller number of bytes written.
	 */
	bytes = min(len, bps - pos % bps);
	/* A full-block write need not read the old contents first. */
	if (bytes == bps)
		flags |= BLOCK_FLAGS_NOREAD;
	
	boundary = ROUND_UP(nodep->size, bpc);
	if (pos < boundary) {
		/*
		 * This is the easier case - we are either overwriting already
		 * existing contents or writing behind the EOF, but still within
		 * the limits of the last cluster. The node size may grow to the
		 * next block size boundary.
		 */
		fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
		b = fat_block_get(bs, nodep, pos / bps, flags);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		block_put(b);
		if (pos + bytes > nodep->size) {
			nodep->size = pos + bytes;
			nodep->dirty = true;	/* need to sync node */
		}
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(fn);
		return;
	} else {
		/*
		 * This is the more difficult case. We must allocate new
		 * clusters for the node and zero them out.
		 */
		int status;
		unsigned nclsts;
		fat_cluster_t mcl, lcl;
 
		nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
		/* create an independent chain of nclsts clusters in all FATs */
		status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
		if (status != EOK) {
			/* could not allocate a chain of nclsts clusters */
			fat_node_put(fn);
			ipc_answer_0(callid, status);
			ipc_answer_0(rid, status);
			return;
		}
		/* zero fill any gaps */
		fat_fill_gap(bs, nodep, mcl, pos);
		/* The target block lives in the last of the new clusters. */
		b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
		    flags);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		block_put(b);
		/*
		 * Append the cluster chain starting in mcl to the end of the
		 * node's cluster chain.
		 */
		fat_append_clusters(bs, nodep, mcl);
		nodep->size = pos + bytes;
		nodep->dirty = true;		/* need to sync node */
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(fn);
		return;
	}
}
1118
/** Handle the VFS_TRUNCATE request.
 *
 * Only shrinking is supported; growing the node returns EINVAL. When the
 * new size falls into a lower cluster boundary, the trailing clusters are
 * chopped off the node's cluster chain.
 *
 * @param rid		IPC request id to answer.
 * @param request	IPC request with device handle, index and new size.
 */
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	size_t size = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn = fat_node_get(dev_handle, index);
	fat_node_t *nodep;
	fat_bs_t *bs;
	uint16_t bps;
	uint8_t spc;
	unsigned bpc;	/* bytes per cluster */
	int rc;

	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	if (nodep->size == size) {
		rc = EOK;
	} else if (nodep->size < size) {
		/*
		 * The standard says we have the freedom to grow the node.
		 * For now, we simply return an error.
		 */
		rc = EINVAL;
	} else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
		/*
		 * The node will be shrunk, but no clusters will be deallocated.
		 */
		nodep->size = size;
		nodep->dirty = true;		/* need to sync node */
		rc = EOK;
	} else {
		/*
		 * The node will be shrunk, clusters will be deallocated.
		 */
		if (size == 0) {
			fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
		} else {
			fat_cluster_t lastc;
			/* Find the last cluster to keep. */
			(void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
			    &lastc, (size - 1) / bpc);
			fat_chop_clusters(bs, nodep, lastc);
		}
		nodep->size = size;
		nodep->dirty = true;		/* need to sync node */
		rc = EOK;
	}
	fat_node_put(fn);
	ipc_answer_0(rid, rc);
	return;
}
1178
/** Handle the VFS_CLOSE request; FAT needs no per-close work. */
void fat_close(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_answer_0(rid, EOK);
}
1183
/** Handle the VFS_DESTROY request: destroy an unlinked node.
 *
 * @param rid		IPC request id to answer.
 * @param request	IPC request with device handle and node index.
 */
void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	int rc;

	fs_node_t *fn = fat_node_get(dev_handle, index);
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	rc = fat_destroy_node(fn);
	ipc_answer_0(rid, rc);
}
1199
/** Handle the VFS_OPEN_NODE request by delegating to libfs. */
void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1204
/** Handle the VFS_STAT request by delegating to libfs. */
void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1209
/** Handle the VFS_SYNC request.
 *
 * Dummy implementation: dirty state is synced lazily elsewhere; this
 * merely acknowledges the request.
 */
void fat_sync(ipc_callid_t rid, ipc_call_t *request)
{
	/* Dummy implementation */
	ipc_answer_0(rid, EOK);
}
1215
1216/**
1217 * @}
1218 */
Note: See TracBrowser for help on using the repository browser.