source: mainline/uspace/srv/fs/fat/fat_ops.c@ e6bc3a5

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since e6bc3a5 was e6bc3a5, checked in by Jakub Jermar <jakub@…>, 16 years ago

fat_node_get_core() needs to take the ffn_mutex before it manipulates the
ffn_head list.

  • Property mode set to 100644
File size: 32.9 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/ipc.h>
45#include <ipc/services.h>
46#include <ipc/devmap.h>
47#include <async.h>
48#include <errno.h>
49#include <string.h>
50#include <byteorder.h>
51#include <adt/hash_table.h>
52#include <adt/list.h>
53#include <assert.h>
54#include <fibril_sync.h>
55#include <sys/mman.h>
56#include <align.h>
57
/** Extract the FAT-specific node from a generic VFS node (NULL-safe). */
#define FAT_NODE(node)	((node) ? (fat_node_t *) (node)->data : NULL)
/** Extract the generic VFS node wrapping a FAT node (NULL-safe). */
#define FS_NODE(node)	((node) ? (node)->bp : NULL)

/** Mutex protecting the list of cached free FAT nodes. */
static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);
66
67static void fat_node_initialize(fat_node_t *node)
68{
69 fibril_mutex_initialize(&node->lock);
70 node->bp = NULL;
71 node->idx = NULL;
72 node->type = 0;
73 link_initialize(&node->ffn_link);
74 node->size = 0;
75 node->lnkcnt = 0;
76 node->refcnt = 0;
77 node->dirty = false;
78}
79
/** Write a dirty in-core node back to its on-disk directory entry.
 *
 * @param node	Dirty FAT in-core node to synchronize; node->idx identifies
 *		the device, parent cluster and dentry position.
 *
 * @return	EOK on success or an error code from the block layer.
 */
static int fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	int rc;

	assert(node->dirty);

	bs = block_bb_get(node->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
	    (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	/* Locate the dentry within the fetched block. */
	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

	/* On-disk fields are stored little-endian. */
	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		/* The dentry size field is not defined for directories. */
		d->attr = FAT_ATTR_SUBDIR;
	}

	/* TODO: update other fields? (e.g time fields) */

	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	return rc;
}
116
/** Obtain a FAT in-core node, preferably by recycling a cached free one.
 *
 * If the free-node cache is non-empty and both the candidate node and its
 * index structure can be trylocked, the node is evicted from the cache
 * (written back first if dirty) and reused.  Otherwise a fresh
 * fs_node_t/fat_node_t pair is allocated from the heap.
 *
 * @return	Freshly initialized node with the bp/data cross-links set up,
 *		or NULL on memory shortage.
 */
static fat_node_t *fat_node_get_new(void)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		/*
		 * Trylocks only: if either lock is contended, fall back to
		 * heap allocation rather than risk a lock-order deadlock.
		 * NOTE: the gotos below jump into the else branch with
		 * ffn_mutex still held; skip_cache unlocks it.
		 */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			assert(rc == EOK);
		}
		/* Detach the recycled node from its old index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return NULL;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return NULL;
		}
	}
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	/* Cross-link the generic and the FAT-specific node structures. */
	fn->data = nodep;
	nodep->bp = fn;

	return nodep;
}
165
/** Internal version of fat_node_get().
 *
 * Either returns the already-instantiated node attached to @a idxp, or reads
 * the on-disk dentry the index structure points at and instantiates a new
 * in-core node from it.
 *
 * @param idxp	Locked index structure.
 *
 * @return	In-core node with one reference added, or NULL on memory
 *		shortage.
 */
static fat_node_t *fat_node_get_core(fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	unsigned bps;
	unsigned spc;
	unsigned dps;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			/*
			 * An unreferenced node sits on the free-node cache;
			 * take it off under the protection of ffn_mutex.
			 */
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		return idxp->nodep;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	nodep = fat_node_get_new();
	if (!nodep)
		return NULL;

	bs = block_bb_get(idxp->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
	    (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
	assert(rc == EOK);

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint16_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
		    uint16_t_le2host(d->firstc));
		assert(rc == EOK);
		nodep->size = bps * spc * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	assert(rc == EOK);

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	return nodep;
}
251
252/*
253 * Forward declarations of FAT libfs operations.
254 */
255static int fat_root_get(fs_node_t **, dev_handle_t);
256static int fat_match(fs_node_t **, fs_node_t *, const char *);
257static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
258static int fat_node_put(fs_node_t *);
259static int fat_create_node(fs_node_t **, dev_handle_t, int);
260static int fat_destroy_node(fs_node_t *);
261static int fat_link(fs_node_t *, fs_node_t *, const char *);
262static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
263static int fat_has_children(bool *, fs_node_t *);
264static fs_index_t fat_index_get(fs_node_t *);
265static size_t fat_size_get(fs_node_t *);
266static unsigned fat_lnkcnt_get(fs_node_t *);
267static char fat_plb_get_char(unsigned);
268static bool fat_is_directory(fs_node_t *);
269static bool fat_is_file(fs_node_t *node);
270
271/*
272 * FAT libfs operations.
273 */
274
275int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
276{
277 return fat_node_get(rfn, dev_handle, 0);
278}
279
/** Look up a directory entry by name.
 *
 * @param rfn		Output: matching child node, or NULL when the name is
 *			not present (in which case EOK is still returned).
 * @param pfn		Parent directory node.
 * @param component	Name component to look for.
 *
 * @return	EOK on success or miss, ENOMEM if an index structure could
 *		not be obtained, or an error code from the block layer.
 */
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	fat_dentry_t *d;
	block_t *b;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);
	blocks = parentp->size / bps;
	/* Scan the parent directory block by block, dentry by dentry. */
	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;	/* next dentry */
			case FAT_DENTRY_LAST:
				/* No valid entries can follow; miss. */
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				fibril_mutex_unlock(&parentp->idx->lock);
				*rfn = NULL;
				return EOK;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				fat_node_t *nodep;
				/*
				 * Assume tree hierarchy for locking. We
				 * already have the parent and now we are going
				 * to lock the child. Never lock in the opposite
				 * order.
				 */
				fat_idx_t *idx = fat_idx_get_by_pos(
				    parentp->idx->dev_handle, parentp->firstc,
				    i * dps + j);
				fibril_mutex_unlock(&parentp->idx->lock);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					rc = block_put(b);
					/* expect EOK as b was not dirty */
					assert(rc == EOK);
					return ENOMEM;
				}
				/* fat_idx_get_by_pos() returns idx locked. */
				nodep = fat_node_get_core(idx);
				fibril_mutex_unlock(&idx->lock);
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				*rfn = FS_NODE(nodep);
				return EOK;
			}
		}
		rc = block_put(b);
		assert(rc == EOK);	/* expect EOK as b was not dirty */
	}

	/* Exhausted the whole directory without a match. */
	fibril_mutex_unlock(&parentp->idx->lock);
	*rfn = NULL;
	return EOK;
}
362
363/** Instantiate a FAT in-core node. */
364int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
365{
366 fat_node_t *nodep;
367 fat_idx_t *idxp;
368
369 idxp = fat_idx_get_by_index(dev_handle, index);
370 if (!idxp) {
371 *rfn = NULL;
372 return EOK;
373 }
374 /* idxp->lock held */
375 nodep = fat_node_get_core(idxp);
376 fibril_mutex_unlock(&idxp->lock);
377 *rfn = FS_NODE(nodep);
378 return EOK;
379}
380
381int fat_node_put(fs_node_t *fn)
382{
383 fat_node_t *nodep = FAT_NODE(fn);
384 bool destroy = false;
385
386 fibril_mutex_lock(&nodep->lock);
387 if (!--nodep->refcnt) {
388 if (nodep->idx) {
389 fibril_mutex_lock(&ffn_mutex);
390 list_append(&nodep->ffn_link, &ffn_head);
391 fibril_mutex_unlock(&ffn_mutex);
392 } else {
393 /*
394 * The node does not have any index structure associated
395 * with itself. This can only mean that we are releasing
396 * the node after a failed attempt to allocate the index
397 * structure for it.
398 */
399 destroy = true;
400 }
401 }
402 fibril_mutex_unlock(&nodep->lock);
403 if (destroy) {
404 free(nodep->bp);
405 free(nodep);
406 }
407 return EOK;
408}
409
410int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
411{
412 fat_idx_t *idxp;
413 fat_node_t *nodep;
414 fat_bs_t *bs;
415 fat_cluster_t mcl, lcl;
416 uint16_t bps;
417 int rc;
418
419 bs = block_bb_get(dev_handle);
420 bps = uint16_t_le2host(bs->bps);
421 if (flags & L_DIRECTORY) {
422 /* allocate a cluster */
423 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
424 if (rc != EOK)
425 return rc;
426 /* populate the new cluster with unused dentries */
427 rc = fat_zero_cluster(bs, dev_handle, mcl);
428 if (rc != EOK) {
429 (void) fat_free_clusters(bs, dev_handle, mcl);
430 return rc;
431 }
432 }
433
434 nodep = fat_node_get_new();
435 if (!nodep) {
436 (void) fat_free_clusters(bs, dev_handle, mcl);
437 return ENOMEM; /* FIXME: determine the true error code */
438 }
439 idxp = fat_idx_get_new(dev_handle);
440 if (!idxp) {
441 (void) fat_free_clusters(bs, dev_handle, mcl);
442 (void) fat_node_put(FS_NODE(nodep));
443 return ENOMEM; /* FIXME: determine the true error code */
444 }
445 /* idxp->lock held */
446 if (flags & L_DIRECTORY) {
447 nodep->type = FAT_DIRECTORY;
448 nodep->firstc = mcl;
449 nodep->size = bps * bs->spc;
450 } else {
451 nodep->type = FAT_FILE;
452 nodep->firstc = FAT_CLST_RES0;
453 nodep->size = 0;
454 }
455 nodep->lnkcnt = 0; /* not linked anywhere */
456 nodep->refcnt = 1;
457 nodep->dirty = true;
458
459 nodep->idx = idxp;
460 idxp->nodep = nodep;
461
462 fibril_mutex_unlock(&idxp->lock);
463 *rfn = FS_NODE(nodep);
464 return EOK;
465}
466
/** Destroy a node that is no longer reachable from the file system.
 *
 * Frees the node's cluster chain (if any), destroys its index structure and
 * deallocates the in-core structures.
 *
 * @param fn	Node to destroy; its link count must already be zero and it
 *		must have no children.
 *
 * @return	EOK on success or an error code from fat_has_children() /
 *		fat_free_clusters().
 */
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	bool has_children;
	int rc;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	rc = fat_has_children(&has_children, fn);
	if (rc != EOK)
		return rc;
	assert(!has_children);

	bs = block_bb_get(nodep->idx->dev_handle);
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->dev_handle,
		    nodep->firstc);
	}
	/* If there were no clusters, rc still holds EOK from above. */

	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}
503
/** Link a child node into a parent directory under a given name.
 *
 * Finds (or creates, by growing the parent) an unused dentry, writes the
 * name into it, optionally creates "." and ".." entries for directories,
 * and records the dentry position in the child's index structure.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node to link; must not be linked yet (lnkcnt == 0).
 * @param name	Name for the new link; must be a valid FAT 8.3 name.
 *
 * @return	EOK on success, EMLINK if the child is already linked,
 *		ENOTSUP for an unrepresentable name, ENOSPC if the root
 *		directory is full, or an error code from the block layer.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	unsigned i, j;
	uint16_t bps;
	unsigned dps;
	unsigned blocks;
	fat_cluster_t mcl, lcl;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get us an unused parent node's dentry or grow the parent and allocate
	 * a new one.
	 */

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	blocks = parentp->size / bps;

	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot; b stays held */
				goto hit;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
	}
	/* No free slot found; the new dentry will be first in a new block. */
	j = 0;

	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	if (parentp->firstc == FAT_CLST_ROOT) {
		/* Can't grow the root directory. */
		fibril_mutex_unlock(&parentp->idx->lock);
		return ENOSPC;
	}
	rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_append_clusters(bs, parentp, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	parentp->size += bps * bs->spc;
	parentp->dirty = true;		/* need to sync node */
	/* i == blocks here: fetch the first block of the new cluster. */
	rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	d = (fat_dentry_t *)b->data;

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except of the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	fibril_mutex_unlock(&parentp->idx->lock);
	if (rc != EOK)
		return rc;

	fibril_mutex_lock(&childp->idx->lock);

	/*
	 * If possible, create the Sub-directory Identifier Entry and the
	 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
	 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
	 * not use them anyway, so this is rather a sign of our good will.
	 */
	rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		/*
		 * Rather than returning an error, simply skip the creation of
		 * these two entries.
		 */
		goto skip_dots;
	}
	d = (fat_dentry_t *)b->data;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    str_cmp(d->name, FAT_NAME_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		str_cpy(d->name, 8, FAT_NAME_DOT);
		str_cpy(d->ext, 3, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		d->firstc = host2uint16_t_le(childp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	d++;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
		str_cpy(d->ext, 3, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		/* ".." in a child of the root points at cluster 0. */
		d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
		    host2uint16_t_le(FAT_CLST_RES0) :
		    host2uint16_t_le(parentp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	b->dirty = true;		/* need to sync block */
	/*
	 * Ignore the return value as we would have fallen through on error
	 * anyway.
	 */
	(void) block_put(b);
skip_dots:

	/* Record where the child's dentry lives inside the parent. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * dps + j;
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
686
687int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
688{
689 fat_node_t *parentp = FAT_NODE(pfn);
690 fat_node_t *childp = FAT_NODE(cfn);
691 fat_bs_t *bs;
692 fat_dentry_t *d;
693 uint16_t bps;
694 block_t *b;
695 bool has_children;
696 int rc;
697
698 if (!parentp)
699 return EBUSY;
700
701 rc = fat_has_children(&has_children, cfn);
702 if (rc != EOK)
703 return rc;
704 if (has_children)
705 return ENOTEMPTY;
706
707 fibril_mutex_lock(&parentp->lock);
708 fibril_mutex_lock(&childp->lock);
709 assert(childp->lnkcnt == 1);
710 fibril_mutex_lock(&childp->idx->lock);
711 bs = block_bb_get(childp->idx->dev_handle);
712 bps = uint16_t_le2host(bs->bps);
713
714 rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
715 (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
716 BLOCK_FLAGS_NONE);
717 if (rc != EOK)
718 goto error;
719 d = (fat_dentry_t *)b->data +
720 (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
721 /* mark the dentry as not-currently-used */
722 d->name[0] = FAT_DENTRY_ERASED;
723 b->dirty = true; /* need to sync block */
724 rc = block_put(b);
725 if (rc != EOK)
726 goto error;
727
728 /* remove the index structure from the position hash */
729 fat_idx_hashout(childp->idx);
730 /* clear position information */
731 childp->idx->pfc = FAT_CLST_RES0;
732 childp->idx->pdi = 0;
733 fibril_mutex_unlock(&childp->idx->lock);
734 childp->lnkcnt = 0;
735 childp->dirty = true;
736 fibril_mutex_unlock(&childp->lock);
737 fibril_mutex_unlock(&parentp->lock);
738
739 return EOK;
740
741error:
742 fibril_mutex_unlock(&parentp->idx->lock);
743 fibril_mutex_unlock(&childp->lock);
744 fibril_mutex_unlock(&childp->idx->lock);
745 return rc;
746}
747
/** Determine whether a node has any directory entries.
 *
 * Non-directory nodes trivially have no children.  For directories, the
 * scan stops at the first valid dentry (true) or the end-of-directory
 * marker (false).
 *
 * @param has_children	Output: set to the result.
 * @param fn		Node to examine.
 *
 * @return	EOK on success or an error code from the block layer.
 */
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}

	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	blocks = nodep->size / bps;

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;	/* next dentry */
			case FAT_DENTRY_LAST:
				/* End marker: nothing valid can follow. */
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return EOK;
			default:
			case FAT_DENTRY_VALID:
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return EOK;
			}
		}
		rc = block_put(b);
		assert(rc == EOK);	/* expect EOK as b was not dirty */
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}
810
811
812fs_index_t fat_index_get(fs_node_t *fn)
813{
814 return FAT_NODE(fn)->idx->index;
815}
816
817size_t fat_size_get(fs_node_t *fn)
818{
819 return FAT_NODE(fn)->size;
820}
821
822unsigned fat_lnkcnt_get(fs_node_t *fn)
823{
824 return FAT_NODE(fn)->lnkcnt;
825}
826
827char fat_plb_get_char(unsigned pos)
828{
829 return fat_reg.plb_ro[pos % PLB_SIZE];
830}
831
832bool fat_is_directory(fs_node_t *fn)
833{
834 return FAT_NODE(fn)->type == FAT_DIRECTORY;
835}
836
837bool fat_is_file(fs_node_t *fn)
838{
839 return FAT_NODE(fn)->type == FAT_FILE;
840}
841
/** Dispatch table wiring the FAT implementations into the libfs framework. */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file
};
860
861/*
862 * VFS operations.
863 */
864
865void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
866{
867 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
868 enum cache_mode cmode;
869 fat_bs_t *bs;
870 uint16_t bps;
871 uint16_t rde;
872 int rc;
873
874 /* accept the mount options */
875 ipc_callid_t callid;
876 size_t size;
877 if (!ipc_data_write_receive(&callid, &size)) {
878 ipc_answer_0(callid, EINVAL);
879 ipc_answer_0(rid, EINVAL);
880 return;
881 }
882 char *opts = malloc(size + 1);
883 if (!opts) {
884 ipc_answer_0(callid, ENOMEM);
885 ipc_answer_0(rid, ENOMEM);
886 return;
887 }
888 ipcarg_t retval = ipc_data_write_finalize(callid, opts, size);
889 if (retval != EOK) {
890 ipc_answer_0(rid, retval);
891 free(opts);
892 return;
893 }
894 opts[size] = '\0';
895
896 /* Check for option enabling write through. */
897 if (str_cmp(opts, "wtcache") == 0)
898 cmode = CACHE_MODE_WT;
899 else
900 cmode = CACHE_MODE_WB;
901
902 /* initialize libblock */
903 rc = block_init(dev_handle, BS_SIZE);
904 if (rc != EOK) {
905 ipc_answer_0(rid, rc);
906 return;
907 }
908
909 /* prepare the boot block */
910 rc = block_bb_read(dev_handle, BS_BLOCK);
911 if (rc != EOK) {
912 block_fini(dev_handle);
913 ipc_answer_0(rid, rc);
914 return;
915 }
916
917 /* get the buffer with the boot sector */
918 bs = block_bb_get(dev_handle);
919
920 /* Read the number of root directory entries. */
921 bps = uint16_t_le2host(bs->bps);
922 rde = uint16_t_le2host(bs->root_ent_max);
923
924 if (bps != BS_SIZE) {
925 block_fini(dev_handle);
926 ipc_answer_0(rid, ENOTSUP);
927 return;
928 }
929
930 /* Initialize the block cache */
931 rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
932 if (rc != EOK) {
933 block_fini(dev_handle);
934 ipc_answer_0(rid, rc);
935 return;
936 }
937
938 rc = fat_idx_init_by_dev_handle(dev_handle);
939 if (rc != EOK) {
940 block_fini(dev_handle);
941 ipc_answer_0(rid, rc);
942 return;
943 }
944
945 /* Initialize the root node. */
946 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
947 if (!rfn) {
948 block_fini(dev_handle);
949 fat_idx_fini_by_dev_handle(dev_handle);
950 ipc_answer_0(rid, ENOMEM);
951 return;
952 }
953 fs_node_initialize(rfn);
954 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
955 if (!rootp) {
956 free(rfn);
957 block_fini(dev_handle);
958 fat_idx_fini_by_dev_handle(dev_handle);
959 ipc_answer_0(rid, ENOMEM);
960 return;
961 }
962 fat_node_initialize(rootp);
963
964 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
965 if (!ridxp) {
966 free(rfn);
967 free(rootp);
968 block_fini(dev_handle);
969 fat_idx_fini_by_dev_handle(dev_handle);
970 ipc_answer_0(rid, ENOMEM);
971 return;
972 }
973 assert(ridxp->index == 0);
974 /* ridxp->lock held */
975
976 rootp->type = FAT_DIRECTORY;
977 rootp->firstc = FAT_CLST_ROOT;
978 rootp->refcnt = 1;
979 rootp->lnkcnt = 0; /* FS root is not linked */
980 rootp->size = rde * sizeof(fat_dentry_t);
981 rootp->idx = ridxp;
982 ridxp->nodep = rootp;
983 rootp->bp = rfn;
984 rfn->data = rootp;
985
986 fibril_mutex_unlock(&ridxp->lock);
987
988 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
989}
990
/** Handle the VFS_MOUNT request by delegating to the generic libfs code. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
995
/** Handle the VFS_LOOKUP request by delegating to the generic libfs code. */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1000
/** Handle the VFS_READ request.
 *
 * For regular files, at most one block worth of data is returned per call.
 * For directories, the position argument indexes the flat array of dentries
 * and a single entry name is returned per call (readdir semantics).
 *
 * @param rid		IPC request ID to answer.
 * @param request	IPC request: ARG1 device handle, ARG2 node index,
 *			ARG3 position.
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	uint16_t bps;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, dev_handle, index);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) ipc_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the block boundary and to the file size. */
			bytes = min(len, bps - pos % bps);
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / bps,
			    BLOCK_FLAGS_NONE);
			assert(rc == EOK);
			(void) ipc_data_read_finalize(callid, b->data + pos % bps,
			    bytes);
			rc = block_put(b);
			assert(rc == EOK);
		}
	} else {
		unsigned bnum;
		off_t spos = pos;	/* starting position, for accounting */
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % bps == 0);
		assert(bps % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / bps;
		while (bnum < nodep->size / bps) {
			off_t o;

			rc = fat_block_get(&b, bs, nodep, bnum,
			    BLOCK_FLAGS_NONE);
			assert(rc == EOK);
			for (o = pos % (bps / sizeof(fat_dentry_t));
			    o < bps / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
				case FAT_DENTRY_FREE:
					continue;	/* bump pos, next dentry */
				case FAT_DENTRY_LAST:
					rc = block_put(b);
					assert(rc == EOK);
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					fat_dentry_name_get(d, name);
					rc = block_put(b);
					assert(rc == EOK);
					goto hit;
				}
			}
			rc = block_put(b);
			assert(rc == EOK);
			bnum++;
		}
miss:
		/* Ran past the last dentry: no more entries to report. */
		fat_node_put(fn);
		ipc_answer_0(callid, ENOENT);
		ipc_answer_1(rid, ENOENT, 0);
		return;
hit:
		(void) ipc_data_read_finalize(callid, name, str_size(name) + 1);
		/* Report how far the position pointer advanced. */
		bytes = (pos - spos) + 1;
	}

	fat_node_put(fn);
	ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}
1118
1119void fat_write(ipc_callid_t rid, ipc_call_t *request)
1120{
1121 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1122 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1123 off_t pos = (off_t)IPC_GET_ARG3(*request);
1124 fs_node_t *fn;
1125 fat_node_t *nodep;
1126 fat_bs_t *bs;
1127 size_t bytes;
1128 block_t *b;
1129 uint16_t bps;
1130 unsigned spc;
1131 unsigned bpc; /* bytes per cluster */
1132 off_t boundary;
1133 int flags = BLOCK_FLAGS_NONE;
1134 int rc;
1135
1136 rc = fat_node_get(&fn, dev_handle, index);
1137 if (rc != EOK) {
1138 ipc_answer_0(rid, rc);
1139 return;
1140 }
1141 if (!fn) {
1142 ipc_answer_0(rid, ENOENT);
1143 return;
1144 }
1145 nodep = FAT_NODE(fn);
1146
1147 ipc_callid_t callid;
1148 size_t len;
1149 if (!ipc_data_write_receive(&callid, &len)) {
1150 fat_node_put(fn);
1151 ipc_answer_0(callid, EINVAL);
1152 ipc_answer_0(rid, EINVAL);
1153 return;
1154 }
1155
1156 bs = block_bb_get(dev_handle);
1157 bps = uint16_t_le2host(bs->bps);
1158 spc = bs->spc;
1159 bpc = bps * spc;
1160
1161 /*
1162 * In all scenarios, we will attempt to write out only one block worth
1163 * of data at maximum. There might be some more efficient approaches,
1164 * but this one greatly simplifies fat_write(). Note that we can afford
1165 * to do this because the client must be ready to handle the return
1166 * value signalizing a smaller number of bytes written.
1167 */
1168 bytes = min(len, bps - pos % bps);
1169 if (bytes == bps)
1170 flags |= BLOCK_FLAGS_NOREAD;
1171
1172 boundary = ROUND_UP(nodep->size, bpc);
1173 if (pos < boundary) {
1174 /*
1175 * This is the easier case - we are either overwriting already
1176 * existing contents or writing behind the EOF, but still within
1177 * the limits of the last cluster. The node size may grow to the
1178 * next block size boundary.
1179 */
1180 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1181 assert(rc == EOK);
1182 rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
1183 assert(rc == EOK);
1184 (void) ipc_data_write_finalize(callid, b->data + pos % bps,
1185 bytes);
1186 b->dirty = true; /* need to sync block */
1187 rc = block_put(b);
1188 assert(rc == EOK);
1189 if (pos + bytes > nodep->size) {
1190 nodep->size = pos + bytes;
1191 nodep->dirty = true; /* need to sync node */
1192 }
1193 ipc_answer_2(rid, EOK, bytes, nodep->size);
1194 fat_node_put(fn);
1195 return;
1196 } else {
1197 /*
1198 * This is the more difficult case. We must allocate new
1199 * clusters for the node and zero them out.
1200 */
1201 int status;
1202 unsigned nclsts;
1203 fat_cluster_t mcl, lcl;
1204
1205 nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
1206 /* create an independent chain of nclsts clusters in all FATs */
1207 status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
1208 if (status != EOK) {
1209 /* could not allocate a chain of nclsts clusters */
1210 fat_node_put(fn);
1211 ipc_answer_0(callid, status);
1212 ipc_answer_0(rid, status);
1213 return;
1214 }
1215 /* zero fill any gaps */
1216 rc = fat_fill_gap(bs, nodep, mcl, pos);
1217 assert(rc == EOK);
1218 rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
1219 flags);
1220 assert(rc == EOK);
1221 (void) ipc_data_write_finalize(callid, b->data + pos % bps,
1222 bytes);
1223 b->dirty = true; /* need to sync block */
1224 rc = block_put(b);
1225 assert(rc == EOK);
1226 /*
1227 * Append the cluster chain starting in mcl to the end of the
1228 * node's cluster chain.
1229 */
1230 rc = fat_append_clusters(bs, nodep, mcl);
1231 assert(rc == EOK);
1232 nodep->size = pos + bytes;
1233 nodep->dirty = true; /* need to sync node */
1234 ipc_answer_2(rid, EOK, bytes, nodep->size);
1235 fat_node_put(fn);
1236 return;
1237 }
1238}
1239
1240void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1241{
1242 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1243 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1244 size_t size = (off_t)IPC_GET_ARG3(*request);
1245 fs_node_t *fn;
1246 fat_node_t *nodep;
1247 fat_bs_t *bs;
1248 uint16_t bps;
1249 uint8_t spc;
1250 unsigned bpc; /* bytes per cluster */
1251 int rc;
1252
1253 rc = fat_node_get(&fn, dev_handle, index);
1254 if (rc != EOK) {
1255 ipc_answer_0(rid, rc);
1256 return;
1257 }
1258 if (!fn) {
1259 ipc_answer_0(rid, ENOENT);
1260 return;
1261 }
1262 nodep = FAT_NODE(fn);
1263
1264 bs = block_bb_get(dev_handle);
1265 bps = uint16_t_le2host(bs->bps);
1266 spc = bs->spc;
1267 bpc = bps * spc;
1268
1269 if (nodep->size == size) {
1270 rc = EOK;
1271 } else if (nodep->size < size) {
1272 /*
1273 * The standard says we have the freedom to grow the node.
1274 * For now, we simply return an error.
1275 */
1276 rc = EINVAL;
1277 } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
1278 /*
1279 * The node will be shrunk, but no clusters will be deallocated.
1280 */
1281 nodep->size = size;
1282 nodep->dirty = true; /* need to sync node */
1283 rc = EOK;
1284 } else {
1285 /*
1286 * The node will be shrunk, clusters will be deallocated.
1287 */
1288 if (size == 0) {
1289 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1290 if (rc != EOK)
1291 goto out;
1292 } else {
1293 fat_cluster_t lastc;
1294 rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
1295 &lastc, NULL, (size - 1) / bpc);
1296 if (rc != EOK)
1297 goto out;
1298 rc = fat_chop_clusters(bs, nodep, lastc);
1299 if (rc != EOK)
1300 goto out;
1301 }
1302 nodep->size = size;
1303 nodep->dirty = true; /* need to sync node */
1304 rc = EOK;
1305 }
1306out:
1307 fat_node_put(fn);
1308 ipc_answer_0(rid, rc);
1309 return;
1310}
1311
1312void fat_close(ipc_callid_t rid, ipc_call_t *request)
1313{
1314 ipc_answer_0(rid, EOK);
1315}
1316
1317void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1318{
1319 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1320 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1321 fs_node_t *fn;
1322 int rc;
1323
1324 rc = fat_node_get(&fn, dev_handle, index);
1325 if (rc != EOK) {
1326 ipc_answer_0(rid, rc);
1327 return;
1328 }
1329 if (!fn) {
1330 ipc_answer_0(rid, ENOENT);
1331 return;
1332 }
1333
1334 rc = fat_destroy_node(fn);
1335 ipc_answer_0(rid, rc);
1336}
1337
1338void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
1339{
1340 libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1341}
1342
1343void fat_stat(ipc_callid_t rid, ipc_call_t *request)
1344{
1345 libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1346}
1347
1348void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1349{
1350 /* Dummy implementation */
1351 ipc_answer_0(rid, EOK);
1352}
1353
1354/**
1355 * @}
1356 */
Note: See TracBrowser for help on using the repository browser.