source: mainline/uspace/srv/fs/fat/fat_ops.c@17bf658

Last change on this file since 17bf658 was 17bf658, checked in by Jakub Jermar <jakub@…>, 16 years ago

Make fat_node_get_new() return an error code.

1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/ipc.h>
45#include <ipc/services.h>
46#include <ipc/devmap.h>
47#include <async.h>
48#include <errno.h>
49#include <string.h>
50#include <byteorder.h>
51#include <adt/hash_table.h>
52#include <adt/list.h>
53#include <assert.h>
54#include <fibril_sync.h>
55#include <sys/mman.h>
56#include <align.h>
57
58#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
59#define FS_NODE(node) ((node) ? (node)->bp : NULL)
60
61/** Mutex protecting the list of cached free FAT nodes. */
62static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
63
64/** List of cached free FAT nodes. */
65static LIST_INITIALIZE(ffn_head);
66
67static void fat_node_initialize(fat_node_t *node)
68{
69 fibril_mutex_initialize(&node->lock);
70 node->bp = NULL;
71 node->idx = NULL;
72 node->type = 0;
73 link_initialize(&node->ffn_link);
74 node->size = 0;
75 node->lnkcnt = 0;
76 node->refcnt = 0;
77 node->dirty = false;
78}
79
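/** Write the metadata of a dirty in-core node back to its on-disk dentry. */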
80static int fat_node_sync(fat_node_t *node)
81{
82 block_t *b;
83 fat_bs_t *bs;
84 fat_dentry_t *d;
85 uint16_t bps;
86 unsigned dps;
87 int rc;
88
89 assert(node->dirty);
90
91 bs = block_bb_get(node->idx->dev_handle);
92 bps = uint16_t_le2host(bs->bps);
93 dps = bps / sizeof(fat_dentry_t);
94
95 /* Read the block that contains the dentry of interest. */
96 rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
97 (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
98 if (rc != EOK)
99 return rc;
100
101 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);
102
103 d->firstc = host2uint16_t_le(node->firstc);
104 if (node->type == FAT_FILE) {
105 d->size = host2uint32_t_le(node->size);
106 } else if (node->type == FAT_DIRECTORY) {
107 d->attr = FAT_ATTR_SUBDIR;
108 }
109
110 /* TODO: update other fields? (e.g., time fields) */
111
112 b->dirty = true; /* need to sync block */
113 rc = block_put(b);
114 return rc;
115}
116
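/** Obtain an empty in-core node, recycling a cached free node when possible. */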
117static int fat_node_get_new(fat_node_t **nodepp)
118{
119 fs_node_t *fn;
120 fat_node_t *nodep;
121 int rc;
122
123 fibril_mutex_lock(&ffn_mutex);
124 if (!list_empty(&ffn_head)) {
125 /* Try to use a cached free node structure. */
126 fat_idx_t *idxp_tmp;
127 nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
128 if (!fibril_mutex_trylock(&nodep->lock))
129 goto skip_cache;
130 idxp_tmp = nodep->idx;
131 if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
132 fibril_mutex_unlock(&nodep->lock);
133 goto skip_cache;
134 }
135 list_remove(&nodep->ffn_link);
136 fibril_mutex_unlock(&ffn_mutex);
137 if (nodep->dirty) {
138 rc = fat_node_sync(nodep);
139 if (rc != EOK) {
140 idxp_tmp->nodep = NULL;
141 fibril_mutex_unlock(&nodep->lock);
142 fibril_mutex_unlock(&idxp_tmp->lock);
143 free(nodep->bp);
144 free(nodep);
145 return rc;
146 }
147 }
148 idxp_tmp->nodep = NULL;
149 fibril_mutex_unlock(&nodep->lock);
150 fibril_mutex_unlock(&idxp_tmp->lock);
151 fn = FS_NODE(nodep);
152 } else {
153skip_cache:
154 /* Try to allocate a new node structure. */
155 fibril_mutex_unlock(&ffn_mutex);
156 fn = (fs_node_t *)malloc(sizeof(fs_node_t));
157 if (!fn)
158 return ENOMEM;
159 nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
160 if (!nodep) {
161 free(fn);
162 return ENOMEM;
163 }
164 }
165 fat_node_initialize(nodep);
166 fs_node_initialize(fn);
167 fn->data = nodep;
168 nodep->bp = fn;
169
170 *nodepp = nodep;
171 return EOK;
172}
173
174/** Internal version of fat_node_get().
175 *
176 * @param idxp Locked index structure.
177 */
178static fat_node_t *fat_node_get_core(fat_idx_t *idxp)
179{
180 block_t *b;
181 fat_bs_t *bs;
182 fat_dentry_t *d;
183 fat_node_t *nodep = NULL;
184 unsigned bps;
185 unsigned spc;
186 unsigned dps;
187 int rc;
188
189 if (idxp->nodep) {
190 /*
191 * We are lucky.
192 * The node is already instantiated in memory.
193 */
194 fibril_mutex_lock(&idxp->nodep->lock);
195 if (!idxp->nodep->refcnt++) {
196 fibril_mutex_lock(&ffn_mutex);
197 list_remove(&idxp->nodep->ffn_link);
198 fibril_mutex_unlock(&ffn_mutex);
199 }
200 fibril_mutex_unlock(&idxp->nodep->lock);
201 return idxp->nodep;
202 }
203
204 /*
205 * We must instantiate the node from the file system.
206 */
207
208 assert(idxp->pfc);
209
210 rc = fat_node_get_new(&nodep);
211 if (rc != EOK)
212 return NULL;
213
214 bs = block_bb_get(idxp->dev_handle);
215 bps = uint16_t_le2host(bs->bps);
216 spc = bs->spc;
217 dps = bps / sizeof(fat_dentry_t);
218
219 /* Read the block that contains the dentry of interest. */
220 rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
221 (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
222 assert(rc == EOK);
223
224 d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
225 if (d->attr & FAT_ATTR_SUBDIR) {
226 /*
227 * The only directory which does not have this bit set is the
228 * root directory itself. The root directory node is handled
229 * and initialized elsewhere.
230 */
231 nodep->type = FAT_DIRECTORY;
232 /*
233 * Unfortunately, the 'size' field of the FAT dentry is not
234 * defined for the directory entry type. We must determine the
235 * size of the directory by walking the FAT.
236 */
237 uint16_t clusters;
238 rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
239 uint16_t_le2host(d->firstc));
240 assert(rc == EOK);
241 nodep->size = bps * spc * clusters;
242 } else {
243 nodep->type = FAT_FILE;
244 nodep->size = uint32_t_le2host(d->size);
245 }
246 nodep->firstc = uint16_t_le2host(d->firstc);
247 nodep->lnkcnt = 1;
248 nodep->refcnt = 1;
249
250 rc = block_put(b);
251 assert(rc == EOK);
252
253 /* Link the idx structure with the node structure. */
254 nodep->idx = idxp;
255 idxp->nodep = nodep;
256
257 return nodep;
258}
259
260/*
261 * Forward declarations of FAT libfs operations.
262 */
263static int fat_root_get(fs_node_t **, dev_handle_t);
264static int fat_match(fs_node_t **, fs_node_t *, const char *);
265static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
266static int fat_node_put(fs_node_t *);
267static int fat_create_node(fs_node_t **, dev_handle_t, int);
268static int fat_destroy_node(fs_node_t *);
269static int fat_link(fs_node_t *, fs_node_t *, const char *);
270static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
271static int fat_has_children(bool *, fs_node_t *);
272static fs_index_t fat_index_get(fs_node_t *);
273static size_t fat_size_get(fs_node_t *);
274static unsigned fat_lnkcnt_get(fs_node_t *);
275static char fat_plb_get_char(unsigned);
276static bool fat_is_directory(fs_node_t *);
277static bool fat_is_file(fs_node_t *node);
278
279/*
280 * FAT libfs operations.
281 */
282
283int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
284{
285 return fat_node_get(rfn, dev_handle, 0);
286}
287
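/** Search the parent directory for a dentry matching the given name component. */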
288int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
289{
290 fat_bs_t *bs;
291 fat_node_t *parentp = FAT_NODE(pfn);
292 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
293 unsigned i, j;
294 unsigned bps; /* bytes per sector */
295 unsigned dps; /* dentries per sector */
296 unsigned blocks;
297 fat_dentry_t *d;
298 block_t *b;
299 int rc;
300
301 fibril_mutex_lock(&parentp->idx->lock);
302 bs = block_bb_get(parentp->idx->dev_handle);
303 bps = uint16_t_le2host(bs->bps);
304 dps = bps / sizeof(fat_dentry_t);
305 blocks = parentp->size / bps;
306 for (i = 0; i < blocks; i++) {
307 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
308 if (rc != EOK) {
309 fibril_mutex_unlock(&parentp->idx->lock);
310 return rc;
311 }
312 for (j = 0; j < dps; j++) {
313 d = ((fat_dentry_t *)b->data) + j;
314 switch (fat_classify_dentry(d)) {
315 case FAT_DENTRY_SKIP:
316 case FAT_DENTRY_FREE:
317 continue;
318 case FAT_DENTRY_LAST:
319 rc = block_put(b);
320 /* expect EOK as b was not dirty */
321 assert(rc == EOK);
322 fibril_mutex_unlock(&parentp->idx->lock);
323 *rfn = NULL;
324 return EOK;
325 default:
326 case FAT_DENTRY_VALID:
327 fat_dentry_name_get(d, name);
328 break;
329 }
330 if (fat_dentry_namecmp(name, component) == 0) {
331 /* hit */
332 fat_node_t *nodep;
333 /*
334 * Assume tree hierarchy for locking. We
335 * already have the parent and now we are going
336 * to lock the child. Never lock in the opposite
337 * order.
338 */
339 fat_idx_t *idx = fat_idx_get_by_pos(
340 parentp->idx->dev_handle, parentp->firstc,
341 i * dps + j);
342 fibril_mutex_unlock(&parentp->idx->lock);
343 if (!idx) {
344 /*
345 * Can happen if memory is low or if we
346 * run out of 32-bit indices.
347 */
348 rc = block_put(b);
349 /* expect EOK as b was not dirty */
350 assert(rc == EOK);
351 return ENOMEM;
352 }
353 nodep = fat_node_get_core(idx);
354 fibril_mutex_unlock(&idx->lock);
355 rc = block_put(b);
356 /* expect EOK as b was not dirty */
357 assert(rc == EOK);
358 *rfn = FS_NODE(nodep);
359 return EOK;
360 }
361 }
362 rc = block_put(b);
363 assert(rc == EOK); /* expect EOK as b was not dirty */
364 }
365
366 fibril_mutex_unlock(&parentp->idx->lock);
367 *rfn = NULL;
368 return EOK;
369}
370
371/** Instantiate a FAT in-core node. */
372int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
373{
374 fat_node_t *nodep;
375 fat_idx_t *idxp;
376
377 idxp = fat_idx_get_by_index(dev_handle, index);
378 if (!idxp) {
379 *rfn = NULL;
380 return EOK;
381 }
382 /* idxp->lock held */
383 nodep = fat_node_get_core(idxp);
384 fibril_mutex_unlock(&idxp->lock);
385 *rfn = FS_NODE(nodep);
386 return EOK;
387}
388
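/** Drop a reference to a node, caching or destroying it on the last reference. */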
389int fat_node_put(fs_node_t *fn)
390{
391 fat_node_t *nodep = FAT_NODE(fn);
392 bool destroy = false;
393
394 fibril_mutex_lock(&nodep->lock);
395 if (!--nodep->refcnt) {
396 if (nodep->idx) {
397 fibril_mutex_lock(&ffn_mutex);
398 list_append(&nodep->ffn_link, &ffn_head);
399 fibril_mutex_unlock(&ffn_mutex);
400 } else {
401 /*
402 * The node does not have any index structure associated
403 * with it. This can only mean that we are releasing
404 * the node after a failed attempt to allocate the index
405 * structure for it.
406 */
407 destroy = true;
408 }
409 }
410 fibril_mutex_unlock(&nodep->lock);
411 if (destroy) {
412 free(nodep->bp);
413 free(nodep);
414 }
415 return EOK;
416}
417
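/** Create a new file or directory node, not yet linked into any directory. */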
418int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
419{
420 fat_idx_t *idxp;
421 fat_node_t *nodep;
422 fat_bs_t *bs;
423 fat_cluster_t mcl, lcl;
424 uint16_t bps;
425 int rc;
426
427 bs = block_bb_get(dev_handle);
428 bps = uint16_t_le2host(bs->bps);
429 if (flags & L_DIRECTORY) {
430 /* allocate a cluster */
431 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
432 if (rc != EOK)
433 return rc;
434 /* populate the new cluster with unused dentries */
435 rc = fat_zero_cluster(bs, dev_handle, mcl);
436 if (rc != EOK) {
437 (void) fat_free_clusters(bs, dev_handle, mcl);
438 return rc;
439 }
440 }
441
442 rc = fat_node_get_new(&nodep);
443 if (rc != EOK) {
444 if (flags & L_DIRECTORY) (void) fat_free_clusters(bs, dev_handle, mcl); /* mcl is valid only for directories */
445 return rc;
446 }
447 idxp = fat_idx_get_new(dev_handle);
448 if (!idxp) {
449 if (flags & L_DIRECTORY) (void) fat_free_clusters(bs, dev_handle, mcl); /* mcl is valid only for directories */
450 (void) fat_node_put(FS_NODE(nodep));
451 return ENOMEM; /* FIXME: determine the true error code */
452 }
453 /* idxp->lock held */
454 if (flags & L_DIRECTORY) {
455 nodep->type = FAT_DIRECTORY;
456 nodep->firstc = mcl;
457 nodep->size = bps * bs->spc;
458 } else {
459 nodep->type = FAT_FILE;
460 nodep->firstc = FAT_CLST_RES0;
461 nodep->size = 0;
462 }
463 nodep->lnkcnt = 0; /* not linked anywhere */
464 nodep->refcnt = 1;
465 nodep->dirty = true;
466
467 nodep->idx = idxp;
468 idxp->nodep = nodep;
469
470 fibril_mutex_unlock(&idxp->lock);
471 *rfn = FS_NODE(nodep);
472 return EOK;
473}
474
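/** Destroy an unlinked, childless node and free its cluster chain. */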
475int fat_destroy_node(fs_node_t *fn)
476{
477 fat_node_t *nodep = FAT_NODE(fn);
478 fat_bs_t *bs;
479 bool has_children;
480 int rc;
481
482 /*
483 * The node is not reachable from the file system. This means that the
484 * link count should be zero and that the index structure cannot be
485 * found in the position hash. Obviously, we don't need to lock either the
486 * node or its index structure.
487 */
488 assert(nodep->lnkcnt == 0);
489
490 /*
491 * The node must not have any children.
492 */
493 rc = fat_has_children(&has_children, fn);
494 if (rc != EOK)
495 return rc;
496 assert(!has_children);
497
498 bs = block_bb_get(nodep->idx->dev_handle);
499 if (nodep->firstc != FAT_CLST_RES0) {
500 assert(nodep->size);
501 /* Free all clusters allocated to the node. */
502 rc = fat_free_clusters(bs, nodep->idx->dev_handle,
503 nodep->firstc);
504 }
505
506 fat_idx_destroy(nodep->idx);
507 free(nodep->bp);
508 free(nodep);
509 return rc;
510}
511
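/** Link a child node into a parent directory under the given name. */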
512int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
513{
514 fat_node_t *parentp = FAT_NODE(pfn);
515 fat_node_t *childp = FAT_NODE(cfn);
516 fat_dentry_t *d;
517 fat_bs_t *bs;
518 block_t *b;
519 unsigned i, j;
520 uint16_t bps;
521 unsigned dps;
522 unsigned blocks;
523 fat_cluster_t mcl, lcl;
524 int rc;
525
526 fibril_mutex_lock(&childp->lock);
527 if (childp->lnkcnt == 1) {
528 /*
529 * On FAT, we don't support multiple hard links.
530 */
531 fibril_mutex_unlock(&childp->lock);
532 return EMLINK;
533 }
534 assert(childp->lnkcnt == 0);
535 fibril_mutex_unlock(&childp->lock);
536
537 if (!fat_dentry_name_verify(name)) {
538 /*
539 * Attempt to create an unsupported name.
540 */
541 return ENOTSUP;
542 }
543
544 /*
545 * Get an unused dentry in the parent node, or grow the parent and
546 * allocate a new one.
547 */
548
549 fibril_mutex_lock(&parentp->idx->lock);
550 bs = block_bb_get(parentp->idx->dev_handle);
551 bps = uint16_t_le2host(bs->bps);
552 dps = bps / sizeof(fat_dentry_t);
553
554 blocks = parentp->size / bps;
555
556 for (i = 0; i < blocks; i++) {
557 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
558 if (rc != EOK) {
559 fibril_mutex_unlock(&parentp->idx->lock);
560 return rc;
561 }
562 for (j = 0; j < dps; j++) {
563 d = ((fat_dentry_t *)b->data) + j;
564 switch (fat_classify_dentry(d)) {
565 case FAT_DENTRY_SKIP:
566 case FAT_DENTRY_VALID:
567 /* skipping used and meta entries */
568 continue;
569 case FAT_DENTRY_FREE:
570 case FAT_DENTRY_LAST:
571 /* found an empty slot */
572 goto hit;
573 }
574 }
575 rc = block_put(b);
576 if (rc != EOK) {
577 fibril_mutex_unlock(&parentp->idx->lock);
578 return rc;
579 }
580 }
581 j = 0;
582
583 /*
584 * We need to grow the parent in order to create a new unused dentry.
585 */
586 if (parentp->firstc == FAT_CLST_ROOT) {
587 /* Can't grow the root directory. */
588 fibril_mutex_unlock(&parentp->idx->lock);
589 return ENOSPC;
590 }
591 rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
592 if (rc != EOK) {
593 fibril_mutex_unlock(&parentp->idx->lock);
594 return rc;
595 }
596 rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
597 if (rc != EOK) {
598 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
599 fibril_mutex_unlock(&parentp->idx->lock);
600 return rc;
601 }
602 rc = fat_append_clusters(bs, parentp, mcl);
603 if (rc != EOK) {
604 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
605 fibril_mutex_unlock(&parentp->idx->lock);
606 return rc;
607 }
608 parentp->size += bps * bs->spc;
609 parentp->dirty = true; /* need to sync node */
610 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
611 if (rc != EOK) {
612 fibril_mutex_unlock(&parentp->idx->lock);
613 return rc;
614 }
615 d = (fat_dentry_t *)b->data;
616
617hit:
618 /*
619 * At this point we only establish the link between the parent and the
620 * child. The dentry, except for the name and the extension, will remain
621 * uninitialized until the corresponding node is synced. Thus the valid
622 * dentry data is kept in the child node structure.
623 */
624 memset(d, 0, sizeof(fat_dentry_t));
625 fat_dentry_name_set(d, name);
626 b->dirty = true; /* need to sync block */
627 rc = block_put(b);
628 fibril_mutex_unlock(&parentp->idx->lock);
629 if (rc != EOK)
630 return rc;
631
632 fibril_mutex_lock(&childp->idx->lock);
633
634 /*
635 * If possible, create the Sub-directory Identifier Entry and the
636 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
637 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
638 * not use them anyway, so this is rather a sign of our good will.
639 */
640 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
641 if (rc != EOK) {
642 /*
643 * Rather than returning an error, simply skip the creation of
644 * these two entries.
645 */
646 goto skip_dots;
647 }
648 d = (fat_dentry_t *)b->data;
649 if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
650 str_cmp(d->name, FAT_NAME_DOT) == 0) {
651 memset(d, 0, sizeof(fat_dentry_t));
652 str_cpy(d->name, 8, FAT_NAME_DOT);
653 str_cpy(d->ext, 3, FAT_EXT_PAD);
654 d->attr = FAT_ATTR_SUBDIR;
655 d->firstc = host2uint16_t_le(childp->firstc);
656 /* TODO: initialize also the date/time members. */
657 }
658 d++;
659 if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
660 str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
661 memset(d, 0, sizeof(fat_dentry_t));
662 str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
663 str_cpy(d->ext, 3, FAT_EXT_PAD);
664 d->attr = FAT_ATTR_SUBDIR;
665 d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
666 host2uint16_t_le(FAT_CLST_RES0) :
667 host2uint16_t_le(parentp->firstc);
668 /* TODO: initialize also the date/time members. */
669 }
670 b->dirty = true; /* need to sync block */
671 /*
672 * Ignore the return value as we would have fallen through on error
673 * anyway.
674 */
675 (void) block_put(b);
676skip_dots:
677
678 childp->idx->pfc = parentp->firstc;
679 childp->idx->pdi = i * dps + j;
680 fibril_mutex_unlock(&childp->idx->lock);
681
682 fibril_mutex_lock(&childp->lock);
683 childp->lnkcnt = 1;
684 childp->dirty = true; /* need to sync node */
685 fibril_mutex_unlock(&childp->lock);
686
687 /*
688 * Hash in the index structure into the position hash.
689 */
690 fat_idx_hashin(childp->idx);
691
692 return EOK;
693}
694
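/** Remove the dentry linking the child node into the parent directory. */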
695int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
696{
697 fat_node_t *parentp = FAT_NODE(pfn);
698 fat_node_t *childp = FAT_NODE(cfn);
699 fat_bs_t *bs;
700 fat_dentry_t *d;
701 uint16_t bps;
702 block_t *b;
703 bool has_children;
704 int rc;
705
706 if (!parentp)
707 return EBUSY;
708
709 rc = fat_has_children(&has_children, cfn);
710 if (rc != EOK)
711 return rc;
712 if (has_children)
713 return ENOTEMPTY;
714
715 fibril_mutex_lock(&parentp->lock);
716 fibril_mutex_lock(&childp->lock);
717 assert(childp->lnkcnt == 1);
718 fibril_mutex_lock(&childp->idx->lock);
719 bs = block_bb_get(childp->idx->dev_handle);
720 bps = uint16_t_le2host(bs->bps);
721
722 rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
723 (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
724 BLOCK_FLAGS_NONE);
725 if (rc != EOK)
726 goto error;
727 d = (fat_dentry_t *)b->data +
728 (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
729 /* mark the dentry as not-currently-used */
730 d->name[0] = FAT_DENTRY_ERASED;
731 b->dirty = true; /* need to sync block */
732 rc = block_put(b);
733 if (rc != EOK)
734 goto error;
735
736 /* remove the index structure from the position hash */
737 fat_idx_hashout(childp->idx);
738 /* clear position information */
739 childp->idx->pfc = FAT_CLST_RES0;
740 childp->idx->pdi = 0;
741 fibril_mutex_unlock(&childp->idx->lock);
742 childp->lnkcnt = 0;
743 childp->dirty = true;
744 fibril_mutex_unlock(&childp->lock);
745 fibril_mutex_unlock(&parentp->lock);
746
747 return EOK;
748
749error:
750 fibril_mutex_unlock(&parentp->lock);
751 fibril_mutex_unlock(&childp->lock);
752 fibril_mutex_unlock(&childp->idx->lock);
753 return rc;
754}
755
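/** Determine whether a directory node contains at least one valid dentry. */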
756int fat_has_children(bool *has_children, fs_node_t *fn)
757{
758 fat_bs_t *bs;
759 fat_node_t *nodep = FAT_NODE(fn);
760 unsigned bps;
761 unsigned dps;
762 unsigned blocks;
763 block_t *b;
764 unsigned i, j;
765 int rc;
766
767 if (nodep->type != FAT_DIRECTORY) {
768 *has_children = false;
769 return EOK;
770 }
771
772 fibril_mutex_lock(&nodep->idx->lock);
773 bs = block_bb_get(nodep->idx->dev_handle);
774 bps = uint16_t_le2host(bs->bps);
775 dps = bps / sizeof(fat_dentry_t);
776
777 blocks = nodep->size / bps;
778
779 for (i = 0; i < blocks; i++) {
780 fat_dentry_t *d;
781
782 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
783 if (rc != EOK) {
784 fibril_mutex_unlock(&nodep->idx->lock);
785 return rc;
786 }
787 for (j = 0; j < dps; j++) {
788 d = ((fat_dentry_t *)b->data) + j;
789 switch (fat_classify_dentry(d)) {
790 case FAT_DENTRY_SKIP:
791 case FAT_DENTRY_FREE:
792 continue;
793 case FAT_DENTRY_LAST:
794 rc = block_put(b);
795 /* expect EOK as b was not dirty */
796 assert(rc == EOK);
797 fibril_mutex_unlock(&nodep->idx->lock);
798 *has_children = false;
799 return EOK;
800 default:
801 case FAT_DENTRY_VALID:
802 rc = block_put(b);
803 /* expect EOK as b was not dirty */
804 assert(rc == EOK);
805 fibril_mutex_unlock(&nodep->idx->lock);
806 *has_children = true;
807 return EOK;
808 }
809 }
810 rc = block_put(b);
811 assert(rc == EOK); /* expect EOK as b was not dirty */
812 }
813
814 fibril_mutex_unlock(&nodep->idx->lock);
815 *has_children = false;
816 return EOK;
817}
818
819
820fs_index_t fat_index_get(fs_node_t *fn)
821{
822 return FAT_NODE(fn)->idx->index;
823}
824
825size_t fat_size_get(fs_node_t *fn)
826{
827 return FAT_NODE(fn)->size;
828}
829
830unsigned fat_lnkcnt_get(fs_node_t *fn)
831{
832 return FAT_NODE(fn)->lnkcnt;
833}
834
835char fat_plb_get_char(unsigned pos)
836{
837 return fat_reg.plb_ro[pos % PLB_SIZE];
838}
839
840bool fat_is_directory(fs_node_t *fn)
841{
842 return FAT_NODE(fn)->type == FAT_DIRECTORY;
843}
844
845bool fat_is_file(fs_node_t *fn)
846{
847 return FAT_NODE(fn)->type == FAT_FILE;
848}
849
850/** libfs operations */
851libfs_ops_t fat_libfs_ops = {
852 .root_get = fat_root_get,
853 .match = fat_match,
854 .node_get = fat_node_get,
855 .node_put = fat_node_put,
856 .create = fat_create_node,
857 .destroy = fat_destroy_node,
858 .link = fat_link,
859 .unlink = fat_unlink,
860 .has_children = fat_has_children,
861 .index_get = fat_index_get,
862 .size_get = fat_size_get,
863 .lnkcnt_get = fat_lnkcnt_get,
864 .plb_get_char = fat_plb_get_char,
865 .is_directory = fat_is_directory,
866 .is_file = fat_is_file
867};
868
869/*
870 * VFS operations.
871 */
872
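/** Mount request handler: initialize the block cache, index structures and root node. */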
873void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
874{
875 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
876 enum cache_mode cmode;
877 fat_bs_t *bs;
878 uint16_t bps;
879 uint16_t rde;
880 int rc;
881
882 /* accept the mount options */
883 ipc_callid_t callid;
884 size_t size;
885 if (!ipc_data_write_receive(&callid, &size)) {
886 ipc_answer_0(callid, EINVAL);
887 ipc_answer_0(rid, EINVAL);
888 return;
889 }
890 char *opts = malloc(size + 1);
891 if (!opts) {
892 ipc_answer_0(callid, ENOMEM);
893 ipc_answer_0(rid, ENOMEM);
894 return;
895 }
896 ipcarg_t retval = ipc_data_write_finalize(callid, opts, size);
897 if (retval != EOK) {
898 ipc_answer_0(rid, retval);
899 free(opts);
900 return;
901 }
902 opts[size] = '\0';
903
904 /* Check for option enabling write through. */
905 if (str_cmp(opts, "wtcache") == 0)
906 cmode = CACHE_MODE_WT;
907 else
908 cmode = CACHE_MODE_WB;
909
910 /* initialize libblock */
911 rc = block_init(dev_handle, BS_SIZE);
912 if (rc != EOK) {
913 ipc_answer_0(rid, rc);
914 return;
915 }
916
917 /* prepare the boot block */
918 rc = block_bb_read(dev_handle, BS_BLOCK);
919 if (rc != EOK) {
920 block_fini(dev_handle);
921 ipc_answer_0(rid, rc);
922 return;
923 }
924
925 /* get the buffer with the boot sector */
926 bs = block_bb_get(dev_handle);
927
928 /* Read the number of root directory entries. */
929 bps = uint16_t_le2host(bs->bps);
930 rde = uint16_t_le2host(bs->root_ent_max);
931
932 if (bps != BS_SIZE) {
933 block_fini(dev_handle);
934 ipc_answer_0(rid, ENOTSUP);
935 return;
936 }
937
938 /* Initialize the block cache */
939 rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
940 if (rc != EOK) {
941 block_fini(dev_handle);
942 ipc_answer_0(rid, rc);
943 return;
944 }
945
946 rc = fat_idx_init_by_dev_handle(dev_handle);
947 if (rc != EOK) {
948 block_fini(dev_handle);
949 ipc_answer_0(rid, rc);
950 return;
951 }
952
953 /* Initialize the root node. */
954 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
955 if (!rfn) {
956 block_fini(dev_handle);
957 fat_idx_fini_by_dev_handle(dev_handle);
958 ipc_answer_0(rid, ENOMEM);
959 return;
960 }
961 fs_node_initialize(rfn);
962 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
963 if (!rootp) {
964 free(rfn);
965 block_fini(dev_handle);
966 fat_idx_fini_by_dev_handle(dev_handle);
967 ipc_answer_0(rid, ENOMEM);
968 return;
969 }
970 fat_node_initialize(rootp);
971
972 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
973 if (!ridxp) {
974 free(rfn);
975 free(rootp);
976 block_fini(dev_handle);
977 fat_idx_fini_by_dev_handle(dev_handle);
978 ipc_answer_0(rid, ENOMEM);
979 return;
980 }
981 assert(ridxp->index == 0);
982 /* ridxp->lock held */
983
984 rootp->type = FAT_DIRECTORY;
985 rootp->firstc = FAT_CLST_ROOT;
986 rootp->refcnt = 1;
987 rootp->lnkcnt = 0; /* FS root is not linked */
988 rootp->size = rde * sizeof(fat_dentry_t);
989 rootp->idx = ridxp;
990 ridxp->nodep = rootp;
991 rootp->bp = rfn;
992 rfn->data = rootp;
993
994 fibril_mutex_unlock(&ridxp->lock);
995
996 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
997}
998
999void fat_mount(ipc_callid_t rid, ipc_call_t *request)
1000{
1001 libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1002}
1003
1004void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
1005{
1006 libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1007}
1008
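/** Read request handler: return at most one block of file data or a single directory entry. */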
1009void fat_read(ipc_callid_t rid, ipc_call_t *request)
1010{
1011 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1012 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1013 off_t pos = (off_t)IPC_GET_ARG3(*request);
1014 fs_node_t *fn;
1015 fat_node_t *nodep;
1016 fat_bs_t *bs;
1017 uint16_t bps;
1018 size_t bytes;
1019 block_t *b;
1020 int rc;
1021
1022 rc = fat_node_get(&fn, dev_handle, index);
1023 if (rc != EOK) {
1024 ipc_answer_0(rid, rc);
1025 return;
1026 }
1027 if (!fn) {
1028 ipc_answer_0(rid, ENOENT);
1029 return;
1030 }
1031 nodep = FAT_NODE(fn);
1032
1033 ipc_callid_t callid;
1034 size_t len;
1035 if (!ipc_data_read_receive(&callid, &len)) {
1036 fat_node_put(fn);
1037 ipc_answer_0(callid, EINVAL);
1038 ipc_answer_0(rid, EINVAL);
1039 return;
1040 }
1041
1042 bs = block_bb_get(dev_handle);
1043 bps = uint16_t_le2host(bs->bps);
1044
1045 if (nodep->type == FAT_FILE) {
1046 /*
1047 * Our strategy for regular file reads is to read one block at
1048 * most and make use of the possibility to return less data than
1049 * requested. This keeps the code very simple.
1050 */
1051 if (pos >= nodep->size) {
1052 /* reading beyond the EOF */
1053 bytes = 0;
1054 (void) ipc_data_read_finalize(callid, NULL, 0);
1055 } else {
1056 bytes = min(len, bps - pos % bps);
1057 bytes = min(bytes, nodep->size - pos);
1058 rc = fat_block_get(&b, bs, nodep, pos / bps,
1059 BLOCK_FLAGS_NONE);
1060 assert(rc == EOK);
1061 (void) ipc_data_read_finalize(callid, b->data + pos % bps,
1062 bytes);
1063 rc = block_put(b);
1064 assert(rc == EOK);
1065 }
1066 } else {
1067 unsigned bnum;
1068 off_t spos = pos;
1069 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
1070 fat_dentry_t *d;
1071
1072 assert(nodep->type == FAT_DIRECTORY);
1073 assert(nodep->size % bps == 0);
1074 assert(bps % sizeof(fat_dentry_t) == 0);
1075
1076 /*
1077 * Our strategy for readdir() is to use the position pointer as
1078 * an index into the array of all dentries. On entry, it points
1079 * to the first unread dentry. If we skip any dentries, we bump
1080 * the position pointer accordingly.
1081 */
1082 bnum = (pos * sizeof(fat_dentry_t)) / bps;
1083 while (bnum < nodep->size / bps) {
1084 off_t o;
1085
1086 rc = fat_block_get(&b, bs, nodep, bnum,
1087 BLOCK_FLAGS_NONE);
1088 assert(rc == EOK);
1089 for (o = pos % (bps / sizeof(fat_dentry_t));
1090 o < bps / sizeof(fat_dentry_t);
1091 o++, pos++) {
1092 d = ((fat_dentry_t *)b->data) + o;
1093 switch (fat_classify_dentry(d)) {
1094 case FAT_DENTRY_SKIP:
1095 case FAT_DENTRY_FREE:
1096 continue;
1097 case FAT_DENTRY_LAST:
1098 rc = block_put(b);
1099 assert(rc == EOK);
1100 goto miss;
1101 default:
1102 case FAT_DENTRY_VALID:
1103 fat_dentry_name_get(d, name);
1104 rc = block_put(b);
1105 assert(rc == EOK);
1106 goto hit;
1107 }
1108 }
1109 rc = block_put(b);
1110 assert(rc == EOK);
1111 bnum++;
1112 }
1113miss:
1114 fat_node_put(fn);
1115 ipc_answer_0(callid, ENOENT);
1116 ipc_answer_1(rid, ENOENT, 0);
1117 return;
1118hit:
1119 (void) ipc_data_read_finalize(callid, name, str_size(name) + 1);
1120 bytes = (pos - spos) + 1;
1121 }
1122
1123 fat_node_put(fn);
1124 ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
1125}
1126
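/** Write request handler: write at most one block, allocating new clusters as needed. */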
1127void fat_write(ipc_callid_t rid, ipc_call_t *request)
1128{
1129 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1130 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1131 off_t pos = (off_t)IPC_GET_ARG3(*request);
1132 fs_node_t *fn;
1133 fat_node_t *nodep;
1134 fat_bs_t *bs;
1135 size_t bytes;
1136 block_t *b;
1137 uint16_t bps;
1138 unsigned spc;
1139 unsigned bpc; /* bytes per cluster */
1140 off_t boundary;
1141 int flags = BLOCK_FLAGS_NONE;
1142 int rc;
1143
1144 rc = fat_node_get(&fn, dev_handle, index);
1145 if (rc != EOK) {
1146 ipc_answer_0(rid, rc);
1147 return;
1148 }
1149 if (!fn) {
1150 ipc_answer_0(rid, ENOENT);
1151 return;
1152 }
1153 nodep = FAT_NODE(fn);
1154
1155 ipc_callid_t callid;
1156 size_t len;
1157 if (!ipc_data_write_receive(&callid, &len)) {
1158 fat_node_put(fn);
1159 ipc_answer_0(callid, EINVAL);
1160 ipc_answer_0(rid, EINVAL);
1161 return;
1162 }
1163
1164 bs = block_bb_get(dev_handle);
1165 bps = uint16_t_le2host(bs->bps);
1166 spc = bs->spc;
1167 bpc = bps * spc;
1168
1169 /*
1170 * In all scenarios, we will attempt to write out at most one block worth
1171 * of data. There might be more efficient approaches, but this one
1172 * greatly simplifies fat_write(). Note that we can afford to do this
1173 * because the client must be ready to handle a return value signaling
1174 * a smaller number of bytes written.
1175 */
1176 bytes = min(len, bps - pos % bps);
1177 if (bytes == bps)
1178 flags |= BLOCK_FLAGS_NOREAD;
1179
1180 boundary = ROUND_UP(nodep->size, bpc);
1181 if (pos < boundary) {
1182 /*
1183 * This is the easier case: we are either overwriting already
1184 * existing contents or writing beyond the EOF, but still within
1185 * the limits of the last cluster. The node size may grow to the
1186 * next block size boundary.
1187 */
1188 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1189 assert(rc == EOK);
1190 rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
1191 assert(rc == EOK);
1192 (void) ipc_data_write_finalize(callid, b->data + pos % bps,
1193 bytes);
1194 b->dirty = true; /* need to sync block */
1195 rc = block_put(b);
1196 assert(rc == EOK);
1197 if (pos + bytes > nodep->size) {
1198 nodep->size = pos + bytes;
1199 nodep->dirty = true; /* need to sync node */
1200 }
1201 ipc_answer_2(rid, EOK, bytes, nodep->size);
1202 fat_node_put(fn);
1203 return;
1204 } else {
1205 /*
1206 * This is the more difficult case. We must allocate new
1207 * clusters for the node and zero them out.
1208 */
1209 int status;
1210 unsigned nclsts;
1211 fat_cluster_t mcl, lcl;
1212
1213 nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
1214 /* create an independent chain of nclsts clusters in all FATs */
1215 status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
1216 if (status != EOK) {
1217 /* could not allocate a chain of nclsts clusters */
1218 fat_node_put(fn);
1219 ipc_answer_0(callid, status);
1220 ipc_answer_0(rid, status);
1221 return;
1222 }
1223 /* zero fill any gaps */
1224 rc = fat_fill_gap(bs, nodep, mcl, pos);
1225 assert(rc == EOK);
1226 rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
1227 flags);
1228 assert(rc == EOK);
1229 (void) ipc_data_write_finalize(callid, b->data + pos % bps,
1230 bytes);
1231 b->dirty = true; /* need to sync block */
1232 rc = block_put(b);
1233 assert(rc == EOK);
1234 /*
1235 * Append the cluster chain starting in mcl to the end of the
1236 * node's cluster chain.
1237 */
1238 rc = fat_append_clusters(bs, nodep, mcl);
1239 assert(rc == EOK);
1240 nodep->size = pos + bytes;
1241 nodep->dirty = true; /* need to sync node */
1242 ipc_answer_2(rid, EOK, bytes, nodep->size);
1243 fat_node_put(fn);
1244 return;
1245 }
1246}
1247
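/** Truncate request handler: only shrinking the node is supported. */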
1248void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1249{
1250 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1251 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1252 size_t size = (off_t)IPC_GET_ARG3(*request);
1253 fs_node_t *fn;
1254 fat_node_t *nodep;
1255 fat_bs_t *bs;
1256 uint16_t bps;
1257 uint8_t spc;
1258 unsigned bpc; /* bytes per cluster */
1259 int rc;
1260
1261 rc = fat_node_get(&fn, dev_handle, index);
1262 if (rc != EOK) {
1263 ipc_answer_0(rid, rc);
1264 return;
1265 }
1266 if (!fn) {
1267 ipc_answer_0(rid, ENOENT);
1268 return;
1269 }
1270 nodep = FAT_NODE(fn);
1271
1272 bs = block_bb_get(dev_handle);
1273 bps = uint16_t_le2host(bs->bps);
1274 spc = bs->spc;
1275 bpc = bps * spc;
1276
1277 if (nodep->size == size) {
1278 rc = EOK;
1279 } else if (nodep->size < size) {
1280 /*
1281 * The standard says we have the freedom to grow the node.
1282 * For now, we simply return an error.
1283 */
1284 rc = EINVAL;
1285 } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
1286 /*
1287 * The node will be shrunk, but no clusters will be deallocated.
1288 */
1289 nodep->size = size;
1290 nodep->dirty = true; /* need to sync node */
1291 rc = EOK;
1292 } else {
1293 /*
1294 * The node will be shrunk, clusters will be deallocated.
1295 */
1296 if (size == 0) {
1297 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1298 if (rc != EOK)
1299 goto out;
1300 } else {
1301 fat_cluster_t lastc;
1302 rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
1303 &lastc, NULL, (size - 1) / bpc);
1304 if (rc != EOK)
1305 goto out;
1306 rc = fat_chop_clusters(bs, nodep, lastc);
1307 if (rc != EOK)
1308 goto out;
1309 }
1310 nodep->size = size;
1311 nodep->dirty = true; /* need to sync node */
1312 rc = EOK;
1313 }
1314out:
1315 fat_node_put(fn);
1316 ipc_answer_0(rid, rc);
1317 return;
1318}
1319
1320void fat_close(ipc_callid_t rid, ipc_call_t *request)
1321{
1322 ipc_answer_0(rid, EOK);
1323}
1324
1325void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1326{
1327 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1328 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1329 fs_node_t *fn;
1330 int rc;
1331
1332 rc = fat_node_get(&fn, dev_handle, index);
1333 if (rc != EOK) {
1334 ipc_answer_0(rid, rc);
1335 return;
1336 }
1337 if (!fn) {
1338 ipc_answer_0(rid, ENOENT);
1339 return;
1340 }
1341
1342 rc = fat_destroy_node(fn);
1343 ipc_answer_0(rid, rc);
1344}
1345
1346void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
1347{
1348 libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1349}
1350
1351void fat_stat(ipc_callid_t rid, ipc_call_t *request)
1352{
1353 libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1354}
1355
1356void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1357{
1358 /* Dummy implementation */
1359 ipc_answer_0(rid, EOK);
1360}
1361
1362/**
1363 * @}
1364 */