source: mainline/uspace/srv/fs/fat/fat_ops.c@ bbddafb

Last change on this file since bbddafb was 8810c63, checked in by Jakub Jermar <jakub@…>, 16 years ago

Do not assume that a block is not dirty if we did not make it dirty.
It could have been already dirty when we got a reference to it.

1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/ipc.h>
45#include <ipc/services.h>
46#include <ipc/devmap.h>
47#include <async.h>
48#include <errno.h>
49#include <string.h>
50#include <byteorder.h>
51#include <adt/hash_table.h>
52#include <adt/list.h>
53#include <assert.h>
54#include <fibril_sync.h>
55#include <sys/mman.h>
56#include <align.h>
57
58#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
59#define FS_NODE(node) ((node) ? (node)->bp : NULL)
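/*
 * Note (descriptive comment, not in the original): a VFS-level fs_node_t and
 * its FAT-specific fat_node_t always reference each other -- fs_node_t.data
 * points to the fat_node_t and fat_node_t.bp points back to the fs_node_t --
 * which is what the two macros above rely on.
 */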
60
61/** Mutex protecting the list of cached free FAT nodes. */
62static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
63
64/** List of cached free FAT nodes. */
65static LIST_INITIALIZE(ffn_head);
66
67/*
68 * Forward declarations of FAT libfs operations.
69 */
70static int fat_root_get(fs_node_t **, dev_handle_t);
71static int fat_match(fs_node_t **, fs_node_t *, const char *);
72static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
73static int fat_node_put(fs_node_t *);
74static int fat_create_node(fs_node_t **, dev_handle_t, int);
75static int fat_destroy_node(fs_node_t *);
76static int fat_link(fs_node_t *, fs_node_t *, const char *);
77static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
78static int fat_has_children(bool *, fs_node_t *);
79static fs_index_t fat_index_get(fs_node_t *);
80static size_t fat_size_get(fs_node_t *);
81static unsigned fat_lnkcnt_get(fs_node_t *);
82static char fat_plb_get_char(unsigned);
83static bool fat_is_directory(fs_node_t *);
84static bool fat_is_file(fs_node_t *node);
85
86/*
87 * Helper functions.
88 */
89static void fat_node_initialize(fat_node_t *node)
90{
91 fibril_mutex_initialize(&node->lock);
92 node->bp = NULL;
93 node->idx = NULL;
94 node->type = 0;
95 link_initialize(&node->ffn_link);
96 node->size = 0;
97 node->lnkcnt = 0;
98 node->refcnt = 0;
99 node->dirty = false;
100}
101
102static int fat_node_sync(fat_node_t *node)
103{
104 block_t *b;
105 fat_bs_t *bs;
106 fat_dentry_t *d;
107 uint16_t bps;
108 unsigned dps;
109 int rc;
110
111 assert(node->dirty);
112
113 bs = block_bb_get(node->idx->dev_handle);
114 bps = uint16_t_le2host(bs->bps);
115 dps = bps / sizeof(fat_dentry_t);
116
117 /* Read the block that contains the dentry of interest. */
118 rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
119 (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
120 if (rc != EOK)
121 return rc;
122
123 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);
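	/*
	 * Illustration with hypothetical numbers: with bps == 512 and the
	 * standard 32-byte fat_dentry_t, dps == 16, so a dentry with
	 * pdi == 37 lives in relative block 37 * 32 / 512 == 2 of its parent
	 * and at in-block index 37 % 16 == 5.
	 */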
124
125 d->firstc = host2uint16_t_le(node->firstc);
126 if (node->type == FAT_FILE) {
127 d->size = host2uint32_t_le(node->size);
128 } else if (node->type == FAT_DIRECTORY) {
129 d->attr = FAT_ATTR_SUBDIR;
130 }
131
132 /* TODO: update other fields? (e.g. time fields) */
133
134 b->dirty = true; /* need to sync block */
135 rc = block_put(b);
136 return rc;
137}
138
139static int fat_node_get_new(fat_node_t **nodepp)
140{
141 fs_node_t *fn;
142 fat_node_t *nodep;
143 int rc;
144
145 fibril_mutex_lock(&ffn_mutex);
146 if (!list_empty(&ffn_head)) {
147 /* Try to use a cached free node structure. */
148 fat_idx_t *idxp_tmp;
149 nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
150 if (!fibril_mutex_trylock(&nodep->lock))
151 goto skip_cache;
152 idxp_tmp = nodep->idx;
153 if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
154 fibril_mutex_unlock(&nodep->lock);
155 goto skip_cache;
156 }
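		/*
		 * Both locks above are taken with trylock: if either the node
		 * or its index structure is currently busy, we give up on
		 * recycling this cached node and fall through to a fresh
		 * allocation (skip_cache) instead of sleeping with ffn_mutex
		 * held.
		 */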
157 list_remove(&nodep->ffn_link);
158 fibril_mutex_unlock(&ffn_mutex);
159 if (nodep->dirty) {
160 rc = fat_node_sync(nodep);
161 if (rc != EOK) {
162 idxp_tmp->nodep = NULL;
163 fibril_mutex_unlock(&nodep->lock);
164 fibril_mutex_unlock(&idxp_tmp->lock);
165 free(nodep->bp);
166 free(nodep);
167 return rc;
168 }
169 }
170 idxp_tmp->nodep = NULL;
171 fibril_mutex_unlock(&nodep->lock);
172 fibril_mutex_unlock(&idxp_tmp->lock);
173 fn = FS_NODE(nodep);
174 } else {
175skip_cache:
176 /* Try to allocate a new node structure. */
177 fibril_mutex_unlock(&ffn_mutex);
178 fn = (fs_node_t *)malloc(sizeof(fs_node_t));
179 if (!fn)
180 return ENOMEM;
181 nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
182 if (!nodep) {
183 free(fn);
184 return ENOMEM;
185 }
186 }
187 fat_node_initialize(nodep);
188 fs_node_initialize(fn);
189 fn->data = nodep;
190 nodep->bp = fn;
191
192 *nodepp = nodep;
193 return EOK;
194}
195
196/** Internal version of fat_node_get().
197 *
198 * @param idxp Locked index structure.
199 */
200static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
201{
202 block_t *b;
203 fat_bs_t *bs;
204 fat_dentry_t *d;
205 fat_node_t *nodep = NULL;
206 unsigned bps;
207 unsigned spc;
208 unsigned dps;
209 int rc;
210
211 if (idxp->nodep) {
212 /*
213 * We are lucky.
214 * The node is already instantiated in memory.
215 */
216 fibril_mutex_lock(&idxp->nodep->lock);
217 if (!idxp->nodep->refcnt++) {
218 fibril_mutex_lock(&ffn_mutex);
219 list_remove(&idxp->nodep->ffn_link);
220 fibril_mutex_unlock(&ffn_mutex);
221 }
222 fibril_mutex_unlock(&idxp->nodep->lock);
223 *nodepp = idxp->nodep;
224 return EOK;
225 }
226
227 /*
228 * We must instantiate the node from the file system.
229 */
230
231 assert(idxp->pfc);
232
233 rc = fat_node_get_new(&nodep);
234 if (rc != EOK)
235 return rc;
236
237 bs = block_bb_get(idxp->dev_handle);
238 bps = uint16_t_le2host(bs->bps);
239 spc = bs->spc;
240 dps = bps / sizeof(fat_dentry_t);
241
242 /* Read the block that contains the dentry of interest. */
243 rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
244 (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
245 if (rc != EOK) {
246 (void) fat_node_put(FS_NODE(nodep));
247 return rc;
248 }
249
250 d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
251 if (d->attr & FAT_ATTR_SUBDIR) {
252 /*
253 * The only directory which does not have this bit set is the
254 * root directory itself. The root directory node is handled
255 * and initialized elsewhere.
256 */
257 nodep->type = FAT_DIRECTORY;
258 /*
259 * Unfortunately, the 'size' field of the FAT dentry is not
260 * defined for the directory entry type. We must determine the
261 * size of the directory by walking the FAT.
262 */
263 uint16_t clusters;
264 rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
265 uint16_t_le2host(d->firstc));
266 if (rc != EOK) {
267 (void) fat_node_put(FS_NODE(nodep));
268 return rc;
269 }
270 nodep->size = bps * spc * clusters;
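		/*
		 * E.g. (hypothetical geometry): bps == 512, spc == 4 and a
		 * three-cluster chain give a directory size of 6144 bytes.
		 */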
271 } else {
272 nodep->type = FAT_FILE;
273 nodep->size = uint32_t_le2host(d->size);
274 }
275 nodep->firstc = uint16_t_le2host(d->firstc);
276 nodep->lnkcnt = 1;
277 nodep->refcnt = 1;
278
279 rc = block_put(b);
280 if (rc != EOK) {
281 (void) fat_node_put(FS_NODE(nodep));
282 return rc;
283 }
284
285 /* Link the idx structure with the node structure. */
286 nodep->idx = idxp;
287 idxp->nodep = nodep;
288
289 *nodepp = nodep;
290 return EOK;
291}
292
293/*
294 * FAT libfs operations.
295 */
296
297int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
298{
299 return fat_node_get(rfn, dev_handle, 0);
300}
301
302int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
303{
304 fat_bs_t *bs;
305 fat_node_t *parentp = FAT_NODE(pfn);
306 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
307 unsigned i, j;
308 unsigned bps; /* bytes per sector */
309 unsigned dps; /* dentries per sector */
310 unsigned blocks;
311 fat_dentry_t *d;
312 block_t *b;
313 int rc;
314
315 fibril_mutex_lock(&parentp->idx->lock);
316 bs = block_bb_get(parentp->idx->dev_handle);
317 bps = uint16_t_le2host(bs->bps);
318 dps = bps / sizeof(fat_dentry_t);
319 blocks = parentp->size / bps;
320 for (i = 0; i < blocks; i++) {
321 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
322 if (rc != EOK) {
323 fibril_mutex_unlock(&parentp->idx->lock);
324 return rc;
325 }
326 for (j = 0; j < dps; j++) {
327 d = ((fat_dentry_t *)b->data) + j;
328 switch (fat_classify_dentry(d)) {
329 case FAT_DENTRY_SKIP:
330 case FAT_DENTRY_FREE:
331 continue;
332 case FAT_DENTRY_LAST:
333 /* miss */
334 rc = block_put(b);
335 fibril_mutex_unlock(&parentp->idx->lock);
336 *rfn = NULL;
337 return rc;
338 default:
339 case FAT_DENTRY_VALID:
340 fat_dentry_name_get(d, name);
341 break;
342 }
343 if (fat_dentry_namecmp(name, component) == 0) {
344 /* hit */
345 fat_node_t *nodep;
346 /*
347 * Assume tree hierarchy for locking. We
348 * already have the parent and now we are going
349 * to lock the child. Never lock in the opposite
350 * order.
351 */
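				/*
				 * The position-hash key is the linear dentry
				 * index within the parent: block number times
				 * dentries-per-block plus the in-block index
				 * (i * dps + j), i.e. the same quantity that
				 * fat_node_sync() later decomposes back into a
				 * block and an offset.
				 */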
352 fat_idx_t *idx = fat_idx_get_by_pos(
353 parentp->idx->dev_handle, parentp->firstc,
354 i * dps + j);
355 fibril_mutex_unlock(&parentp->idx->lock);
356 if (!idx) {
357 /*
358 * Can happen if memory is low or if we
359 * run out of 32-bit indices.
360 */
361 rc = block_put(b);
362 return (rc == EOK) ? ENOMEM : rc;
363 }
364 rc = fat_node_get_core(&nodep, idx);
365 assert(rc == EOK);
366 fibril_mutex_unlock(&idx->lock);
367 (void) block_put(b);
368 *rfn = FS_NODE(nodep);
369 return EOK;
370 }
371 }
372 rc = block_put(b);
373 if (rc != EOK) {
374 fibril_mutex_unlock(&parentp->idx->lock);
375 return rc;
376 }
377 }
378
379 fibril_mutex_unlock(&parentp->idx->lock);
380 *rfn = NULL;
381 return EOK;
382}
383
384/** Instantiate a FAT in-core node. */
385int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
386{
387 fat_node_t *nodep;
388 fat_idx_t *idxp;
389 int rc;
390
391 idxp = fat_idx_get_by_index(dev_handle, index);
392 if (!idxp) {
393 *rfn = NULL;
394 return EOK;
395 }
396 /* idxp->lock held */
397 rc = fat_node_get_core(&nodep, idxp);
398 fibril_mutex_unlock(&idxp->lock);
399 if (rc == EOK)
400 *rfn = FS_NODE(nodep);
401 return rc;
402}
403
404int fat_node_put(fs_node_t *fn)
405{
406 fat_node_t *nodep = FAT_NODE(fn);
407 bool destroy = false;
408
409 fibril_mutex_lock(&nodep->lock);
410 if (!--nodep->refcnt) {
411 if (nodep->idx) {
412 fibril_mutex_lock(&ffn_mutex);
413 list_append(&nodep->ffn_link, &ffn_head);
414 fibril_mutex_unlock(&ffn_mutex);
415 } else {
416 /*
417 * The node does not have any index structure associated
418 * with it. This can only mean that we are releasing
419 * the node after a failed attempt to allocate the index
420 * structure for it.
421 */
422 destroy = true;
423 }
424 }
425 fibril_mutex_unlock(&nodep->lock);
426 if (destroy) {
427 free(nodep->bp);
428 free(nodep);
429 }
430 return EOK;
431}
432
433int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
434{
435 fat_idx_t *idxp;
436 fat_node_t *nodep;
437 fat_bs_t *bs;
438 fat_cluster_t mcl, lcl;
439 uint16_t bps;
440 int rc;
441
442 bs = block_bb_get(dev_handle);
443 bps = uint16_t_le2host(bs->bps);
444 if (flags & L_DIRECTORY) {
445 /* allocate a cluster */
446 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
447 if (rc != EOK)
448 return rc;
449 /* populate the new cluster with unused dentries */
450 rc = fat_zero_cluster(bs, dev_handle, mcl);
451 if (rc != EOK) {
452 (void) fat_free_clusters(bs, dev_handle, mcl);
453 return rc;
454 }
455 }
456
457 rc = fat_node_get_new(&nodep);
458 if (rc != EOK) {
459 (void) fat_free_clusters(bs, dev_handle, mcl);
460 return rc;
461 }
462 rc = fat_idx_get_new(&idxp, dev_handle);
463 if (rc != EOK) {
464 (void) fat_free_clusters(bs, dev_handle, mcl);
465 (void) fat_node_put(FS_NODE(nodep));
466 return rc;
467 }
468 /* idxp->lock held */
469 if (flags & L_DIRECTORY) {
470 nodep->type = FAT_DIRECTORY;
471 nodep->firstc = mcl;
472 nodep->size = bps * bs->spc;
473 } else {
474 nodep->type = FAT_FILE;
475 nodep->firstc = FAT_CLST_RES0;
476 nodep->size = 0;
477 }
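	/*
	 * A new directory thus starts out with exactly one zeroed cluster of
	 * dentry space, while a new regular file owns no clusters at all
	 * (firstc == FAT_CLST_RES0, size 0) until the first write allocates
	 * some.
	 */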
478 nodep->lnkcnt = 0; /* not linked anywhere */
479 nodep->refcnt = 1;
480 nodep->dirty = true;
481
482 nodep->idx = idxp;
483 idxp->nodep = nodep;
484
485 fibril_mutex_unlock(&idxp->lock);
486 *rfn = FS_NODE(nodep);
487 return EOK;
488}
489
490int fat_destroy_node(fs_node_t *fn)
491{
492 fat_node_t *nodep = FAT_NODE(fn);
493 fat_bs_t *bs;
494 bool has_children;
495 int rc;
496
497 /*
498 * The node is not reachable from the file system. This means that the
499 * link count should be zero and that the index structure cannot be
500 * found in the position hash. Obviously, we need to lock neither the node
501 * nor its index structure.
502 */
503 assert(nodep->lnkcnt == 0);
504
505 /*
506 * The node must not have any children.
507 */
508 rc = fat_has_children(&has_children, fn);
509 if (rc != EOK)
510 return rc;
511 assert(!has_children);
512
513 bs = block_bb_get(nodep->idx->dev_handle);
514 if (nodep->firstc != FAT_CLST_RES0) {
515 assert(nodep->size);
516 /* Free all clusters allocated to the node. */
517 rc = fat_free_clusters(bs, nodep->idx->dev_handle,
518 nodep->firstc);
519 }
520
521 fat_idx_destroy(nodep->idx);
522 free(nodep->bp);
523 free(nodep);
524 return rc;
525}
526
527int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
528{
529 fat_node_t *parentp = FAT_NODE(pfn);
530 fat_node_t *childp = FAT_NODE(cfn);
531 fat_dentry_t *d;
532 fat_bs_t *bs;
533 block_t *b;
534 unsigned i, j;
535 uint16_t bps;
536 unsigned dps;
537 unsigned blocks;
538 fat_cluster_t mcl, lcl;
539 int rc;
540
541 fibril_mutex_lock(&childp->lock);
542 if (childp->lnkcnt == 1) {
543 /*
544 * On FAT, we don't support multiple hard links.
545 */
546 fibril_mutex_unlock(&childp->lock);
547 return EMLINK;
548 }
549 assert(childp->lnkcnt == 0);
550 fibril_mutex_unlock(&childp->lock);
551
552 if (!fat_dentry_name_verify(name)) {
553 /*
554 * Attempt to create a node with an unsupported name.
555 */
556 return ENOTSUP;
557 }
558
559 /*
560 * Get an unused dentry from the parent node, or grow the parent and
561 * allocate a new one.
562 */
563
564 fibril_mutex_lock(&parentp->idx->lock);
565 bs = block_bb_get(parentp->idx->dev_handle);
566 bps = uint16_t_le2host(bs->bps);
567 dps = bps / sizeof(fat_dentry_t);
568
569 blocks = parentp->size / bps;
570
571 for (i = 0; i < blocks; i++) {
572 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
573 if (rc != EOK) {
574 fibril_mutex_unlock(&parentp->idx->lock);
575 return rc;
576 }
577 for (j = 0; j < dps; j++) {
578 d = ((fat_dentry_t *)b->data) + j;
579 switch (fat_classify_dentry(d)) {
580 case FAT_DENTRY_SKIP:
581 case FAT_DENTRY_VALID:
582 /* skipping used and meta entries */
583 continue;
584 case FAT_DENTRY_FREE:
585 case FAT_DENTRY_LAST:
586 /* found an empty slot */
587 goto hit;
588 }
589 }
590 rc = block_put(b);
591 if (rc != EOK) {
592 fibril_mutex_unlock(&parentp->idx->lock);
593 return rc;
594 }
595 }
596 j = 0;
597
598 /*
599 * We need to grow the parent in order to create a new unused dentry.
600 */
601 if (parentp->firstc == FAT_CLST_ROOT) {
602 /* Can't grow the root directory. */
603 fibril_mutex_unlock(&parentp->idx->lock);
604 return ENOSPC;
605 }
606 rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
607 if (rc != EOK) {
608 fibril_mutex_unlock(&parentp->idx->lock);
609 return rc;
610 }
611 rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
612 if (rc != EOK) {
613 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
614 fibril_mutex_unlock(&parentp->idx->lock);
615 return rc;
616 }
617 rc = fat_append_clusters(bs, parentp, mcl);
618 if (rc != EOK) {
619 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
620 fibril_mutex_unlock(&parentp->idx->lock);
621 return rc;
622 }
623 parentp->size += bps * bs->spc;
624 parentp->dirty = true; /* need to sync node */
625 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
626 if (rc != EOK) {
627 fibril_mutex_unlock(&parentp->idx->lock);
628 return rc;
629 }
630 d = (fat_dentry_t *)b->data;
631
632hit:
633 /*
634 * At this point we only establish the link between the parent and the
635 * child. The dentry, except for the name and the extension, will remain
636 * uninitialized until the corresponding node is synced. Thus the valid
637 * dentry data is kept in the child node structure.
638 */
639 memset(d, 0, sizeof(fat_dentry_t));
640 fat_dentry_name_set(d, name);
641 b->dirty = true; /* need to sync block */
642 rc = block_put(b);
643 fibril_mutex_unlock(&parentp->idx->lock);
644 if (rc != EOK)
645 return rc;
646
647 fibril_mutex_lock(&childp->idx->lock);
648
649 /*
650 * If possible, create the Sub-directory Identifier Entry and the
651 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
652 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
653 * not use them anyway, so this is rather a sign of our good will.
654 */
655 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
656 if (rc != EOK) {
657 /*
658 * Rather than returning an error, simply skip the creation of
659 * these two entries.
660 */
661 goto skip_dots;
662 }
663 d = (fat_dentry_t *)b->data;
664 if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
665 str_cmp(d->name, FAT_NAME_DOT) == 0) {
666 memset(d, 0, sizeof(fat_dentry_t));
667 str_cpy(d->name, 8, FAT_NAME_DOT);
668 str_cpy(d->ext, 3, FAT_EXT_PAD);
669 d->attr = FAT_ATTR_SUBDIR;
670 d->firstc = host2uint16_t_le(childp->firstc);
671 /* TODO: initialize also the date/time members. */
672 }
673 d++;
674 if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
675 str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
676 memset(d, 0, sizeof(fat_dentry_t));
677 str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
678 str_cpy(d->ext, 3, FAT_EXT_PAD);
679 d->attr = FAT_ATTR_SUBDIR;
680 d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
681 host2uint16_t_le(FAT_CLST_RES0) :
682 host2uint16_t_le(parentp->firstc);
683 /* TODO: initialize also the date/time members. */
684 }
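	/*
	 * Note that when the parent is the root directory, the ".." entry's
	 * first cluster is stored as 0 (FAT_CLST_RES0), the conventional FAT
	 * encoding for a parent that is the root directory.
	 */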
685 b->dirty = true; /* need to sync block */
686 /*
687 * Ignore the return value as we would have fallen through on error
688 * anyway.
689 */
690 (void) block_put(b);
691skip_dots:
692
693 childp->idx->pfc = parentp->firstc;
694 childp->idx->pdi = i * dps + j;
695 fibril_mutex_unlock(&childp->idx->lock);
696
697 fibril_mutex_lock(&childp->lock);
698 childp->lnkcnt = 1;
699 childp->dirty = true; /* need to sync node */
700 fibril_mutex_unlock(&childp->lock);
701
702 /*
703 * Hash in the index structure into the position hash.
704 */
705 fat_idx_hashin(childp->idx);
706
707 return EOK;
708}
709
710int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
711{
712 fat_node_t *parentp = FAT_NODE(pfn);
713 fat_node_t *childp = FAT_NODE(cfn);
714 fat_bs_t *bs;
715 fat_dentry_t *d;
716 uint16_t bps;
717 block_t *b;
718 bool has_children;
719 int rc;
720
721 if (!parentp)
722 return EBUSY;
723
724 rc = fat_has_children(&has_children, cfn);
725 if (rc != EOK)
726 return rc;
727 if (has_children)
728 return ENOTEMPTY;
729
730 fibril_mutex_lock(&parentp->lock);
731 fibril_mutex_lock(&childp->lock);
732 assert(childp->lnkcnt == 1);
733 fibril_mutex_lock(&childp->idx->lock);
734 bs = block_bb_get(childp->idx->dev_handle);
735 bps = uint16_t_le2host(bs->bps);
736
737 rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
738 (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
739 BLOCK_FLAGS_NONE);
740 if (rc != EOK)
741 goto error;
742 d = (fat_dentry_t *)b->data +
743 (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
744 /* mark the dentry as not-currently-used */
745 d->name[0] = FAT_DENTRY_ERASED;
746 b->dirty = true; /* need to sync block */
747 rc = block_put(b);
748 if (rc != EOK)
749 goto error;
750
751 /* remove the index structure from the position hash */
752 fat_idx_hashout(childp->idx);
753 /* clear position information */
754 childp->idx->pfc = FAT_CLST_RES0;
755 childp->idx->pdi = 0;
756 fibril_mutex_unlock(&childp->idx->lock);
757 childp->lnkcnt = 0;
758 childp->dirty = true;
759 fibril_mutex_unlock(&childp->lock);
760 fibril_mutex_unlock(&parentp->lock);
761
762 return EOK;
763
764error:
765 fibril_mutex_unlock(&parentp->lock);
766 fibril_mutex_unlock(&childp->lock);
767 fibril_mutex_unlock(&childp->idx->lock);
768 return rc;
769}
770
771int fat_has_children(bool *has_children, fs_node_t *fn)
772{
773 fat_bs_t *bs;
774 fat_node_t *nodep = FAT_NODE(fn);
775 unsigned bps;
776 unsigned dps;
777 unsigned blocks;
778 block_t *b;
779 unsigned i, j;
780 int rc;
781
782 if (nodep->type != FAT_DIRECTORY) {
783 *has_children = false;
784 return EOK;
785 }
786
787 fibril_mutex_lock(&nodep->idx->lock);
788 bs = block_bb_get(nodep->idx->dev_handle);
789 bps = uint16_t_le2host(bs->bps);
790 dps = bps / sizeof(fat_dentry_t);
791
792 blocks = nodep->size / bps;
793
794 for (i = 0; i < blocks; i++) {
795 fat_dentry_t *d;
796
797 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
798 if (rc != EOK) {
799 fibril_mutex_unlock(&nodep->idx->lock);
800 return rc;
801 }
802 for (j = 0; j < dps; j++) {
803 d = ((fat_dentry_t *)b->data) + j;
804 switch (fat_classify_dentry(d)) {
805 case FAT_DENTRY_SKIP:
806 case FAT_DENTRY_FREE:
807 continue;
808 case FAT_DENTRY_LAST:
809 rc = block_put(b);
810 fibril_mutex_unlock(&nodep->idx->lock);
811 *has_children = false;
812 return rc;
813 default:
814 case FAT_DENTRY_VALID:
815 rc = block_put(b);
816 fibril_mutex_unlock(&nodep->idx->lock);
817 *has_children = true;
818 return rc;
819 }
820 }
821 rc = block_put(b);
822 if (rc != EOK) {
823 fibril_mutex_unlock(&nodep->idx->lock);
824 return rc;
825 }
826 }
827
828 fibril_mutex_unlock(&nodep->idx->lock);
829 *has_children = false;
830 return EOK;
831}
832
833
834fs_index_t fat_index_get(fs_node_t *fn)
835{
836 return FAT_NODE(fn)->idx->index;
837}
838
839size_t fat_size_get(fs_node_t *fn)
840{
841 return FAT_NODE(fn)->size;
842}
843
844unsigned fat_lnkcnt_get(fs_node_t *fn)
845{
846 return FAT_NODE(fn)->lnkcnt;
847}
848
849char fat_plb_get_char(unsigned pos)
850{
851 return fat_reg.plb_ro[pos % PLB_SIZE];
852}
853
854bool fat_is_directory(fs_node_t *fn)
855{
856 return FAT_NODE(fn)->type == FAT_DIRECTORY;
857}
858
859bool fat_is_file(fs_node_t *fn)
860{
861 return FAT_NODE(fn)->type == FAT_FILE;
862}
863
864/** libfs operations */
865libfs_ops_t fat_libfs_ops = {
866 .root_get = fat_root_get,
867 .match = fat_match,
868 .node_get = fat_node_get,
869 .node_put = fat_node_put,
870 .create = fat_create_node,
871 .destroy = fat_destroy_node,
872 .link = fat_link,
873 .unlink = fat_unlink,
874 .has_children = fat_has_children,
875 .index_get = fat_index_get,
876 .size_get = fat_size_get,
877 .lnkcnt_get = fat_lnkcnt_get,
878 .plb_get_char = fat_plb_get_char,
879 .is_directory = fat_is_directory,
880 .is_file = fat_is_file
881};
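/*
 * This vtable ties the FAT server to libfs: the generic helpers used below
 * (libfs_mount(), libfs_lookup(), libfs_open_node(), libfs_stat()) implement
 * the common parts of the VFS protocol on top of these primitive operations.
 */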
882
883/*
884 * VFS operations.
885 */
886
887void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
888{
889 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
890 enum cache_mode cmode;
891 fat_bs_t *bs;
892 uint16_t bps;
893 uint16_t rde;
894 int rc;
895
896 /* accept the mount options */
897 ipc_callid_t callid;
898 size_t size;
899 if (!ipc_data_write_receive(&callid, &size)) {
900 ipc_answer_0(callid, EINVAL);
901 ipc_answer_0(rid, EINVAL);
902 return;
903 }
904 char *opts = malloc(size + 1);
905 if (!opts) {
906 ipc_answer_0(callid, ENOMEM);
907 ipc_answer_0(rid, ENOMEM);
908 return;
909 }
910 ipcarg_t retval = ipc_data_write_finalize(callid, opts, size);
911 if (retval != EOK) {
912 ipc_answer_0(rid, retval);
913 free(opts);
914 return;
915 }
916 opts[size] = '\0';
917
918 /* Check for option enabling write through. */
919 if (str_cmp(opts, "wtcache") == 0)
920 cmode = CACHE_MODE_WT;
921 else
922 cmode = CACHE_MODE_WB;
923
924 /* initialize libblock */
925 rc = block_init(dev_handle, BS_SIZE);
926 if (rc != EOK) {
927 ipc_answer_0(rid, rc);
928 return;
929 }
930
931 /* prepare the boot block */
932 rc = block_bb_read(dev_handle, BS_BLOCK);
933 if (rc != EOK) {
934 block_fini(dev_handle);
935 ipc_answer_0(rid, rc);
936 return;
937 }
938
939 /* get the buffer with the boot sector */
940 bs = block_bb_get(dev_handle);
941
942 /* Read the number of root directory entries. */
943 bps = uint16_t_le2host(bs->bps);
944 rde = uint16_t_le2host(bs->root_ent_max);
945
946 if (bps != BS_SIZE) {
947 block_fini(dev_handle);
948 ipc_answer_0(rid, ENOTSUP);
949 return;
950 }
951
952 /* Initialize the block cache */
953 rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
954 if (rc != EOK) {
955 block_fini(dev_handle);
956 ipc_answer_0(rid, rc);
957 return;
958 }
959
960 rc = fat_idx_init_by_dev_handle(dev_handle);
961 if (rc != EOK) {
962 block_fini(dev_handle);
963 ipc_answer_0(rid, rc);
964 return;
965 }
966
967 /* Initialize the root node. */
968 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
969 if (!rfn) {
970 block_fini(dev_handle);
971 fat_idx_fini_by_dev_handle(dev_handle);
972 ipc_answer_0(rid, ENOMEM);
973 return;
974 }
975 fs_node_initialize(rfn);
976 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
977 if (!rootp) {
978 free(rfn);
979 block_fini(dev_handle);
980 fat_idx_fini_by_dev_handle(dev_handle);
981 ipc_answer_0(rid, ENOMEM);
982 return;
983 }
984 fat_node_initialize(rootp);
985
986 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
987 if (!ridxp) {
988 free(rfn);
989 free(rootp);
990 block_fini(dev_handle);
991 fat_idx_fini_by_dev_handle(dev_handle);
992 ipc_answer_0(rid, ENOMEM);
993 return;
994 }
995 assert(ridxp->index == 0);
996 /* ridxp->lock held */
997
998 rootp->type = FAT_DIRECTORY;
999 rootp->firstc = FAT_CLST_ROOT;
1000 rootp->refcnt = 1;
1001 rootp->lnkcnt = 0; /* FS root is not linked */
1002 rootp->size = rde * sizeof(fat_dentry_t);
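	/*
	 * E.g. (hypothetical geometry): with root_ent_max == 512 and the
	 * standard 32-byte dentries, the fixed-size root directory occupies
	 * 16 KiB.
	 */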
1003 rootp->idx = ridxp;
1004 ridxp->nodep = rootp;
1005 rootp->bp = rfn;
1006 rfn->data = rootp;
1007
1008 fibril_mutex_unlock(&ridxp->lock);
1009
1010 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
1011}
1012
1013void fat_mount(ipc_callid_t rid, ipc_call_t *request)
1014{
1015 libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1016}
1017
1018void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
1019{
1020 libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1021}
1022
1023void fat_read(ipc_callid_t rid, ipc_call_t *request)
1024{
1025 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1026 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1027 off_t pos = (off_t)IPC_GET_ARG3(*request);
1028 fs_node_t *fn;
1029 fat_node_t *nodep;
1030 fat_bs_t *bs;
1031 uint16_t bps;
1032 size_t bytes;
1033 block_t *b;
1034 int rc;
1035
1036 rc = fat_node_get(&fn, dev_handle, index);
1037 if (rc != EOK) {
1038 ipc_answer_0(rid, rc);
1039 return;
1040 }
1041 if (!fn) {
1042 ipc_answer_0(rid, ENOENT);
1043 return;
1044 }
1045 nodep = FAT_NODE(fn);
1046
1047 ipc_callid_t callid;
1048 size_t len;
1049 if (!ipc_data_read_receive(&callid, &len)) {
1050 fat_node_put(fn);
1051 ipc_answer_0(callid, EINVAL);
1052 ipc_answer_0(rid, EINVAL);
1053 return;
1054 }
1055
1056 bs = block_bb_get(dev_handle);
1057 bps = uint16_t_le2host(bs->bps);
1058
1059 if (nodep->type == FAT_FILE) {
1060 /*
1061 * Our strategy for regular file reads is to read one block at
1062 * most and make use of the possibility to return less data than
1063 * requested. This keeps the code very simple.
1064 */
1065 if (pos >= nodep->size) {
1066 /* reading beyond the EOF */
1067 bytes = 0;
1068 (void) ipc_data_read_finalize(callid, NULL, 0);
1069 } else {
1070 bytes = min(len, bps - pos % bps);
1071 bytes = min(bytes, nodep->size - pos);
1072 rc = fat_block_get(&b, bs, nodep, pos / bps,
1073 BLOCK_FLAGS_NONE);
1074 assert(rc == EOK);
1075 (void) ipc_data_read_finalize(callid, b->data + pos % bps,
1076 bytes);
1077 rc = block_put(b);
1078 assert(rc == EOK);
1079 }
1080 } else {
1081 unsigned bnum;
1082 off_t spos = pos;
1083 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
1084 fat_dentry_t *d;
1085
1086 assert(nodep->type == FAT_DIRECTORY);
1087 assert(nodep->size % bps == 0);
1088 assert(bps % sizeof(fat_dentry_t) == 0);
1089
1090 /*
1091 * Our strategy for readdir() is to use the position pointer as
1092 * an index into the array of all dentries. On entry, it points
1093 * to the first unread dentry. If we skip any dentries, we bump
1094 * the position pointer accordingly.
1095 */
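		/*
		 * Illustration with hypothetical values: with bps == 512 and
		 * 32-byte dentries there are 16 dentries per block, so a
		 * position pointer of 40 starts the scan in relative block
		 * 40 * 32 / 512 == 2, at in-block entry 40 % 16 == 8.
		 */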
1096 bnum = (pos * sizeof(fat_dentry_t)) / bps;
1097 while (bnum < nodep->size / bps) {
1098 off_t o;
1099
1100 rc = fat_block_get(&b, bs, nodep, bnum,
1101 BLOCK_FLAGS_NONE);
1102 assert(rc == EOK);
1103 for (o = pos % (bps / sizeof(fat_dentry_t));
1104 o < bps / sizeof(fat_dentry_t);
1105 o++, pos++) {
1106 d = ((fat_dentry_t *)b->data) + o;
1107 switch (fat_classify_dentry(d)) {
1108 case FAT_DENTRY_SKIP:
1109 case FAT_DENTRY_FREE:
1110 continue;
1111 case FAT_DENTRY_LAST:
1112 rc = block_put(b);
1113 assert(rc == EOK);
1114 goto miss;
1115 default:
1116 case FAT_DENTRY_VALID:
1117 fat_dentry_name_get(d, name);
1118 rc = block_put(b);
1119 assert(rc == EOK);
1120 goto hit;
1121 }
1122 }
1123 rc = block_put(b);
1124 assert(rc == EOK);
1125 bnum++;
1126 }
1127miss:
1128 fat_node_put(fn);
1129 ipc_answer_0(callid, ENOENT);
1130 ipc_answer_1(rid, ENOENT, 0);
1131 return;
1132hit:
1133 (void) ipc_data_read_finalize(callid, name, str_size(name) + 1);
1134 bytes = (pos - spos) + 1;
1135 }
1136
1137 fat_node_put(fn);
1138 ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
1139}
1140
1141void fat_write(ipc_callid_t rid, ipc_call_t *request)
1142{
1143 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1144 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1145 off_t pos = (off_t)IPC_GET_ARG3(*request);
1146 fs_node_t *fn;
1147 fat_node_t *nodep;
1148 fat_bs_t *bs;
1149 size_t bytes;
1150 block_t *b;
1151 uint16_t bps;
1152 unsigned spc;
1153 unsigned bpc; /* bytes per cluster */
1154 off_t boundary;
1155 int flags = BLOCK_FLAGS_NONE;
1156 int rc;
1157
1158 rc = fat_node_get(&fn, dev_handle, index);
1159 if (rc != EOK) {
1160 ipc_answer_0(rid, rc);
1161 return;
1162 }
1163 if (!fn) {
1164 ipc_answer_0(rid, ENOENT);
1165 return;
1166 }
1167 nodep = FAT_NODE(fn);
1168
1169 ipc_callid_t callid;
1170 size_t len;
1171 if (!ipc_data_write_receive(&callid, &len)) {
1172 fat_node_put(fn);
1173 ipc_answer_0(callid, EINVAL);
1174 ipc_answer_0(rid, EINVAL);
1175 return;
1176 }
1177
1178 bs = block_bb_get(dev_handle);
1179 bps = uint16_t_le2host(bs->bps);
1180 spc = bs->spc;
1181 bpc = bps * spc;
1182
1183 /*
1184 * In all scenarios, we will attempt to write out at most one block's
1185 * worth of data. There might be more efficient approaches, but this one
1186 * greatly simplifies fat_write(). Note that we can afford to do this
1187 * because the client must be ready to handle a return value
1188 * signaling that fewer bytes were written than requested.
1189 */
1190 bytes = min(len, bps - pos % bps);
1191 if (bytes == bps)
1192 flags |= BLOCK_FLAGS_NOREAD;
1193
1194 boundary = ROUND_UP(nodep->size, bpc);
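	/*
	 * boundary is the capacity already allocated to the node, rounded up
	 * to a whole cluster: writes below it fit into clusters the node
	 * already owns (the easy branch), writes at or past it need new
	 * clusters (the hard branch). Also note BLOCK_FLAGS_NOREAD above --
	 * when the write covers a whole block there is no point in reading
	 * its old contents from the device first.
	 */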
1195 if (pos < boundary) {
1196 /*
1197 * This is the easier case - we are either overwriting already
1198 * existing contents or writing beyond the EOF, but still within
1199 * the limits of the last cluster. The node size may grow to the
1200 * next block size boundary.
1201 */
1202 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1203 assert(rc == EOK);
1204 rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
1205 assert(rc == EOK);
1206 (void) ipc_data_write_finalize(callid, b->data + pos % bps,
1207 bytes);
1208 b->dirty = true; /* need to sync block */
1209 rc = block_put(b);
1210 assert(rc == EOK);
1211 if (pos + bytes > nodep->size) {
1212 nodep->size = pos + bytes;
1213 nodep->dirty = true; /* need to sync node */
1214 }
1215 ipc_answer_2(rid, EOK, bytes, nodep->size);
1216 fat_node_put(fn);
1217 return;
1218 } else {
1219 /*
1220 * This is the more difficult case. We must allocate new
1221 * clusters for the node and zero them out.
1222 */
1223 int status;
1224 unsigned nclsts;
1225 fat_cluster_t mcl, lcl;
1226
1227 nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
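		/*
		 * Worked example (hypothetical numbers): with bpc == 2048,
		 * nodep->size == 2000 (so boundary == 2048), pos == 5000 and
		 * bytes == 100, ROUND_UP(5100, 2048) == 6144 and
		 * nclsts == (6144 - 2048) / 2048 == 2 new clusters are needed.
		 */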
1228 /* create an independent chain of nclsts clusters in all FATs */
1229 status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
1230 if (status != EOK) {
1231 /* could not allocate a chain of nclsts clusters */
1232 fat_node_put(fn);
1233 ipc_answer_0(callid, status);
1234 ipc_answer_0(rid, status);
1235 return;
1236 }
1237 /* zero fill any gaps */
1238 rc = fat_fill_gap(bs, nodep, mcl, pos);
1239 assert(rc == EOK);
1240 rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
1241 flags);
1242 assert(rc == EOK);
1243 (void) ipc_data_write_finalize(callid, b->data + pos % bps,
1244 bytes);
1245 b->dirty = true; /* need to sync block */
1246 rc = block_put(b);
1247 assert(rc == EOK);
1248 /*
1249 * Append the cluster chain starting in mcl to the end of the
1250 * node's cluster chain.
1251 */
1252 rc = fat_append_clusters(bs, nodep, mcl);
1253 assert(rc == EOK);
1254 nodep->size = pos + bytes;
1255 nodep->dirty = true; /* need to sync node */
1256 ipc_answer_2(rid, EOK, bytes, nodep->size);
1257 fat_node_put(fn);
1258 return;
1259 }
1260}
1261
1262void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1263{
1264 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1265 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1266 size_t size = (off_t)IPC_GET_ARG3(*request);
1267 fs_node_t *fn;
1268 fat_node_t *nodep;
1269 fat_bs_t *bs;
1270 uint16_t bps;
1271 uint8_t spc;
1272 unsigned bpc; /* bytes per cluster */
1273 int rc;
1274
1275 rc = fat_node_get(&fn, dev_handle, index);
1276 if (rc != EOK) {
1277 ipc_answer_0(rid, rc);
1278 return;
1279 }
1280 if (!fn) {
1281 ipc_answer_0(rid, ENOENT);
1282 return;
1283 }
1284 nodep = FAT_NODE(fn);
1285
1286 bs = block_bb_get(dev_handle);
1287 bps = uint16_t_le2host(bs->bps);
1288 spc = bs->spc;
1289 bpc = bps * spc;
1290
1291 if (nodep->size == size) {
1292 rc = EOK;
1293 } else if (nodep->size < size) {
1294 /*
1295 * The standard says we have the freedom to grow the node.
1296 * For now, we simply return an error.
1297 */
1298 rc = EINVAL;
1299 } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
1300 /*
1301 * The node will be shrunk, but no clusters will be deallocated.
1302 */
1303 nodep->size = size;
1304 nodep->dirty = true; /* need to sync node */
1305 rc = EOK;
1306 } else {
1307 /*
1308 * The node will be shrunk, clusters will be deallocated.
1309 */
1310 if (size == 0) {
1311 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1312 if (rc != EOK)
1313 goto out;
1314 } else {
1315 fat_cluster_t lastc;
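			/*
			 * Find the cluster that holds byte (size - 1); it
			 * becomes the new last cluster and everything after it
			 * is chopped off. E.g. (hypothetical): with
			 * bpc == 2048, truncating to size == 3000 keeps
			 * clusters 0 and 1 of the chain, since
			 * (3000 - 1) / 2048 == 1.
			 */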
1316 rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
1317 &lastc, NULL, (size - 1) / bpc);
1318 if (rc != EOK)
1319 goto out;
1320 rc = fat_chop_clusters(bs, nodep, lastc);
1321 if (rc != EOK)
1322 goto out;
1323 }
1324 nodep->size = size;
1325 nodep->dirty = true; /* need to sync node */
1326 rc = EOK;
1327 }
1328out:
1329 fat_node_put(fn);
1330 ipc_answer_0(rid, rc);
1331 return;
1332}
1333
1334void fat_close(ipc_callid_t rid, ipc_call_t *request)
1335{
1336 ipc_answer_0(rid, EOK);
1337}
1338
1339void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1340{
1341 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1342 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1343 fs_node_t *fn;
1344 int rc;
1345
1346 rc = fat_node_get(&fn, dev_handle, index);
1347 if (rc != EOK) {
1348 ipc_answer_0(rid, rc);
1349 return;
1350 }
1351 if (!fn) {
1352 ipc_answer_0(rid, ENOENT);
1353 return;
1354 }
1355
1356 rc = fat_destroy_node(fn);
1357 ipc_answer_0(rid, rc);
1358}
1359
1360void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
1361{
1362 libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1363}
1364
1365void fat_stat(ipc_callid_t rid, ipc_call_t *request)
1366{
1367 libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1368}
1369
1370void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1371{
1372 /* Dummy implementation */
1373 ipc_answer_0(rid, EOK);
1374}
1375
1376/**
1377 * @}
1378 */