source: mainline/uspace/srv/fs/fat/fat_ops.c@ bbf88db

Branches: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since bbf88db was 12bdc942, checked in by Jakub Jermar <jakub@…>, 16 years ago

Remove dead code.

  • Property mode set to 100644
File size: 32.9 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/ipc.h>
45#include <ipc/services.h>
46#include <ipc/devmap.h>
47#include <async.h>
48#include <errno.h>
49#include <string.h>
50#include <byteorder.h>
51#include <adt/hash_table.h>
52#include <adt/list.h>
53#include <assert.h>
54#include <fibril_sync.h>
55#include <sys/mman.h>
56#include <align.h>
57
/** Extract the embedded fat_node_t from a generic fs_node_t (NULL-safe). */
#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
/** Obtain the fs_node_t wrapper of a fat_node_t (NULL-safe). */
#define FS_NODE(node) ((node) ? (node)->bp : NULL)

/** Mutex protecting the list of cached free FAT nodes. */
static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);
66
67static void fat_node_initialize(fat_node_t *node)
68{
69 fibril_mutex_initialize(&node->lock);
70 node->bp = NULL;
71 node->idx = NULL;
72 node->type = 0;
73 link_initialize(&node->ffn_link);
74 node->size = 0;
75 node->lnkcnt = 0;
76 node->refcnt = 0;
77 node->dirty = false;
78}
79
/** Write the cached metadata of a dirty node back to its on-disk dentry.
 *
 * @param node	In-core node to synchronize; must be marked dirty.
 *
 * @return	EOK on success or an error code from the block layer.
 */
static int fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	int rc;

	assert(node->dirty);

	bs = block_bb_get(node->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
	    (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

	/* Propagate the in-core state into the on-disk dentry (LE on disk). */
	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}

	/* TODO: update other fields? (e.g time fields) */

	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	return rc;
}
116
/** Obtain a fresh in-core FAT node, recycling a cached free node if possible.
 *
 * A recycled node is synced to disk first if dirty and detached from its old
 * index structure.  On cache miss (or contention), a new node and fs_node_t
 * wrapper are allocated instead.
 *
 * @return	Initialized node with its fs_node_t wrapper attached, or NULL
 *		on memory shortage.
 */
static fat_node_t *fat_node_get_new(void)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		/*
		 * Use trylock so we never block while holding ffn_mutex;
		 * on contention simply fall back to a fresh allocation.
		 */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			/* Flush cached metadata before reuse. */
			rc = fat_node_sync(nodep);
			assert(rc == EOK);
		}
		/* Detach the recycled node from its old index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return NULL;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return NULL;
		}
	}
	/* Reset both structures and pair them together. */
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;

	return nodep;
}
165
/** Internal version of fat_node_get().
 *
 * @param idxp	Locked index structure.
 *
 * @return	In-core node corresponding to @a idxp holding one extra
 *		reference, or NULL on memory shortage.
 */
static fat_node_t *fat_node_get_core(fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	unsigned bps;		/* bytes per sector */
	unsigned spc;		/* sectors per cluster */
	unsigned dps;		/* dentries per sector */
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		/* First reference revives the node from the free-node cache. */
		if (!idxp->nodep->refcnt++)
			list_remove(&idxp->nodep->ffn_link);
		fibril_mutex_unlock(&idxp->nodep->lock);
		return idxp->nodep;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	nodep = fat_node_get_new();
	if (!nodep)
		return NULL;

	bs = block_bb_get(idxp->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
	    (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
	assert(rc == EOK);

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint16_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
		    uint16_t_le2host(d->firstc));
		assert(rc == EOK);
		nodep->size = bps * spc * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	assert(rc == EOK);

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	return nodep;
}
248
249/*
250 * Forward declarations of FAT libfs operations.
251 */
252static int fat_root_get(fs_node_t **, dev_handle_t);
253static int fat_match(fs_node_t **, fs_node_t *, const char *);
254static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
255static int fat_node_put(fs_node_t *);
256static int fat_create_node(fs_node_t **, dev_handle_t, int);
257static int fat_destroy_node(fs_node_t *);
258static int fat_link(fs_node_t *, fs_node_t *, const char *);
259static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
260static int fat_has_children(bool *, fs_node_t *);
261static fs_index_t fat_index_get(fs_node_t *);
262static size_t fat_size_get(fs_node_t *);
263static unsigned fat_lnkcnt_get(fs_node_t *);
264static char fat_plb_get_char(unsigned);
265static bool fat_is_directory(fs_node_t *);
266static bool fat_is_file(fs_node_t *node);
267
268/*
269 * FAT libfs operations.
270 */
271
272int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
273{
274 return fat_node_get(rfn, dev_handle, 0);
275}
276
/** Look up the directory entry named @a component inside directory @a pfn.
 *
 * @param rfn		Output: matching child node, or NULL when not found.
 * @param pfn		Parent directory node.
 * @param component	Name to look up.
 *
 * @return		EOK (including on a negative lookup) or an error code.
 */
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	fat_dentry_t *d;
	block_t *b;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);
	blocks = parentp->size / bps;
	/* Scan the parent directory block by block, dentry by dentry. */
	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* Terminator dentry: negative lookup. */
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				fibril_mutex_unlock(&parentp->idx->lock);
				*rfn = NULL;
				return EOK;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				fat_node_t *nodep;
				/*
				 * Assume tree hierarchy for locking. We
				 * already have the parent and now we are going
				 * to lock the child. Never lock in the opposite
				 * order.
				 */
				fat_idx_t *idx = fat_idx_get_by_pos(
				    parentp->idx->dev_handle, parentp->firstc,
				    i * dps + j);
				fibril_mutex_unlock(&parentp->idx->lock);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					rc = block_put(b);
					/* expect EOK as b was not dirty */
					assert(rc == EOK);
					return ENOMEM;
				}
				nodep = fat_node_get_core(idx);
				fibril_mutex_unlock(&idx->lock);
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				*rfn = FS_NODE(nodep);
				return EOK;
			}
		}
		rc = block_put(b);
		assert(rc == EOK);	/* expect EOK as b was not dirty */
	}

	/* All dentries scanned without a match: negative lookup. */
	fibril_mutex_unlock(&parentp->idx->lock);
	*rfn = NULL;
	return EOK;
}
359
360/** Instantiate a FAT in-core node. */
361int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
362{
363 fat_node_t *nodep;
364 fat_idx_t *idxp;
365
366 idxp = fat_idx_get_by_index(dev_handle, index);
367 if (!idxp) {
368 *rfn = NULL;
369 return EOK;
370 }
371 /* idxp->lock held */
372 nodep = fat_node_get_core(idxp);
373 fibril_mutex_unlock(&idxp->lock);
374 *rfn = FS_NODE(nodep);
375 return EOK;
376}
377
/** Drop one reference to a node, caching or destroying it on last release.
 *
 * @param fn	Node being released.
 *
 * @return	EOK.
 */
int fat_node_put(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	bool destroy = false;

	fibril_mutex_lock(&nodep->lock);
	if (!--nodep->refcnt) {
		if (nodep->idx) {
			/* Park the unreferenced node on the free-node cache. */
			fibril_mutex_lock(&ffn_mutex);
			list_append(&nodep->ffn_link, &ffn_head);
			fibril_mutex_unlock(&ffn_mutex);
		} else {
			/*
			 * The node does not have any index structure associated
			 * with itself. This can only mean that we are releasing
			 * the node after a failed attempt to allocate the index
			 * structure for it.
			 */
			destroy = true;
		}
	}
	fibril_mutex_unlock(&nodep->lock);
	if (destroy) {
		/* Free both the node and its fs_node_t wrapper. */
		free(nodep->bp);
		free(nodep);
	}
	return EOK;
}
406
407int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
408{
409 fat_idx_t *idxp;
410 fat_node_t *nodep;
411 fat_bs_t *bs;
412 fat_cluster_t mcl, lcl;
413 uint16_t bps;
414 int rc;
415
416 bs = block_bb_get(dev_handle);
417 bps = uint16_t_le2host(bs->bps);
418 if (flags & L_DIRECTORY) {
419 /* allocate a cluster */
420 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
421 if (rc != EOK)
422 return rc;
423 /* populate the new cluster with unused dentries */
424 rc = fat_zero_cluster(bs, dev_handle, mcl);
425 if (rc != EOK) {
426 (void) fat_free_clusters(bs, dev_handle, mcl);
427 return rc;
428 }
429 }
430
431 nodep = fat_node_get_new();
432 if (!nodep) {
433 (void) fat_free_clusters(bs, dev_handle, mcl);
434 return ENOMEM; /* FIXME: determine the true error code */
435 }
436 idxp = fat_idx_get_new(dev_handle);
437 if (!idxp) {
438 (void) fat_free_clusters(bs, dev_handle, mcl);
439 (void) fat_node_put(FS_NODE(nodep));
440 return ENOMEM; /* FIXME: determine the true error code */
441 }
442 /* idxp->lock held */
443 if (flags & L_DIRECTORY) {
444 nodep->type = FAT_DIRECTORY;
445 nodep->firstc = mcl;
446 nodep->size = bps * bs->spc;
447 } else {
448 nodep->type = FAT_FILE;
449 nodep->firstc = FAT_CLST_RES0;
450 nodep->size = 0;
451 }
452 nodep->lnkcnt = 0; /* not linked anywhere */
453 nodep->refcnt = 1;
454 nodep->dirty = true;
455
456 nodep->idx = idxp;
457 idxp->nodep = nodep;
458
459 fibril_mutex_unlock(&idxp->lock);
460 *rfn = FS_NODE(nodep);
461 return EOK;
462}
463
/** Destroy an unlinked, childless node and free its clusters.
 *
 * @param fn	Node to destroy; its link count must already be zero.
 *
 * @return	EOK on success or an error code.
 */
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	bool has_children;
	int rc;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	rc = fat_has_children(&has_children, fn);
	if (rc != EOK)
		return rc;
	assert(!has_children);

	bs = block_bb_get(nodep->idx->dev_handle);
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->dev_handle,
		    nodep->firstc);
	}

	/* rc still holds EOK from fat_has_children() if no clusters existed. */
	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}
500
/** Link the child node @a cfn into parent directory @a pfn under @a name.
 *
 * FAT does not support hard links, so the child must not be linked yet.
 * Finds a free dentry in the parent (growing the parent by one cluster if
 * necessary), writes the name into it and, on a best-effort basis, creates
 * the "." and ".." entries inside the child.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node to link.
 * @param name	Name for the child within the parent.
 *
 * @return	EOK on success; EMLINK, ENOTSUP, ENOSPC or another error code
 *		otherwise.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	unsigned i, j;
	uint16_t bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	fat_cluster_t mcl, lcl;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get us an unused parent node's dentry or grow the parent and allocate
	 * a new one.
	 */

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	blocks = parentp->size / bps;

	/* Scan existing blocks of the parent for a reusable dentry slot. */
	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot; b stays held */
				goto hit;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
	}
	/* No slot found: the new dentry will be the first one (j == 0)
	 * of the first block (index i == old block count) appended below. */
	j = 0;

	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	if (parentp->firstc == FAT_CLST_ROOT) {
		/* Can't grow the root directory. */
		fibril_mutex_unlock(&parentp->idx->lock);
		return ENOSPC;
	}
	rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_append_clusters(bs, parentp, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	parentp->size += bps * bs->spc;
	parentp->dirty = true;		/* need to sync node */
	rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	d = (fat_dentry_t *)b->data;

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except of the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	fibril_mutex_unlock(&parentp->idx->lock);
	if (rc != EOK)
		return rc;

	fibril_mutex_lock(&childp->idx->lock);

	/*
	 * If possible, create the Sub-directory Identifier Entry and the
	 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
	 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
	 * not use them anyway, so this is rather a sign of our good will.
	 */
	rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		/*
		 * Rather than returning an error, simply skip the creation of
		 * these two entries.
		 */
		goto skip_dots;
	}
	d = (fat_dentry_t *)b->data;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    str_cmp(d->name, FAT_NAME_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		str_cpy(d->name, 8, FAT_NAME_DOT);
		str_cpy(d->ext, 3, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		d->firstc = host2uint16_t_le(childp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	d++;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
		str_cpy(d->ext, 3, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		/* ".." in a root child points at the reserved cluster 0. */
		d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
		    host2uint16_t_le(FAT_CLST_RES0) :
		    host2uint16_t_le(parentp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	b->dirty = true;		/* need to sync block */
	/*
	 * Ignore the return value as we would have fallen through on error
	 * anyway.
	 */
	(void) block_put(b);
skip_dots:

	/* Record the child's position within the parent. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * dps + j;
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
683
684int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
685{
686 fat_node_t *parentp = FAT_NODE(pfn);
687 fat_node_t *childp = FAT_NODE(cfn);
688 fat_bs_t *bs;
689 fat_dentry_t *d;
690 uint16_t bps;
691 block_t *b;
692 bool has_children;
693 int rc;
694
695 if (!parentp)
696 return EBUSY;
697
698 rc = fat_has_children(&has_children, cfn);
699 if (rc != EOK)
700 return rc;
701 if (has_children)
702 return ENOTEMPTY;
703
704 fibril_mutex_lock(&parentp->lock);
705 fibril_mutex_lock(&childp->lock);
706 assert(childp->lnkcnt == 1);
707 fibril_mutex_lock(&childp->idx->lock);
708 bs = block_bb_get(childp->idx->dev_handle);
709 bps = uint16_t_le2host(bs->bps);
710
711 rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
712 (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
713 BLOCK_FLAGS_NONE);
714 if (rc != EOK)
715 goto error;
716 d = (fat_dentry_t *)b->data +
717 (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
718 /* mark the dentry as not-currently-used */
719 d->name[0] = FAT_DENTRY_ERASED;
720 b->dirty = true; /* need to sync block */
721 rc = block_put(b);
722 if (rc != EOK)
723 goto error;
724
725 /* remove the index structure from the position hash */
726 fat_idx_hashout(childp->idx);
727 /* clear position information */
728 childp->idx->pfc = FAT_CLST_RES0;
729 childp->idx->pdi = 0;
730 fibril_mutex_unlock(&childp->idx->lock);
731 childp->lnkcnt = 0;
732 childp->dirty = true;
733 fibril_mutex_unlock(&childp->lock);
734 fibril_mutex_unlock(&parentp->lock);
735
736 return EOK;
737
738error:
739 fibril_mutex_unlock(&parentp->idx->lock);
740 fibril_mutex_unlock(&childp->lock);
741 fibril_mutex_unlock(&childp->idx->lock);
742 return rc;
743}
744
/** Determine whether @a fn contains any valid directory entries.
 *
 * @param has_children	Output: true iff at least one valid dentry was found.
 *			Non-directories never have children.
 * @param fn		Node to examine.
 *
 * @return		EOK on success or an error code.
 */
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}

	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	blocks = nodep->size / bps;

	/* Scan until the first valid dentry or the terminator dentry. */
	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* Terminator: nothing valid can follow. */
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return EOK;
			default:
			case FAT_DENTRY_VALID:
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return EOK;
			}
		}
		rc = block_put(b);
		assert(rc == EOK);	/* expect EOK as b was not dirty */
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}
807
808
/** Return the VFS index of @a fn. */
fs_index_t fat_index_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->idx->index;
}

/** Return the size of @a fn in bytes. */
size_t fat_size_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->size;
}

/** Return the link count of @a fn (0 or 1; FAT has no hard links). */
unsigned fat_lnkcnt_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->lnkcnt;
}

/** Read one character from the read-only path lookup buffer. */
char fat_plb_get_char(unsigned pos)
{
	return fat_reg.plb_ro[pos % PLB_SIZE];
}

/** Return true iff @a fn is a directory. */
bool fat_is_directory(fs_node_t *fn)
{
	return FAT_NODE(fn)->type == FAT_DIRECTORY;
}

/** Return true iff @a fn is a regular file. */
bool fat_is_file(fs_node_t *fn)
{
	return FAT_NODE(fn)->type == FAT_FILE;
}
838
/** Dispatch table of the FAT implementations of the libfs operations. */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file
};
857
858/*
859 * VFS operations.
860 */
861
862void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
863{
864 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
865 enum cache_mode cmode;
866 fat_bs_t *bs;
867 uint16_t bps;
868 uint16_t rde;
869 int rc;
870
871 /* accept the mount options */
872 ipc_callid_t callid;
873 size_t size;
874 if (!ipc_data_write_receive(&callid, &size)) {
875 ipc_answer_0(callid, EINVAL);
876 ipc_answer_0(rid, EINVAL);
877 return;
878 }
879 char *opts = malloc(size + 1);
880 if (!opts) {
881 ipc_answer_0(callid, ENOMEM);
882 ipc_answer_0(rid, ENOMEM);
883 return;
884 }
885 ipcarg_t retval = ipc_data_write_finalize(callid, opts, size);
886 if (retval != EOK) {
887 ipc_answer_0(rid, retval);
888 free(opts);
889 return;
890 }
891 opts[size] = '\0';
892
893 /* Check for option enabling write through. */
894 if (str_cmp(opts, "wtcache") == 0)
895 cmode = CACHE_MODE_WT;
896 else
897 cmode = CACHE_MODE_WB;
898
899 /* initialize libblock */
900 rc = block_init(dev_handle, BS_SIZE);
901 if (rc != EOK) {
902 ipc_answer_0(rid, rc);
903 return;
904 }
905
906 /* prepare the boot block */
907 rc = block_bb_read(dev_handle, BS_BLOCK);
908 if (rc != EOK) {
909 block_fini(dev_handle);
910 ipc_answer_0(rid, rc);
911 return;
912 }
913
914 /* get the buffer with the boot sector */
915 bs = block_bb_get(dev_handle);
916
917 /* Read the number of root directory entries. */
918 bps = uint16_t_le2host(bs->bps);
919 rde = uint16_t_le2host(bs->root_ent_max);
920
921 if (bps != BS_SIZE) {
922 block_fini(dev_handle);
923 ipc_answer_0(rid, ENOTSUP);
924 return;
925 }
926
927 /* Initialize the block cache */
928 rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
929 if (rc != EOK) {
930 block_fini(dev_handle);
931 ipc_answer_0(rid, rc);
932 return;
933 }
934
935 rc = fat_idx_init_by_dev_handle(dev_handle);
936 if (rc != EOK) {
937 block_fini(dev_handle);
938 ipc_answer_0(rid, rc);
939 return;
940 }
941
942 /* Initialize the root node. */
943 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
944 if (!rfn) {
945 block_fini(dev_handle);
946 fat_idx_fini_by_dev_handle(dev_handle);
947 ipc_answer_0(rid, ENOMEM);
948 return;
949 }
950 fs_node_initialize(rfn);
951 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
952 if (!rootp) {
953 free(rfn);
954 block_fini(dev_handle);
955 fat_idx_fini_by_dev_handle(dev_handle);
956 ipc_answer_0(rid, ENOMEM);
957 return;
958 }
959 fat_node_initialize(rootp);
960
961 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
962 if (!ridxp) {
963 free(rfn);
964 free(rootp);
965 block_fini(dev_handle);
966 fat_idx_fini_by_dev_handle(dev_handle);
967 ipc_answer_0(rid, ENOMEM);
968 return;
969 }
970 assert(ridxp->index == 0);
971 /* ridxp->lock held */
972
973 rootp->type = FAT_DIRECTORY;
974 rootp->firstc = FAT_CLST_ROOT;
975 rootp->refcnt = 1;
976 rootp->lnkcnt = 0; /* FS root is not linked */
977 rootp->size = rde * sizeof(fat_dentry_t);
978 rootp->idx = ridxp;
979 ridxp->nodep = rootp;
980 rootp->bp = rfn;
981 rfn->data = rootp;
982
983 fibril_mutex_unlock(&ridxp->lock);
984
985 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
986}
987
/** Delegate a mount request to the generic libfs implementation. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

/** Delegate a lookup request to the generic libfs implementation. */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
997
/** Handle a read request.
 *
 * For regular files, at most one block worth of data is returned per call;
 * the client is expected to retry with an advanced position.  For
 * directories, the position is interpreted as an index into the array of
 * dentries and a single entry name is returned per call.
 *
 * @param rid		IPC request id to answer.
 * @param request	The read request call (device, index, position).
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	uint16_t bps;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, dev_handle, index);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) ipc_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the block boundary and to the file size. */
			bytes = min(len, bps - pos % bps);
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / bps,
			    BLOCK_FLAGS_NONE);
			assert(rc == EOK);
			(void) ipc_data_read_finalize(callid, b->data + pos % bps,
			    bytes);
			rc = block_put(b);
			assert(rc == EOK);
		}
	} else {
		unsigned bnum;
		off_t spos = pos;	/* starting position, for accounting */
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % bps == 0);
		assert(bps % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / bps;
		while (bnum < nodep->size / bps) {
			off_t o;

			rc = fat_block_get(&b, bs, nodep, bnum,
			    BLOCK_FLAGS_NONE);
			assert(rc == EOK);
			for (o = pos % (bps / sizeof(fat_dentry_t));
			    o < bps / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
				case FAT_DENTRY_FREE:
					continue;
				case FAT_DENTRY_LAST:
					/* Terminator: no more entries. */
					rc = block_put(b);
					assert(rc == EOK);
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					fat_dentry_name_get(d, name);
					rc = block_put(b);
					assert(rc == EOK);
					goto hit;
				}
			}
			rc = block_put(b);
			assert(rc == EOK);
			bnum++;
		}
miss:
		fat_node_put(fn);
		ipc_answer_0(callid, ENOENT);
		ipc_answer_1(rid, ENOENT, 0);
		return;
hit:
		(void) ipc_data_read_finalize(callid, name, str_size(name) + 1);
		/* Number of dentry positions consumed, including skips. */
		bytes = (pos - spos) + 1;
	}

	fat_node_put(fn);
	ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}
1115
1116void fat_write(ipc_callid_t rid, ipc_call_t *request)
1117{
1118 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1119 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1120 off_t pos = (off_t)IPC_GET_ARG3(*request);
1121 fs_node_t *fn;
1122 fat_node_t *nodep;
1123 fat_bs_t *bs;
1124 size_t bytes;
1125 block_t *b;
1126 uint16_t bps;
1127 unsigned spc;
1128 unsigned bpc; /* bytes per cluster */
1129 off_t boundary;
1130 int flags = BLOCK_FLAGS_NONE;
1131 int rc;
1132
1133 rc = fat_node_get(&fn, dev_handle, index);
1134 if (rc != EOK) {
1135 ipc_answer_0(rid, rc);
1136 return;
1137 }
1138 if (!fn) {
1139 ipc_answer_0(rid, ENOENT);
1140 return;
1141 }
1142 nodep = FAT_NODE(fn);
1143
1144 ipc_callid_t callid;
1145 size_t len;
1146 if (!ipc_data_write_receive(&callid, &len)) {
1147 fat_node_put(fn);
1148 ipc_answer_0(callid, EINVAL);
1149 ipc_answer_0(rid, EINVAL);
1150 return;
1151 }
1152
1153 bs = block_bb_get(dev_handle);
1154 bps = uint16_t_le2host(bs->bps);
1155 spc = bs->spc;
1156 bpc = bps * spc;
1157
1158 /*
1159 * In all scenarios, we will attempt to write out only one block worth
1160 * of data at maximum. There might be some more efficient approaches,
1161 * but this one greatly simplifies fat_write(). Note that we can afford
1162 * to do this because the client must be ready to handle the return
1163 * value signalizing a smaller number of bytes written.
1164 */
1165 bytes = min(len, bps - pos % bps);
1166 if (bytes == bps)
1167 flags |= BLOCK_FLAGS_NOREAD;
1168
1169 boundary = ROUND_UP(nodep->size, bpc);
1170 if (pos < boundary) {
1171 /*
1172 * This is the easier case - we are either overwriting already
1173 * existing contents or writing behind the EOF, but still within
1174 * the limits of the last cluster. The node size may grow to the
1175 * next block size boundary.
1176 */
1177 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1178 assert(rc == EOK);
1179 rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
1180 assert(rc == EOK);
1181 (void) ipc_data_write_finalize(callid, b->data + pos % bps,
1182 bytes);
1183 b->dirty = true; /* need to sync block */
1184 rc = block_put(b);
1185 assert(rc == EOK);
1186 if (pos + bytes > nodep->size) {
1187 nodep->size = pos + bytes;
1188 nodep->dirty = true; /* need to sync node */
1189 }
1190 ipc_answer_2(rid, EOK, bytes, nodep->size);
1191 fat_node_put(fn);
1192 return;
1193 } else {
1194 /*
1195 * This is the more difficult case. We must allocate new
1196 * clusters for the node and zero them out.
1197 */
1198 int status;
1199 unsigned nclsts;
1200 fat_cluster_t mcl, lcl;
1201
1202 nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
1203 /* create an independent chain of nclsts clusters in all FATs */
1204 status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
1205 if (status != EOK) {
1206 /* could not allocate a chain of nclsts clusters */
1207 fat_node_put(fn);
1208 ipc_answer_0(callid, status);
1209 ipc_answer_0(rid, status);
1210 return;
1211 }
1212 /* zero fill any gaps */
1213 rc = fat_fill_gap(bs, nodep, mcl, pos);
1214 assert(rc == EOK);
1215 rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
1216 flags);
1217 assert(rc == EOK);
1218 (void) ipc_data_write_finalize(callid, b->data + pos % bps,
1219 bytes);
1220 b->dirty = true; /* need to sync block */
1221 rc = block_put(b);
1222 assert(rc == EOK);
1223 /*
1224 * Append the cluster chain starting in mcl to the end of the
1225 * node's cluster chain.
1226 */
1227 rc = fat_append_clusters(bs, nodep, mcl);
1228 assert(rc == EOK);
1229 nodep->size = pos + bytes;
1230 nodep->dirty = true; /* need to sync node */
1231 ipc_answer_2(rid, EOK, bytes, nodep->size);
1232 fat_node_put(fn);
1233 return;
1234 }
1235}
1236
1237void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1238{
1239 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1240 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1241 size_t size = (off_t)IPC_GET_ARG3(*request);
1242 fs_node_t *fn;
1243 fat_node_t *nodep;
1244 fat_bs_t *bs;
1245 uint16_t bps;
1246 uint8_t spc;
1247 unsigned bpc; /* bytes per cluster */
1248 int rc;
1249
1250 rc = fat_node_get(&fn, dev_handle, index);
1251 if (rc != EOK) {
1252 ipc_answer_0(rid, rc);
1253 return;
1254 }
1255 if (!fn) {
1256 ipc_answer_0(rid, ENOENT);
1257 return;
1258 }
1259 nodep = FAT_NODE(fn);
1260
1261 bs = block_bb_get(dev_handle);
1262 bps = uint16_t_le2host(bs->bps);
1263 spc = bs->spc;
1264 bpc = bps * spc;
1265
1266 if (nodep->size == size) {
1267 rc = EOK;
1268 } else if (nodep->size < size) {
1269 /*
1270 * The standard says we have the freedom to grow the node.
1271 * For now, we simply return an error.
1272 */
1273 rc = EINVAL;
1274 } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
1275 /*
1276 * The node will be shrunk, but no clusters will be deallocated.
1277 */
1278 nodep->size = size;
1279 nodep->dirty = true; /* need to sync node */
1280 rc = EOK;
1281 } else {
1282 /*
1283 * The node will be shrunk, clusters will be deallocated.
1284 */
1285 if (size == 0) {
1286 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1287 if (rc != EOK)
1288 goto out;
1289 } else {
1290 fat_cluster_t lastc;
1291 rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
1292 &lastc, NULL, (size - 1) / bpc);
1293 if (rc != EOK)
1294 goto out;
1295 rc = fat_chop_clusters(bs, nodep, lastc);
1296 if (rc != EOK)
1297 goto out;
1298 }
1299 nodep->size = size;
1300 nodep->dirty = true; /* need to sync node */
1301 rc = EOK;
1302 }
1303out:
1304 fat_node_put(fn);
1305 ipc_answer_0(rid, rc);
1306 return;
1307}
1308
/** Close a file (VFS_CLOSE handler).
 *
 * FAT keeps no per-open state, so there is nothing to release here;
 * the request is simply acknowledged with EOK.
 */
void fat_close(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_answer_0(rid, EOK);
}
1313
1314void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1315{
1316 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1317 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1318 fs_node_t *fn;
1319 int rc;
1320
1321 rc = fat_node_get(&fn, dev_handle, index);
1322 if (rc != EOK) {
1323 ipc_answer_0(rid, rc);
1324 return;
1325 }
1326 if (!fn) {
1327 ipc_answer_0(rid, ENOENT);
1328 return;
1329 }
1330
1331 rc = fat_destroy_node(fn);
1332 ipc_answer_0(rid, rc);
1333}
1334
/** Open a node by index (VFS_OPEN_NODE handler).
 *
 * Delegates entirely to the shared libfs implementation, passing in the
 * FAT operation table and this server's file system handle.
 */
void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1339
/** Report node metadata (VFS_STAT handler).
 *
 * Delegates entirely to the shared libfs implementation, passing in the
 * FAT operation table and this server's file system handle.
 */
void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1344
/** Synchronize a file to disk (VFS_SYNC handler).
 *
 * Dummy implementation: no flushing is performed; the request is
 * acknowledged with EOK unconditionally.
 */
void fat_sync(ipc_callid_t rid, ipc_call_t *request)
{
	/* Dummy implementation */
	ipc_answer_0(rid, EOK);
}
1350
1351/**
1352 * @}
1353 */
Note: See TracBrowser for help on using the repository browser.