source: mainline/uspace/srv/fs/fat/fat_ops.c@ dc6b148

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since dc6b148 was 0be611b, checked in by Jakub Jermar <jakub@…>, 14 years ago

Do not leave parentp→idx→lock locked when returning from fat_link().

  • Property mode set to 100644
File size: 32.7 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * Copyright (c) 2011 Oleg Romanenko
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup fs
31 * @{
32 */
33
34/**
35 * @file fat_ops.c
36 * @brief Implementation of VFS operations for the FAT file system server.
37 */
38
39#include "fat.h"
40#include "fat_dentry.h"
41#include "fat_fat.h"
42#include "fat_directory.h"
43#include "../../vfs/vfs.h"
44#include <libfs.h>
45#include <libblock.h>
46#include <ipc/services.h>
47#include <ipc/loc.h>
48#include <macros.h>
49#include <async.h>
50#include <errno.h>
51#include <str.h>
52#include <byteorder.h>
53#include <adt/hash_table.h>
54#include <adt/list.h>
55#include <assert.h>
56#include <fibril_synch.h>
57#include <sys/mman.h>
58#include <align.h>
59#include <malloc.h>
60#include <str.h>
61
62#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
63#define FS_NODE(node) ((node) ? (node)->bp : NULL)
64
65#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
66#define BPC(bs) (BPS((bs)) * SPC((bs)))
67
68/** Mutex protecting the list of cached free FAT nodes. */
69static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
70
71/** List of cached free FAT nodes. */
72static LIST_INITIALIZE(ffn_list);
73
74/*
75 * Forward declarations of FAT libfs operations.
76 */
77static int fat_root_get(fs_node_t **, service_id_t);
78static int fat_match(fs_node_t **, fs_node_t *, const char *);
79static int fat_node_get(fs_node_t **, service_id_t, fs_index_t);
80static int fat_node_open(fs_node_t *);
81static int fat_node_put(fs_node_t *);
82static int fat_create_node(fs_node_t **, service_id_t, int);
83static int fat_destroy_node(fs_node_t *);
84static int fat_link(fs_node_t *, fs_node_t *, const char *);
85static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
86static int fat_has_children(bool *, fs_node_t *);
87static fs_index_t fat_index_get(fs_node_t *);
88static aoff64_t fat_size_get(fs_node_t *);
89static unsigned fat_lnkcnt_get(fs_node_t *);
90static bool fat_is_directory(fs_node_t *);
91static bool fat_is_file(fs_node_t *node);
92static service_id_t fat_device_get(fs_node_t *node);
93
94/*
95 * Helper functions.
96 */
97static void fat_node_initialize(fat_node_t *node)
98{
99 fibril_mutex_initialize(&node->lock);
100 node->bp = NULL;
101 node->idx = NULL;
102 node->type = 0;
103 link_initialize(&node->ffn_link);
104 node->size = 0;
105 node->lnkcnt = 0;
106 node->refcnt = 0;
107 node->dirty = false;
108 node->lastc_cached_valid = false;
109 node->lastc_cached_value = 0;
110 node->currc_cached_valid = false;
111 node->currc_cached_bn = 0;
112 node->currc_cached_value = 0;
113}
114
115static int fat_node_sync(fat_node_t *node)
116{
117 block_t *b;
118 fat_bs_t *bs;
119 fat_dentry_t *d;
120 int rc;
121
122 assert(node->dirty);
123
124 bs = block_bb_get(node->idx->service_id);
125
126 /* Read the block that contains the dentry of interest. */
127 rc = _fat_block_get(&b, bs, node->idx->service_id, node->idx->pfc,
128 NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
129 BLOCK_FLAGS_NONE);
130 if (rc != EOK)
131 return rc;
132
133 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));
134
135 d->firstc = host2uint16_t_le(node->firstc);
136 if (node->type == FAT_FILE) {
137 d->size = host2uint32_t_le(node->size);
138 } else if (node->type == FAT_DIRECTORY) {
139 d->attr = FAT_ATTR_SUBDIR;
140 }
141
142 /* TODO: update other fields? (e.g time fields) */
143
144 b->dirty = true; /* need to sync block */
145 rc = block_put(b);
146 return rc;
147}
148
149static int fat_node_fini_by_service_id(service_id_t service_id)
150{
151 fat_node_t *nodep;
152 int rc;
153
154 /*
155 * We are called from fat_unmounted() and assume that there are already
156 * no nodes belonging to this instance with non-zero refcount. Therefore
157 * it is sufficient to clean up only the FAT free node list.
158 */
159
160restart:
161 fibril_mutex_lock(&ffn_mutex);
162 list_foreach(ffn_list, lnk) {
163 nodep = list_get_instance(lnk, fat_node_t, ffn_link);
164 if (!fibril_mutex_trylock(&nodep->lock)) {
165 fibril_mutex_unlock(&ffn_mutex);
166 goto restart;
167 }
168 if (!fibril_mutex_trylock(&nodep->idx->lock)) {
169 fibril_mutex_unlock(&nodep->lock);
170 fibril_mutex_unlock(&ffn_mutex);
171 goto restart;
172 }
173 if (nodep->idx->service_id != service_id) {
174 fibril_mutex_unlock(&nodep->idx->lock);
175 fibril_mutex_unlock(&nodep->lock);
176 continue;
177 }
178
179 list_remove(&nodep->ffn_link);
180 fibril_mutex_unlock(&ffn_mutex);
181
182 /*
183 * We can unlock the node and its index structure because we are
184 * the last player on this playground and VFS is preventing new
185 * players from entering.
186 */
187 fibril_mutex_unlock(&nodep->idx->lock);
188 fibril_mutex_unlock(&nodep->lock);
189
190 if (nodep->dirty) {
191 rc = fat_node_sync(nodep);
192 if (rc != EOK)
193 return rc;
194 }
195 nodep->idx->nodep = NULL;
196 free(nodep->bp);
197 free(nodep);
198
199 /* Need to restart because we changed ffn_list. */
200 goto restart;
201 }
202 fibril_mutex_unlock(&ffn_mutex);
203
204 return EOK;
205}
206
207static int fat_node_get_new(fat_node_t **nodepp)
208{
209 fs_node_t *fn;
210 fat_node_t *nodep;
211 int rc;
212
213 fibril_mutex_lock(&ffn_mutex);
214 if (!list_empty(&ffn_list)) {
215 /* Try to use a cached free node structure. */
216 fat_idx_t *idxp_tmp;
217 nodep = list_get_instance(list_first(&ffn_list), fat_node_t,
218 ffn_link);
219 if (!fibril_mutex_trylock(&nodep->lock))
220 goto skip_cache;
221 idxp_tmp = nodep->idx;
222 if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
223 fibril_mutex_unlock(&nodep->lock);
224 goto skip_cache;
225 }
226 list_remove(&nodep->ffn_link);
227 fibril_mutex_unlock(&ffn_mutex);
228 if (nodep->dirty) {
229 rc = fat_node_sync(nodep);
230 if (rc != EOK) {
231 idxp_tmp->nodep = NULL;
232 fibril_mutex_unlock(&nodep->lock);
233 fibril_mutex_unlock(&idxp_tmp->lock);
234 free(nodep->bp);
235 free(nodep);
236 return rc;
237 }
238 }
239 idxp_tmp->nodep = NULL;
240 fibril_mutex_unlock(&nodep->lock);
241 fibril_mutex_unlock(&idxp_tmp->lock);
242 fn = FS_NODE(nodep);
243 } else {
244skip_cache:
245 /* Try to allocate a new node structure. */
246 fibril_mutex_unlock(&ffn_mutex);
247 fn = (fs_node_t *)malloc(sizeof(fs_node_t));
248 if (!fn)
249 return ENOMEM;
250 nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
251 if (!nodep) {
252 free(fn);
253 return ENOMEM;
254 }
255 }
256 fat_node_initialize(nodep);
257 fs_node_initialize(fn);
258 fn->data = nodep;
259 nodep->bp = fn;
260
261 *nodepp = nodep;
262 return EOK;
263}
264
265/** Internal version of fat_node_get().
266 *
267 * @param idxp Locked index structure.
268 */
269static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
270{
271 block_t *b;
272 fat_bs_t *bs;
273 fat_dentry_t *d;
274 fat_node_t *nodep = NULL;
275 int rc;
276
277 if (idxp->nodep) {
278 /*
279 * We are lucky.
280 * The node is already instantiated in memory.
281 */
282 fibril_mutex_lock(&idxp->nodep->lock);
283 if (!idxp->nodep->refcnt++) {
284 fibril_mutex_lock(&ffn_mutex);
285 list_remove(&idxp->nodep->ffn_link);
286 fibril_mutex_unlock(&ffn_mutex);
287 }
288 fibril_mutex_unlock(&idxp->nodep->lock);
289 *nodepp = idxp->nodep;
290 return EOK;
291 }
292
293 /*
294 * We must instantiate the node from the file system.
295 */
296
297 assert(idxp->pfc);
298
299 rc = fat_node_get_new(&nodep);
300 if (rc != EOK)
301 return rc;
302
303 bs = block_bb_get(idxp->service_id);
304
305 /* Read the block that contains the dentry of interest. */
306 rc = _fat_block_get(&b, bs, idxp->service_id, idxp->pfc, NULL,
307 (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
308 if (rc != EOK) {
309 (void) fat_node_put(FS_NODE(nodep));
310 return rc;
311 }
312
313 d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
314 if (FAT_IS_FAT32(bs)) {
315 nodep->firstc = uint16_t_le2host(d->firstc_lo) |
316 (uint16_t_le2host(d->firstc_hi) << 16);
317 } else
318 nodep->firstc = uint16_t_le2host(d->firstc);
319
320 if (d->attr & FAT_ATTR_SUBDIR) {
321 /*
322 * The only directory which does not have this bit set is the
323 * root directory itself. The root directory node is handled
324 * and initialized elsewhere.
325 */
326 nodep->type = FAT_DIRECTORY;
327
328 /*
329 * Unfortunately, the 'size' field of the FAT dentry is not
330 * defined for the directory entry type. We must determine the
331 * size of the directory by walking the FAT.
332 */
333 uint32_t clusters;
334 rc = fat_clusters_get(&clusters, bs, idxp->service_id,
335 nodep->firstc);
336 if (rc != EOK) {
337 (void) block_put(b);
338 (void) fat_node_put(FS_NODE(nodep));
339 return rc;
340 }
341 nodep->size = BPS(bs) * SPC(bs) * clusters;
342 } else {
343 nodep->type = FAT_FILE;
344 nodep->size = uint32_t_le2host(d->size);
345 }
346
347 nodep->lnkcnt = 1;
348 nodep->refcnt = 1;
349
350 rc = block_put(b);
351 if (rc != EOK) {
352 (void) fat_node_put(FS_NODE(nodep));
353 return rc;
354 }
355
356 /* Link the idx structure with the node structure. */
357 nodep->idx = idxp;
358 idxp->nodep = nodep;
359
360 *nodepp = nodep;
361 return EOK;
362}
363
364/*
365 * FAT libfs operations.
366 */
367
368int fat_root_get(fs_node_t **rfn, service_id_t service_id)
369{
370 return fat_node_get(rfn, service_id, 0);
371}
372
373int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
374{
375 fat_node_t *parentp = FAT_NODE(pfn);
376 char name[FAT_LFN_NAME_SIZE];
377 fat_dentry_t *d;
378 service_id_t service_id;
379 int rc;
380
381 fibril_mutex_lock(&parentp->idx->lock);
382 service_id = parentp->idx->service_id;
383 fibril_mutex_unlock(&parentp->idx->lock);
384
385 fat_directory_t di;
386 rc = fat_directory_open(parentp, &di);
387 if (rc != EOK)
388 return rc;
389
390 while (fat_directory_read(&di, name, &d) == EOK) {
391 if (fat_dentry_namecmp(name, component) == 0) {
392 /* hit */
393 fat_node_t *nodep;
394 aoff64_t o = di.pos %
395 (BPS(di.bs) / sizeof(fat_dentry_t));
396 fat_idx_t *idx = fat_idx_get_by_pos(service_id,
397 parentp->firstc, di.bnum * DPS(di.bs) + o);
398 if (!idx) {
399 /*
400 * Can happen if memory is low or if we
401 * run out of 32-bit indices.
402 */
403 rc = fat_directory_close(&di);
404 return (rc == EOK) ? ENOMEM : rc;
405 }
406 rc = fat_node_get_core(&nodep, idx);
407 fibril_mutex_unlock(&idx->lock);
408 if (rc != EOK) {
409 (void) fat_directory_close(&di);
410 return rc;
411 }
412 *rfn = FS_NODE(nodep);
413 rc = fat_directory_close(&di);
414 if (rc != EOK)
415 (void) fat_node_put(*rfn);
416 return rc;
417 } else {
418 rc = fat_directory_next(&di);
419 if (rc != EOK)
420 break;
421 }
422 }
423 (void) fat_directory_close(&di);
424 *rfn = NULL;
425 return EOK;
426}
427
428/** Instantiate a FAT in-core node. */
429int fat_node_get(fs_node_t **rfn, service_id_t service_id, fs_index_t index)
430{
431 fat_node_t *nodep;
432 fat_idx_t *idxp;
433 int rc;
434
435 idxp = fat_idx_get_by_index(service_id, index);
436 if (!idxp) {
437 *rfn = NULL;
438 return EOK;
439 }
440 /* idxp->lock held */
441 rc = fat_node_get_core(&nodep, idxp);
442 fibril_mutex_unlock(&idxp->lock);
443 if (rc == EOK)
444 *rfn = FS_NODE(nodep);
445 return rc;
446}
447
448int fat_node_open(fs_node_t *fn)
449{
450 /*
451 * Opening a file is stateless, nothing
452 * to be done here.
453 */
454 return EOK;
455}
456
457int fat_node_put(fs_node_t *fn)
458{
459 fat_node_t *nodep = FAT_NODE(fn);
460 bool destroy = false;
461
462 fibril_mutex_lock(&nodep->lock);
463 if (!--nodep->refcnt) {
464 if (nodep->idx) {
465 fibril_mutex_lock(&ffn_mutex);
466 list_append(&nodep->ffn_link, &ffn_list);
467 fibril_mutex_unlock(&ffn_mutex);
468 } else {
469 /*
470 * The node does not have any index structure associated
471 * with itself. This can only mean that we are releasing
472 * the node after a failed attempt to allocate the index
473 * structure for it.
474 */
475 destroy = true;
476 }
477 }
478 fibril_mutex_unlock(&nodep->lock);
479 if (destroy) {
480 free(nodep->bp);
481 free(nodep);
482 }
483 return EOK;
484}
485
486int fat_create_node(fs_node_t **rfn, service_id_t service_id, int flags)
487{
488 fat_idx_t *idxp;
489 fat_node_t *nodep;
490 fat_bs_t *bs;
491 fat_cluster_t mcl, lcl;
492 int rc;
493
494 bs = block_bb_get(service_id);
495 if (flags & L_DIRECTORY) {
496 /* allocate a cluster */
497 rc = fat_alloc_clusters(bs, service_id, 1, &mcl, &lcl);
498 if (rc != EOK)
499 return rc;
500 /* populate the new cluster with unused dentries */
501 rc = fat_zero_cluster(bs, service_id, mcl);
502 if (rc != EOK) {
503 (void) fat_free_clusters(bs, service_id, mcl);
504 return rc;
505 }
506 }
507
508 rc = fat_node_get_new(&nodep);
509 if (rc != EOK) {
510 (void) fat_free_clusters(bs, service_id, mcl);
511 return rc;
512 }
513 rc = fat_idx_get_new(&idxp, service_id);
514 if (rc != EOK) {
515 (void) fat_free_clusters(bs, service_id, mcl);
516 (void) fat_node_put(FS_NODE(nodep));
517 return rc;
518 }
519 /* idxp->lock held */
520 if (flags & L_DIRECTORY) {
521 nodep->type = FAT_DIRECTORY;
522 nodep->firstc = mcl;
523 nodep->size = BPS(bs) * SPC(bs);
524 } else {
525 nodep->type = FAT_FILE;
526 nodep->firstc = FAT_CLST_RES0;
527 nodep->size = 0;
528 }
529 nodep->lnkcnt = 0; /* not linked anywhere */
530 nodep->refcnt = 1;
531 nodep->dirty = true;
532
533 nodep->idx = idxp;
534 idxp->nodep = nodep;
535
536 fibril_mutex_unlock(&idxp->lock);
537 *rfn = FS_NODE(nodep);
538 return EOK;
539}
540
541int fat_destroy_node(fs_node_t *fn)
542{
543 fat_node_t *nodep = FAT_NODE(fn);
544 fat_bs_t *bs;
545 bool has_children;
546 int rc;
547
548 /*
549 * The node is not reachable from the file system. This means that the
550 * link count should be zero and that the index structure cannot be
551 * found in the position hash. Obviously, we don't need to lock the node
552 * nor its index structure.
553 */
554 assert(nodep->lnkcnt == 0);
555
556 /*
557 * The node may not have any children.
558 */
559 rc = fat_has_children(&has_children, fn);
560 if (rc != EOK)
561 return rc;
562 assert(!has_children);
563
564 bs = block_bb_get(nodep->idx->service_id);
565 if (nodep->firstc != FAT_CLST_RES0) {
566 assert(nodep->size);
567 /* Free all clusters allocated to the node. */
568 rc = fat_free_clusters(bs, nodep->idx->service_id,
569 nodep->firstc);
570 }
571
572 fat_idx_destroy(nodep->idx);
573 free(nodep->bp);
574 free(nodep);
575 return rc;
576}
577
578int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
579{
580 fat_node_t *parentp = FAT_NODE(pfn);
581 fat_node_t *childp = FAT_NODE(cfn);
582 fat_dentry_t *d;
583 fat_bs_t *bs;
584 block_t *b;
585 fat_directory_t di;
586 fat_dentry_t de;
587 int rc;
588
589 fibril_mutex_lock(&childp->lock);
590 if (childp->lnkcnt == 1) {
591 /*
592 * On FAT, we don't support multiple hard links.
593 */
594 fibril_mutex_unlock(&childp->lock);
595 return EMLINK;
596 }
597 assert(childp->lnkcnt == 0);
598 fibril_mutex_unlock(&childp->lock);
599
600 if (!fat_valid_name(name))
601 return ENOTSUP;
602
603 fibril_mutex_lock(&parentp->idx->lock);
604 bs = block_bb_get(parentp->idx->service_id);
605 rc = fat_directory_open(parentp, &di);
606 if (rc != EOK) {
607 fibril_mutex_unlock(&parentp->idx->lock);
608 return rc;
609 }
610
611 /*
612 * At this point we only establish the link between the parent and the
613 * child. The dentry, except of the name and the extension, will remain
614 * uninitialized until the corresponding node is synced. Thus the valid
615 * dentry data is kept in the child node structure.
616 */
617 memset(&de, 0, sizeof(fat_dentry_t));
618
619 rc = fat_directory_write(&di, name, &de);
620 if (rc != EOK) {
621 (void) fat_directory_close(&di);
622 fibril_mutex_unlock(&parentp->idx->lock);
623 return rc;
624 }
625 rc = fat_directory_close(&di);
626 if (rc != EOK) {
627 fibril_mutex_unlock(&parentp->idx->lock);
628 return rc;
629 }
630
631 fibril_mutex_unlock(&parentp->idx->lock);
632
633 fibril_mutex_lock(&childp->idx->lock);
634
635 if (childp->type == FAT_DIRECTORY) {
636 /*
637 * If possible, create the Sub-directory Identifier Entry and
638 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
639 * These entries are not mandatory according to Standard
640 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
641 * rather a sign of our good will.
642 */
643 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
644 if (rc != EOK) {
645 /*
646 * Rather than returning an error, simply skip the
647 * creation of these two entries.
648 */
649 goto skip_dots;
650 }
651 d = (fat_dentry_t *) b->data;
652 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
653 (bcmp(d->name, FAT_NAME_DOT, FAT_NAME_LEN)) == 0) {
654 memset(d, 0, sizeof(fat_dentry_t));
655 memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
656 memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
657 d->attr = FAT_ATTR_SUBDIR;
658 d->firstc = host2uint16_t_le(childp->firstc);
659 /* TODO: initialize also the date/time members. */
660 }
661 d++;
662 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
663 (bcmp(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN) == 0)) {
664 memset(d, 0, sizeof(fat_dentry_t));
665 memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
666 memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
667 d->attr = FAT_ATTR_SUBDIR;
668 d->firstc = (parentp->firstc == FAT_ROOT_CLST(bs)) ?
669 host2uint16_t_le(FAT_CLST_ROOTPAR) :
670 host2uint16_t_le(parentp->firstc);
671 /* TODO: initialize also the date/time members. */
672 }
673 b->dirty = true; /* need to sync block */
674 /*
675 * Ignore the return value as we would have fallen through on error
676 * anyway.
677 */
678 (void) block_put(b);
679 }
680skip_dots:
681
682 childp->idx->pfc = parentp->firstc;
683 childp->idx->pdi = di.pos; /* di.pos holds absolute position of SFN entry */
684 fibril_mutex_unlock(&childp->idx->lock);
685
686 fibril_mutex_lock(&childp->lock);
687 childp->lnkcnt = 1;
688 childp->dirty = true; /* need to sync node */
689 fibril_mutex_unlock(&childp->lock);
690
691 /*
692 * Hash in the index structure into the position hash.
693 */
694 fat_idx_hashin(childp->idx);
695
696 return EOK;
697}
698
699int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
700{
701 fat_node_t *parentp = FAT_NODE(pfn);
702 fat_node_t *childp = FAT_NODE(cfn);
703 bool has_children;
704 int rc;
705
706 if (!parentp)
707 return EBUSY;
708
709 rc = fat_has_children(&has_children, cfn);
710 if (rc != EOK)
711 return rc;
712 if (has_children)
713 return ENOTEMPTY;
714
715 fibril_mutex_lock(&parentp->lock);
716 fibril_mutex_lock(&childp->lock);
717 assert(childp->lnkcnt == 1);
718 fibril_mutex_lock(&childp->idx->lock);
719
720 fat_directory_t di;
721 rc = fat_directory_open(parentp, &di);
722 if (rc != EOK)
723 goto error;
724 rc = fat_directory_seek(&di, childp->idx->pdi);
725 if (rc != EOK)
726 goto error;
727 rc = fat_directory_erase(&di);
728 if (rc != EOK)
729 goto error;
730 rc = fat_directory_close(&di);
731 if (rc != EOK)
732 goto error;
733
734 /* remove the index structure from the position hash */
735 fat_idx_hashout(childp->idx);
736 /* clear position information */
737 childp->idx->pfc = FAT_CLST_RES0;
738 childp->idx->pdi = 0;
739 fibril_mutex_unlock(&childp->idx->lock);
740 childp->lnkcnt = 0;
741 childp->refcnt++; /* keep the node in memory until destroyed */
742 childp->dirty = true;
743 fibril_mutex_unlock(&childp->lock);
744 fibril_mutex_unlock(&parentp->lock);
745
746 return EOK;
747
748error:
749 (void) fat_directory_close(&di);
750 fibril_mutex_unlock(&childp->idx->lock);
751 fibril_mutex_unlock(&childp->lock);
752 fibril_mutex_unlock(&parentp->lock);
753 return rc;
754}
755
756int fat_has_children(bool *has_children, fs_node_t *fn)
757{
758 fat_bs_t *bs;
759 fat_node_t *nodep = FAT_NODE(fn);
760 unsigned blocks;
761 block_t *b;
762 unsigned i, j;
763 int rc;
764
765 if (nodep->type != FAT_DIRECTORY) {
766 *has_children = false;
767 return EOK;
768 }
769
770 fibril_mutex_lock(&nodep->idx->lock);
771 bs = block_bb_get(nodep->idx->service_id);
772
773 blocks = nodep->size / BPS(bs);
774
775 for (i = 0; i < blocks; i++) {
776 fat_dentry_t *d;
777
778 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
779 if (rc != EOK) {
780 fibril_mutex_unlock(&nodep->idx->lock);
781 return rc;
782 }
783 for (j = 0; j < DPS(bs); j++) {
784 d = ((fat_dentry_t *)b->data) + j;
785 switch (fat_classify_dentry(d)) {
786 case FAT_DENTRY_SKIP:
787 case FAT_DENTRY_FREE:
788 continue;
789 case FAT_DENTRY_LAST:
790 rc = block_put(b);
791 fibril_mutex_unlock(&nodep->idx->lock);
792 *has_children = false;
793 return rc;
794 default:
795 case FAT_DENTRY_VALID:
796 rc = block_put(b);
797 fibril_mutex_unlock(&nodep->idx->lock);
798 *has_children = true;
799 return rc;
800 }
801 }
802 rc = block_put(b);
803 if (rc != EOK) {
804 fibril_mutex_unlock(&nodep->idx->lock);
805 return rc;
806 }
807 }
808
809 fibril_mutex_unlock(&nodep->idx->lock);
810 *has_children = false;
811 return EOK;
812}
813
814
815fs_index_t fat_index_get(fs_node_t *fn)
816{
817 return FAT_NODE(fn)->idx->index;
818}
819
820aoff64_t fat_size_get(fs_node_t *fn)
821{
822 return FAT_NODE(fn)->size;
823}
824
825unsigned fat_lnkcnt_get(fs_node_t *fn)
826{
827 return FAT_NODE(fn)->lnkcnt;
828}
829
830bool fat_is_directory(fs_node_t *fn)
831{
832 return FAT_NODE(fn)->type == FAT_DIRECTORY;
833}
834
835bool fat_is_file(fs_node_t *fn)
836{
837 return FAT_NODE(fn)->type == FAT_FILE;
838}
839
840service_id_t fat_device_get(fs_node_t *node)
841{
842 return 0;
843}
844
845/** libfs operations */
846libfs_ops_t fat_libfs_ops = {
847 .root_get = fat_root_get,
848 .match = fat_match,
849 .node_get = fat_node_get,
850 .node_open = fat_node_open,
851 .node_put = fat_node_put,
852 .create = fat_create_node,
853 .destroy = fat_destroy_node,
854 .link = fat_link,
855 .unlink = fat_unlink,
856 .has_children = fat_has_children,
857 .index_get = fat_index_get,
858 .size_get = fat_size_get,
859 .lnkcnt_get = fat_lnkcnt_get,
860 .is_directory = fat_is_directory,
861 .is_file = fat_is_file,
862 .device_get = fat_device_get
863};
864
865/*
866 * FAT VFS_OUT operations.
867 */
868
869static int
870fat_mounted(service_id_t service_id, const char *opts, fs_index_t *index,
871 aoff64_t *size, unsigned *linkcnt)
872{
873 enum cache_mode cmode;
874 fat_bs_t *bs;
875 int rc;
876
877 /* Check for option enabling write through. */
878 if (str_cmp(opts, "wtcache") == 0)
879 cmode = CACHE_MODE_WT;
880 else
881 cmode = CACHE_MODE_WB;
882
883 /* initialize libblock */
884 rc = block_init(EXCHANGE_SERIALIZE, service_id, BS_SIZE);
885 if (rc != EOK)
886 return rc;
887
888 /* prepare the boot block */
889 rc = block_bb_read(service_id, BS_BLOCK);
890 if (rc != EOK) {
891 block_fini(service_id);
892 return rc;
893 }
894
895 /* get the buffer with the boot sector */
896 bs = block_bb_get(service_id);
897
898 if (BPS(bs) != BS_SIZE) {
899 block_fini(service_id);
900 return ENOTSUP;
901 }
902
903 /* Initialize the block cache */
904 rc = block_cache_init(service_id, BPS(bs), 0 /* XXX */, cmode);
905 if (rc != EOK) {
906 block_fini(service_id);
907 return rc;
908 }
909
910 /* Do some simple sanity checks on the file system. */
911 rc = fat_sanity_check(bs, service_id);
912 if (rc != EOK) {
913 (void) block_cache_fini(service_id);
914 block_fini(service_id);
915 return rc;
916 }
917
918 rc = fat_idx_init_by_service_id(service_id);
919 if (rc != EOK) {
920 (void) block_cache_fini(service_id);
921 block_fini(service_id);
922 return rc;
923 }
924
925 /* Initialize the root node. */
926 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
927 if (!rfn) {
928 (void) block_cache_fini(service_id);
929 block_fini(service_id);
930 fat_idx_fini_by_service_id(service_id);
931 return ENOMEM;
932 }
933
934 fs_node_initialize(rfn);
935 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
936 if (!rootp) {
937 free(rfn);
938 (void) block_cache_fini(service_id);
939 block_fini(service_id);
940 fat_idx_fini_by_service_id(service_id);
941 return ENOMEM;
942 }
943 fat_node_initialize(rootp);
944
945 fat_idx_t *ridxp = fat_idx_get_by_pos(service_id, FAT_CLST_ROOTPAR, 0);
946 if (!ridxp) {
947 free(rfn);
948 free(rootp);
949 (void) block_cache_fini(service_id);
950 block_fini(service_id);
951 fat_idx_fini_by_service_id(service_id);
952 return ENOMEM;
953 }
954 assert(ridxp->index == 0);
955 /* ridxp->lock held */
956
957 rootp->type = FAT_DIRECTORY;
958 rootp->firstc = FAT_ROOT_CLST(bs);
959 rootp->refcnt = 1;
960 rootp->lnkcnt = 0; /* FS root is not linked */
961
962 if (FAT_IS_FAT32(bs)) {
963 uint32_t clusters;
964 rc = fat_clusters_get(&clusters, bs, service_id, rootp->firstc);
965 if (rc != EOK) {
966 free(rfn);
967 free(rootp);
968 (void) block_cache_fini(service_id);
969 block_fini(service_id);
970 fat_idx_fini_by_service_id(service_id);
971 return ENOTSUP;
972 }
973 rootp->size = BPS(bs) * SPC(bs) * clusters;
974 } else
975 rootp->size = RDE(bs) * sizeof(fat_dentry_t);
976
977 rootp->idx = ridxp;
978 ridxp->nodep = rootp;
979 rootp->bp = rfn;
980 rfn->data = rootp;
981
982 fibril_mutex_unlock(&ridxp->lock);
983
984 *index = ridxp->index;
985 *size = rootp->size;
986 *linkcnt = rootp->lnkcnt;
987
988 return EOK;
989}
990
991static int fat_unmounted(service_id_t service_id)
992{
993 fs_node_t *fn;
994 fat_node_t *nodep;
995 int rc;
996
997 rc = fat_root_get(&fn, service_id);
998 if (rc != EOK)
999 return rc;
1000 nodep = FAT_NODE(fn);
1001
1002 /*
1003 * We expect exactly two references on the root node. One for the
1004 * fat_root_get() above and one created in fat_mounted().
1005 */
1006 if (nodep->refcnt != 2) {
1007 (void) fat_node_put(fn);
1008 return EBUSY;
1009 }
1010
1011 /*
1012 * Put the root node and force it to the FAT free node list.
1013 */
1014 (void) fat_node_put(fn);
1015 (void) fat_node_put(fn);
1016
1017 /*
1018 * Perform cleanup of the node structures, index structures and
1019 * associated data. Write back this file system's dirty blocks and
1020 * stop using libblock for this instance.
1021 */
1022 (void) fat_node_fini_by_service_id(service_id);
1023 fat_idx_fini_by_service_id(service_id);
1024 (void) block_cache_fini(service_id);
1025 block_fini(service_id);
1026
1027 return EOK;
1028}
1029
1030static int
1031fat_read(service_id_t service_id, fs_index_t index, aoff64_t pos,
1032 size_t *rbytes)
1033{
1034 fs_node_t *fn;
1035 fat_node_t *nodep;
1036 fat_bs_t *bs;
1037 size_t bytes;
1038 block_t *b;
1039 int rc;
1040
1041 rc = fat_node_get(&fn, service_id, index);
1042 if (rc != EOK)
1043 return rc;
1044 if (!fn)
1045 return ENOENT;
1046 nodep = FAT_NODE(fn);
1047
1048 ipc_callid_t callid;
1049 size_t len;
1050 if (!async_data_read_receive(&callid, &len)) {
1051 fat_node_put(fn);
1052 async_answer_0(callid, EINVAL);
1053 return EINVAL;
1054 }
1055
1056 bs = block_bb_get(service_id);
1057
1058 if (nodep->type == FAT_FILE) {
1059 /*
1060 * Our strategy for regular file reads is to read one block at
1061 * most and make use of the possibility to return less data than
1062 * requested. This keeps the code very simple.
1063 */
1064 if (pos >= nodep->size) {
1065 /* reading beyond the EOF */
1066 bytes = 0;
1067 (void) async_data_read_finalize(callid, NULL, 0);
1068 } else {
1069 bytes = min(len, BPS(bs) - pos % BPS(bs));
1070 bytes = min(bytes, nodep->size - pos);
1071 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
1072 BLOCK_FLAGS_NONE);
1073 if (rc != EOK) {
1074 fat_node_put(fn);
1075 async_answer_0(callid, rc);
1076 return rc;
1077 }
1078 (void) async_data_read_finalize(callid,
1079 b->data + pos % BPS(bs), bytes);
1080 rc = block_put(b);
1081 if (rc != EOK) {
1082 fat_node_put(fn);
1083 return rc;
1084 }
1085 }
1086 } else {
1087 aoff64_t spos = pos;
1088 char name[FAT_LFN_NAME_SIZE];
1089 fat_dentry_t *d;
1090
1091 assert(nodep->type == FAT_DIRECTORY);
1092 assert(nodep->size % BPS(bs) == 0);
1093 assert(BPS(bs) % sizeof(fat_dentry_t) == 0);
1094
1095 fat_directory_t di;
1096 rc = fat_directory_open(nodep, &di);
1097 if (rc != EOK)
1098 goto err;
1099 rc = fat_directory_seek(&di, pos);
1100 if (rc != EOK) {
1101 (void) fat_directory_close(&di);
1102 goto err;
1103 }
1104
1105 rc = fat_directory_read(&di, name, &d);
1106 if (rc == EOK)
1107 goto hit;
1108 if (rc == ENOENT)
1109 goto miss;
1110
1111err:
1112 (void) fat_node_put(fn);
1113 async_answer_0(callid, rc);
1114 return rc;
1115
1116miss:
1117 rc = fat_directory_close(&di);
1118 if (rc != EOK)
1119 goto err;
1120 rc = fat_node_put(fn);
1121 async_answer_0(callid, rc != EOK ? rc : ENOENT);
1122 *rbytes = 0;
1123 return rc != EOK ? rc : ENOENT;
1124
1125hit:
1126 pos = di.pos;
1127 rc = fat_directory_close(&di);
1128 if (rc != EOK)
1129 goto err;
1130 (void) async_data_read_finalize(callid, name,
1131 str_size(name) + 1);
1132 bytes = (pos - spos) + 1;
1133 }
1134
1135 rc = fat_node_put(fn);
1136 *rbytes = bytes;
1137 return rc;
1138}
1139
1140static int
1141fat_write(service_id_t service_id, fs_index_t index, aoff64_t pos,
1142 size_t *wbytes, aoff64_t *nsize)
1143{
1144 fs_node_t *fn;
1145 fat_node_t *nodep;
1146 fat_bs_t *bs;
1147 size_t bytes;
1148 block_t *b;
1149 aoff64_t boundary;
1150 int flags = BLOCK_FLAGS_NONE;
1151 int rc;
1152
1153 rc = fat_node_get(&fn, service_id, index);
1154 if (rc != EOK)
1155 return rc;
1156 if (!fn)
1157 return ENOENT;
1158 nodep = FAT_NODE(fn);
1159
1160 ipc_callid_t callid;
1161 size_t len;
1162 if (!async_data_write_receive(&callid, &len)) {
1163 (void) fat_node_put(fn);
1164 async_answer_0(callid, EINVAL);
1165 return EINVAL;
1166 }
1167
1168 bs = block_bb_get(service_id);
1169
1170 /*
1171 * In all scenarios, we will attempt to write out only one block worth
1172 * of data at maximum. There might be some more efficient approaches,
1173 * but this one greatly simplifies fat_write(). Note that we can afford
1174 * to do this because the client must be ready to handle the return
1175 * value signalizing a smaller number of bytes written.
1176 */
1177 bytes = min(len, BPS(bs) - pos % BPS(bs));
1178 if (bytes == BPS(bs))
1179 flags |= BLOCK_FLAGS_NOREAD;
1180
1181 boundary = ROUND_UP(nodep->size, BPC(bs));
1182 if (pos < boundary) {
1183 /*
1184 * This is the easier case - we are either overwriting already
1185 * existing contents or writing behind the EOF, but still within
1186 * the limits of the last cluster. The node size may grow to the
1187 * next block size boundary.
1188 */
1189 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1190 if (rc != EOK) {
1191 (void) fat_node_put(fn);
1192 async_answer_0(callid, rc);
1193 return rc;
1194 }
1195 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
1196 if (rc != EOK) {
1197 (void) fat_node_put(fn);
1198 async_answer_0(callid, rc);
1199 return rc;
1200 }
1201 (void) async_data_write_finalize(callid,
1202 b->data + pos % BPS(bs), bytes);
1203 b->dirty = true; /* need to sync block */
1204 rc = block_put(b);
1205 if (rc != EOK) {
1206 (void) fat_node_put(fn);
1207 return rc;
1208 }
1209 if (pos + bytes > nodep->size) {
1210 nodep->size = pos + bytes;
1211 nodep->dirty = true; /* need to sync node */
1212 }
1213 *wbytes = bytes;
1214 *nsize = nodep->size;
1215 rc = fat_node_put(fn);
1216 return rc;
1217 } else {
1218 /*
1219 * This is the more difficult case. We must allocate new
1220 * clusters for the node and zero them out.
1221 */
1222 unsigned nclsts;
1223 fat_cluster_t mcl, lcl;
1224
1225 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
1226 /* create an independent chain of nclsts clusters in all FATs */
1227 rc = fat_alloc_clusters(bs, service_id, nclsts, &mcl, &lcl);
1228 if (rc != EOK) {
1229 /* could not allocate a chain of nclsts clusters */
1230 (void) fat_node_put(fn);
1231 async_answer_0(callid, rc);
1232 return rc;
1233 }
1234 /* zero fill any gaps */
1235 rc = fat_fill_gap(bs, nodep, mcl, pos);
1236 if (rc != EOK) {
1237 (void) fat_free_clusters(bs, service_id, mcl);
1238 (void) fat_node_put(fn);
1239 async_answer_0(callid, rc);
1240 return rc;
1241 }
1242 rc = _fat_block_get(&b, bs, service_id, lcl, NULL,
1243 (pos / BPS(bs)) % SPC(bs), flags);
1244 if (rc != EOK) {
1245 (void) fat_free_clusters(bs, service_id, mcl);
1246 (void) fat_node_put(fn);
1247 async_answer_0(callid, rc);
1248 return rc;
1249 }
1250 (void) async_data_write_finalize(callid,
1251 b->data + pos % BPS(bs), bytes);
1252 b->dirty = true; /* need to sync block */
1253 rc = block_put(b);
1254 if (rc != EOK) {
1255 (void) fat_free_clusters(bs, service_id, mcl);
1256 (void) fat_node_put(fn);
1257 return rc;
1258 }
1259 /*
1260 * Append the cluster chain starting in mcl to the end of the
1261 * node's cluster chain.
1262 */
1263 rc = fat_append_clusters(bs, nodep, mcl, lcl);
1264 if (rc != EOK) {
1265 (void) fat_free_clusters(bs, service_id, mcl);
1266 (void) fat_node_put(fn);
1267 return rc;
1268 }
1269 *nsize = nodep->size = pos + bytes;
1270 rc = fat_node_put(fn);
1271 nodep->dirty = true; /* need to sync node */
1272 *wbytes = bytes;
1273 return rc;
1274 }
1275}
1276
1277static int
1278fat_truncate(service_id_t service_id, fs_index_t index, aoff64_t size)
1279{
1280 fs_node_t *fn;
1281 fat_node_t *nodep;
1282 fat_bs_t *bs;
1283 int rc;
1284
1285 rc = fat_node_get(&fn, service_id, index);
1286 if (rc != EOK)
1287 return rc;
1288 if (!fn)
1289 return ENOENT;
1290 nodep = FAT_NODE(fn);
1291
1292 bs = block_bb_get(service_id);
1293
1294 if (nodep->size == size) {
1295 rc = EOK;
1296 } else if (nodep->size < size) {
1297 /*
1298 * The standard says we have the freedom to grow the node.
1299 * For now, we simply return an error.
1300 */
1301 rc = EINVAL;
1302 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
1303 /*
1304 * The node will be shrunk, but no clusters will be deallocated.
1305 */
1306 nodep->size = size;
1307 nodep->dirty = true; /* need to sync node */
1308 rc = EOK;
1309 } else {
1310 /*
1311 * The node will be shrunk, clusters will be deallocated.
1312 */
1313 if (size == 0) {
1314 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1315 if (rc != EOK)
1316 goto out;
1317 } else {
1318 fat_cluster_t lastc;
1319 rc = fat_cluster_walk(bs, service_id, nodep->firstc,
1320 &lastc, NULL, (size - 1) / BPC(bs));
1321 if (rc != EOK)
1322 goto out;
1323 rc = fat_chop_clusters(bs, nodep, lastc);
1324 if (rc != EOK)
1325 goto out;
1326 }
1327 nodep->size = size;
1328 nodep->dirty = true; /* need to sync node */
1329 rc = EOK;
1330 }
1331out:
1332 fat_node_put(fn);
1333 return rc;
1334}
1335
1336static int fat_close(service_id_t service_id, fs_index_t index)
1337{
1338 return EOK;
1339}
1340
1341static int fat_destroy(service_id_t service_id, fs_index_t index)
1342{
1343 fs_node_t *fn;
1344 fat_node_t *nodep;
1345 int rc;
1346
1347 rc = fat_node_get(&fn, service_id, index);
1348 if (rc != EOK)
1349 return rc;
1350 if (!fn)
1351 return ENOENT;
1352
1353 nodep = FAT_NODE(fn);
1354 /*
1355 * We should have exactly two references. One for the above
1356 * call to fat_node_get() and one from fat_unlink().
1357 */
1358 assert(nodep->refcnt == 2);
1359
1360 rc = fat_destroy_node(fn);
1361 return rc;
1362}
1363
1364static int fat_sync(service_id_t service_id, fs_index_t index)
1365{
1366 fs_node_t *fn;
1367 int rc = fat_node_get(&fn, service_id, index);
1368 if (rc != EOK)
1369 return rc;
1370 if (!fn)
1371 return ENOENT;
1372
1373 fat_node_t *nodep = FAT_NODE(fn);
1374
1375 nodep->dirty = true;
1376 rc = fat_node_sync(nodep);
1377
1378 fat_node_put(fn);
1379 return rc;
1380}
1381
1382vfs_out_ops_t fat_ops = {
1383 .mounted = fat_mounted,
1384 .unmounted = fat_unmounted,
1385 .read = fat_read,
1386 .write = fat_write,
1387 .truncate = fat_truncate,
1388 .close = fat_close,
1389 .destroy = fat_destroy,
1390 .sync = fat_sync,
1391};
1392
1393/**
1394 * @}
1395 */
Note: See TracBrowser for help on using the repository browser.