source: mainline/uspace/srv/fs/fat/fat_ops.c @ 92bee46

Last change on this file was 92bee46, checked in by Jakub Jermar <jakub@…>, 16 years ago:

Refuse to mount FAT file systems with root directory entries that do not take up
whole blocks.

1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
39#include "fat_dentry.h"
40#include "fat_fat.h"
41#include "../../vfs/vfs.h"
42#include <libfs.h>
43#include <libblock.h>
44#include <ipc/ipc.h>
45#include <ipc/services.h>
46#include <ipc/devmap.h>
47#include <async.h>
48#include <errno.h>
49#include <string.h>
50#include <byteorder.h>
51#include <adt/hash_table.h>
52#include <adt/list.h>
53#include <assert.h>
54#include <fibril_synch.h>
55#include <sys/mman.h>
56#include <align.h>
57
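/*
 * Each FAT node is represented by a generic libfs node (fs_node_t) and a
 * FAT-specific node (fat_node_t); the two structures point to each other
 * through the bp and data members. The macros below convert between them.
 */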
58#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
59#define FS_NODE(node) ((node) ? (node)->bp : NULL)
60
61/** Mutex protecting the list of cached free FAT nodes. */
62static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
63
64/** List of cached free FAT nodes. */
65static LIST_INITIALIZE(ffn_head);
66
67/*
68 * Forward declarations of FAT libfs operations.
69 */
70static int fat_root_get(fs_node_t **, dev_handle_t);
71static int fat_match(fs_node_t **, fs_node_t *, const char *);
72static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
73static int fat_node_open(fs_node_t *);
74static int fat_node_put(fs_node_t *);
75static int fat_create_node(fs_node_t **, dev_handle_t, int);
76static int fat_destroy_node(fs_node_t *);
77static int fat_link(fs_node_t *, fs_node_t *, const char *);
78static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
79static int fat_has_children(bool *, fs_node_t *);
80static fs_index_t fat_index_get(fs_node_t *);
81static size_t fat_size_get(fs_node_t *);
82static unsigned fat_lnkcnt_get(fs_node_t *);
83static char fat_plb_get_char(unsigned);
84static bool fat_is_directory(fs_node_t *);
85static bool fat_is_file(fs_node_t *node);
86static dev_handle_t fat_device_get(fs_node_t *node);
87
88/*
89 * Helper functions.
90 */
91static void fat_node_initialize(fat_node_t *node)
92{
93 fibril_mutex_initialize(&node->lock);
94 node->bp = NULL;
95 node->idx = NULL;
96 node->type = 0;
97 link_initialize(&node->ffn_link);
98 node->size = 0;
99 node->lnkcnt = 0;
100 node->refcnt = 0;
101 node->dirty = false;
102}
103
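/** Write the in-core node metadata back to its on-disk directory entry.
 *
 * The dentry lives in the parent directory's cluster chain, in block
 * (pdi * sizeof(fat_dentry_t)) / bps, at index pdi % dps within that block.
 * Only the first cluster, the size (for files) and the directory attribute
 * are written back; the block is then marked dirty so that the block cache
 * flushes it.
 */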
104static int fat_node_sync(fat_node_t *node)
105{
106 block_t *b;
107 fat_bs_t *bs;
108 fat_dentry_t *d;
109 uint16_t bps;
110 unsigned dps;
111 int rc;
112
113 assert(node->dirty);
114
115 bs = block_bb_get(node->idx->dev_handle);
116 bps = uint16_t_le2host(bs->bps);
117 dps = bps / sizeof(fat_dentry_t);
118
119 /* Read the block that contains the dentry of interest. */
120 rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
121 (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
122 if (rc != EOK)
123 return rc;
124
125 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);
126
127 d->firstc = host2uint16_t_le(node->firstc);
128 if (node->type == FAT_FILE) {
129 d->size = host2uint32_t_le(node->size);
130 } else if (node->type == FAT_DIRECTORY) {
131 d->attr = FAT_ATTR_SUBDIR;
132 }
133
134 /* TODO: update other fields? (e.g., time fields) */
135
136 b->dirty = true; /* need to sync block */
137 rc = block_put(b);
138 return rc;
139}
140
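/** Obtain a free in-core node structure.
 *
 * A node is preferably recycled from the list of cached free nodes; if the
 * recycled node is dirty, it is synced to disk first. When no suitable
 * cached node exists, a new fs_node_t/fat_node_t pair is allocated.
 */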
141static int fat_node_get_new(fat_node_t **nodepp)
142{
143 fs_node_t *fn;
144 fat_node_t *nodep;
145 int rc;
146
147 fibril_mutex_lock(&ffn_mutex);
148 if (!list_empty(&ffn_head)) {
149 /* Try to use a cached free node structure. */
150 fat_idx_t *idxp_tmp;
151 nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
152 if (!fibril_mutex_trylock(&nodep->lock))
153 goto skip_cache;
154 idxp_tmp = nodep->idx;
155 if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
156 fibril_mutex_unlock(&nodep->lock);
157 goto skip_cache;
158 }
159 list_remove(&nodep->ffn_link);
160 fibril_mutex_unlock(&ffn_mutex);
161 if (nodep->dirty) {
162 rc = fat_node_sync(nodep);
163 if (rc != EOK) {
164 idxp_tmp->nodep = NULL;
165 fibril_mutex_unlock(&nodep->lock);
166 fibril_mutex_unlock(&idxp_tmp->lock);
167 free(nodep->bp);
168 free(nodep);
169 return rc;
170 }
171 }
172 idxp_tmp->nodep = NULL;
173 fibril_mutex_unlock(&nodep->lock);
174 fibril_mutex_unlock(&idxp_tmp->lock);
175 fn = FS_NODE(nodep);
176 } else {
177skip_cache:
178 /* Try to allocate a new node structure. */
179 fibril_mutex_unlock(&ffn_mutex);
180 fn = (fs_node_t *)malloc(sizeof(fs_node_t));
181 if (!fn)
182 return ENOMEM;
183 nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
184 if (!nodep) {
185 free(fn);
186 return ENOMEM;
187 }
188 }
189 fat_node_initialize(nodep);
190 fs_node_initialize(fn);
191 fn->data = nodep;
192 nodep->bp = fn;
193
194 *nodepp = nodep;
195 return EOK;
196}
197
198/** Internal version of fat_node_get().
199 *
200 * @param idxp Locked index structure.
201 */
202static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
203{
204 block_t *b;
205 fat_bs_t *bs;
206 fat_dentry_t *d;
207 fat_node_t *nodep = NULL;
208 unsigned bps;
209 unsigned spc;
210 unsigned dps;
211 int rc;
212
213 if (idxp->nodep) {
214 /*
215 * We are lucky.
216 * The node is already instantiated in memory.
217 */
218 fibril_mutex_lock(&idxp->nodep->lock);
219 if (!idxp->nodep->refcnt++) {
220 fibril_mutex_lock(&ffn_mutex);
221 list_remove(&idxp->nodep->ffn_link);
222 fibril_mutex_unlock(&ffn_mutex);
223 }
224 fibril_mutex_unlock(&idxp->nodep->lock);
225 *nodepp = idxp->nodep;
226 return EOK;
227 }
228
229 /*
230 * We must instantiate the node from the file system.
231 */
232
233 assert(idxp->pfc);
234
235 rc = fat_node_get_new(&nodep);
236 if (rc != EOK)
237 return rc;
238
239 bs = block_bb_get(idxp->dev_handle);
240 bps = uint16_t_le2host(bs->bps);
241 spc = bs->spc;
242 dps = bps / sizeof(fat_dentry_t);
243
244 /* Read the block that contains the dentry of interest. */
245 rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
246 (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
247 if (rc != EOK) {
248 (void) fat_node_put(FS_NODE(nodep));
249 return rc;
250 }
251
252 d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
253 if (d->attr & FAT_ATTR_SUBDIR) {
254 /*
255 * The only directory which does not have this bit set is the
256 * root directory itself. The root directory node is handled
257 * and initialized elsewhere.
258 */
259 nodep->type = FAT_DIRECTORY;
260 /*
261 * Unfortunately, the 'size' field of the FAT dentry is not
262 * defined for the directory entry type. We must determine the
263 * size of the directory by walking the FAT.
264 */
265 uint16_t clusters;
266 rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
267 uint16_t_le2host(d->firstc));
268 if (rc != EOK) {
269 (void) fat_node_put(FS_NODE(nodep));
270 return rc;
271 }
272 nodep->size = bps * spc * clusters;
273 } else {
274 nodep->type = FAT_FILE;
275 nodep->size = uint32_t_le2host(d->size);
276 }
277 nodep->firstc = uint16_t_le2host(d->firstc);
278 nodep->lnkcnt = 1;
279 nodep->refcnt = 1;
280
281 rc = block_put(b);
282 if (rc != EOK) {
283 (void) fat_node_put(FS_NODE(nodep));
284 return rc;
285 }
286
287 /* Link the idx structure with the node structure. */
288 nodep->idx = idxp;
289 idxp->nodep = nodep;
290
291 *nodepp = nodep;
292 return EOK;
293}
294
295/** Perform basic sanity checks on the file system.
296 *
297 * Verify that the values of the boot sector fields are sane. Also verify
298 * the media descriptor. This is used to rule out cases when a device
299 * obviously does not contain a FAT file system.
300 */
301static int fat_sanity_check(fat_bs_t *bs, dev_handle_t dev_handle)
302{
303 fat_cluster_t e0, e1;
304 unsigned fat_no;
305 int rc;
306
307 /* Check number of FATs. */
308 if (bs->fatcnt == 0)
309 return ENOTSUP;
310
311 /* Check total number of sectors. */
312
313 if (bs->totsec16 == 0 && bs->totsec32 == 0)
314 return ENOTSUP;
315
316 if (bs->totsec16 != 0 && bs->totsec32 != 0 &&
317 bs->totsec16 != bs->totsec32)
318 return ENOTSUP;
319
320 /* Check media descriptor. Must be between 0xf0 and 0xff. */
321 if ((bs->mdesc & 0xf0) != 0xf0)
322 return ENOTSUP;
323
324 /* Check number of sectors per FAT. */
325 if (bs->sec_per_fat == 0)
326 return ENOTSUP;
327
328 /*
329 * Check that the root directory entries take up whole blocks.
330 * This check is rather strict, but it allows us to treat the root
331 * directory and non-root directories uniformly in some places.
332 * It can be removed provided that functions such as fat_read() are
333 * sanitized to support file systems without this property.
334 */
335 if ((uint16_t_le2host(bs->root_ent_max) * sizeof(fat_dentry_t)) %
336 uint16_t_le2host(bs->bps) != 0)
337 return ENOTSUP;
338
339 /* Check signature of each FAT. */
340
341 for (fat_no = 0; fat_no < bs->fatcnt; fat_no++) {
342 rc = fat_get_cluster(bs, dev_handle, fat_no, 0, &e0);
343 if (rc != EOK)
344 return EIO;
345
346 rc = fat_get_cluster(bs, dev_handle, fat_no, 1, &e1);
347 if (rc != EOK)
348 return EIO;
349
350 /* Check that the first byte of the FAT contains the media descriptor. */
351 if ((e0 & 0xff) != bs->mdesc)
352 return ENOTSUP;
353
354 /*
355 * Check that remaining bits of the first two entries are
356 * set to one.
357 */
358 if ((e0 >> 8) != 0xff || e1 != 0xffff)
359 return ENOTSUP;
360 }
361
362 return EOK;
363}
364
365/*
366 * FAT libfs operations.
367 */
368
369int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
370{
371 return fat_node_get(rfn, dev_handle, 0);
372}
373
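/** Look up a directory entry in a parent directory by name.
 *
 * The parent is scanned linearly, block by block, and each valid dentry is
 * compared against the wanted component. On a miss, *rfn is set to NULL and
 * EOK is returned; errors are reported via the return value.
 */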
374int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
375{
376 fat_bs_t *bs;
377 fat_node_t *parentp = FAT_NODE(pfn);
378 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
379 unsigned i, j;
380 unsigned bps; /* bytes per sector */
381 unsigned dps; /* dentries per sector */
382 unsigned blocks;
383 fat_dentry_t *d;
384 block_t *b;
385 int rc;
386
387 fibril_mutex_lock(&parentp->idx->lock);
388 bs = block_bb_get(parentp->idx->dev_handle);
389 bps = uint16_t_le2host(bs->bps);
390 dps = bps / sizeof(fat_dentry_t);
391 blocks = parentp->size / bps;
392 for (i = 0; i < blocks; i++) {
393 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
394 if (rc != EOK) {
395 fibril_mutex_unlock(&parentp->idx->lock);
396 return rc;
397 }
398 for (j = 0; j < dps; j++) {
399 d = ((fat_dentry_t *)b->data) + j;
400 switch (fat_classify_dentry(d)) {
401 case FAT_DENTRY_SKIP:
402 case FAT_DENTRY_FREE:
403 continue;
404 case FAT_DENTRY_LAST:
405 /* miss */
406 rc = block_put(b);
407 fibril_mutex_unlock(&parentp->idx->lock);
408 *rfn = NULL;
409 return rc;
410 default:
411 case FAT_DENTRY_VALID:
412 fat_dentry_name_get(d, name);
413 break;
414 }
415 if (fat_dentry_namecmp(name, component) == 0) {
416 /* hit */
417 fat_node_t *nodep;
418 /*
419 * Assume tree hierarchy for locking. We
420 * already have the parent and now we are going
421 * to lock the child. Never lock in the opposite
422 * order.
423 */
424 fat_idx_t *idx = fat_idx_get_by_pos(
425 parentp->idx->dev_handle, parentp->firstc,
426 i * dps + j);
427 fibril_mutex_unlock(&parentp->idx->lock);
428 if (!idx) {
429 /*
430 * Can happen if memory is low or if we
431 * run out of 32-bit indices.
432 */
433 rc = block_put(b);
434 return (rc == EOK) ? ENOMEM : rc;
435 }
436 rc = fat_node_get_core(&nodep, idx);
437 fibril_mutex_unlock(&idx->lock);
438 if (rc != EOK) {
439 (void) block_put(b);
440 return rc;
441 }
442 *rfn = FS_NODE(nodep);
443 rc = block_put(b);
444 if (rc != EOK)
445 (void) fat_node_put(*rfn);
446 return rc;
447 }
448 }
449 rc = block_put(b);
450 if (rc != EOK) {
451 fibril_mutex_unlock(&parentp->idx->lock);
452 return rc;
453 }
454 }
455
456 fibril_mutex_unlock(&parentp->idx->lock);
457 *rfn = NULL;
458 return EOK;
459}
460
461/** Instantiate a FAT in-core node. */
462int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
463{
464 fat_node_t *nodep;
465 fat_idx_t *idxp;
466 int rc;
467
468 idxp = fat_idx_get_by_index(dev_handle, index);
469 if (!idxp) {
470 *rfn = NULL;
471 return EOK;
472 }
473 /* idxp->lock held */
474 rc = fat_node_get_core(&nodep, idxp);
475 fibril_mutex_unlock(&idxp->lock);
476 if (rc == EOK)
477 *rfn = FS_NODE(nodep);
478 return rc;
479}
480
481int fat_node_open(fs_node_t *fn)
482{
483 /*
484 * Opening a file is stateless; there is nothing
485 * to be done here.
486 */
487 return EOK;
488}
489
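/** Drop a reference to an in-core node.
 *
 * When the last reference is dropped, the node is either appended to the
 * list of cached free nodes (if it still has an index structure) or freed
 * outright.
 */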
490int fat_node_put(fs_node_t *fn)
491{
492 fat_node_t *nodep = FAT_NODE(fn);
493 bool destroy = false;
494
495 fibril_mutex_lock(&nodep->lock);
496 if (!--nodep->refcnt) {
497 if (nodep->idx) {
498 fibril_mutex_lock(&ffn_mutex);
499 list_append(&nodep->ffn_link, &ffn_head);
500 fibril_mutex_unlock(&ffn_mutex);
501 } else {
502 /*
503 * The node does not have any index structure associated
504 * with it. This can only mean that we are releasing
505 * the node after a failed attempt to allocate the index
506 * structure for it.
507 */
508 destroy = true;
509 }
510 }
511 fibril_mutex_unlock(&nodep->lock);
512 if (destroy) {
513 free(nodep->bp);
514 free(nodep);
515 }
516 return EOK;
517}
518
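/** Create a new file or directory node.
 *
 * For a directory, one cluster is allocated and zeroed out so that it
 * contains only unused dentries. The new node has a zero link count and is
 * not visible in any directory until it is linked via fat_link().
 */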
519int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
520{
521 fat_idx_t *idxp;
522 fat_node_t *nodep;
523 fat_bs_t *bs;
524 fat_cluster_t mcl, lcl;
525 uint16_t bps;
526 int rc;
527
528 bs = block_bb_get(dev_handle);
529 bps = uint16_t_le2host(bs->bps);
530 if (flags & L_DIRECTORY) {
531 /* allocate a cluster */
532 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
533 if (rc != EOK)
534 return rc;
535 /* populate the new cluster with unused dentries */
536 rc = fat_zero_cluster(bs, dev_handle, mcl);
537 if (rc != EOK) {
538 (void) fat_free_clusters(bs, dev_handle, mcl);
539 return rc;
540 }
541 }
542
543 rc = fat_node_get_new(&nodep);
544 if (rc != EOK) {
545 		if (flags & L_DIRECTORY) (void) fat_free_clusters(bs, dev_handle, mcl);
546 return rc;
547 }
548 rc = fat_idx_get_new(&idxp, dev_handle);
549 if (rc != EOK) {
550 		if (flags & L_DIRECTORY) (void) fat_free_clusters(bs, dev_handle, mcl);
551 (void) fat_node_put(FS_NODE(nodep));
552 return rc;
553 }
554 /* idxp->lock held */
555 if (flags & L_DIRECTORY) {
556 nodep->type = FAT_DIRECTORY;
557 nodep->firstc = mcl;
558 nodep->size = bps * bs->spc;
559 } else {
560 nodep->type = FAT_FILE;
561 nodep->firstc = FAT_CLST_RES0;
562 nodep->size = 0;
563 }
564 nodep->lnkcnt = 0; /* not linked anywhere */
565 nodep->refcnt = 1;
566 nodep->dirty = true;
567
568 nodep->idx = idxp;
569 idxp->nodep = nodep;
570
571 fibril_mutex_unlock(&idxp->lock);
572 *rfn = FS_NODE(nodep);
573 return EOK;
574}
575
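/** Destroy a node that is no longer linked from any directory.
 *
 * All clusters allocated to the node are freed and its index structure is
 * destroyed.
 */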
576int fat_destroy_node(fs_node_t *fn)
577{
578 fat_node_t *nodep = FAT_NODE(fn);
579 fat_bs_t *bs;
580 bool has_children;
581 int rc;
582
583 /*
584 * The node is not reachable from the file system. This means that the
585 * link count should be zero and that the index structure cannot be
586 * found in the position hash. Obviously, we need to lock neither the node
587 * nor its index structure.
588 */
589 assert(nodep->lnkcnt == 0);
590
591 /*
592 * The node may not have any children.
593 */
594 rc = fat_has_children(&has_children, fn);
595 if (rc != EOK)
596 return rc;
597 assert(!has_children);
598
599 bs = block_bb_get(nodep->idx->dev_handle);
600 if (nodep->firstc != FAT_CLST_RES0) {
601 assert(nodep->size);
602 /* Free all clusters allocated to the node. */
603 rc = fat_free_clusters(bs, nodep->idx->dev_handle,
604 nodep->firstc);
605 }
606
607 fat_idx_destroy(nodep->idx);
608 free(nodep->bp);
609 free(nodep);
610 return rc;
611}
612
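/** Link a child node into a parent directory under the given name.
 *
 * An unused dentry is searched for in the parent; if none is found, the
 * parent directory is grown by one cluster. For directories, the "." and
 * ".." entries are created on a best-effort basis.
 */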
613int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
614{
615 fat_node_t *parentp = FAT_NODE(pfn);
616 fat_node_t *childp = FAT_NODE(cfn);
617 fat_dentry_t *d;
618 fat_bs_t *bs;
619 block_t *b;
620 unsigned i, j;
621 uint16_t bps;
622 unsigned dps;
623 unsigned blocks;
624 fat_cluster_t mcl, lcl;
625 int rc;
626
627 fibril_mutex_lock(&childp->lock);
628 if (childp->lnkcnt == 1) {
629 /*
630 * On FAT, we don't support multiple hard links.
631 */
632 fibril_mutex_unlock(&childp->lock);
633 return EMLINK;
634 }
635 assert(childp->lnkcnt == 0);
636 fibril_mutex_unlock(&childp->lock);
637
638 if (!fat_dentry_name_verify(name)) {
639 /*
640 * Attempt to create unsupported name.
641 */
642 return ENOTSUP;
643 }
644
645 /*
646 * Get an unused dentry in the parent, or grow the parent and allocate
647 * a new one.
648 */
649
650 fibril_mutex_lock(&parentp->idx->lock);
651 bs = block_bb_get(parentp->idx->dev_handle);
652 bps = uint16_t_le2host(bs->bps);
653 dps = bps / sizeof(fat_dentry_t);
654
655 blocks = parentp->size / bps;
656
657 for (i = 0; i < blocks; i++) {
658 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
659 if (rc != EOK) {
660 fibril_mutex_unlock(&parentp->idx->lock);
661 return rc;
662 }
663 for (j = 0; j < dps; j++) {
664 d = ((fat_dentry_t *)b->data) + j;
665 switch (fat_classify_dentry(d)) {
666 case FAT_DENTRY_SKIP:
667 case FAT_DENTRY_VALID:
668 /* skipping used and meta entries */
669 continue;
670 case FAT_DENTRY_FREE:
671 case FAT_DENTRY_LAST:
672 /* found an empty slot */
673 goto hit;
674 }
675 }
676 rc = block_put(b);
677 if (rc != EOK) {
678 fibril_mutex_unlock(&parentp->idx->lock);
679 return rc;
680 }
681 }
682 j = 0;
683
684 /*
685 * We need to grow the parent in order to create a new unused dentry.
686 */
687 if (parentp->firstc == FAT_CLST_ROOT) {
688 /* Can't grow the root directory. */
689 fibril_mutex_unlock(&parentp->idx->lock);
690 return ENOSPC;
691 }
692 rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
693 if (rc != EOK) {
694 fibril_mutex_unlock(&parentp->idx->lock);
695 return rc;
696 }
697 rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
698 if (rc != EOK) {
699 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
700 fibril_mutex_unlock(&parentp->idx->lock);
701 return rc;
702 }
703 rc = fat_append_clusters(bs, parentp, mcl);
704 if (rc != EOK) {
705 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
706 fibril_mutex_unlock(&parentp->idx->lock);
707 return rc;
708 }
709 parentp->size += bps * bs->spc;
710 parentp->dirty = true; /* need to sync node */
711 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
712 if (rc != EOK) {
713 fibril_mutex_unlock(&parentp->idx->lock);
714 return rc;
715 }
716 d = (fat_dentry_t *)b->data;
717
718hit:
719 /*
720 * At this point we only establish the link between the parent and the
721 * child. The dentry, except for the name and the extension, will remain
722 * uninitialized until the corresponding node is synced. Thus the valid
723 * dentry data is kept in the child node structure.
724 */
725 memset(d, 0, sizeof(fat_dentry_t));
726 fat_dentry_name_set(d, name);
727 b->dirty = true; /* need to sync block */
728 rc = block_put(b);
729 fibril_mutex_unlock(&parentp->idx->lock);
730 if (rc != EOK)
731 return rc;
732
733 fibril_mutex_lock(&childp->idx->lock);
734
735 /*
736 * If possible, create the Sub-directory Identifier Entry and the
737 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
738 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
739 * not use them anyway, so this is rather a sign of our good will.
740 */
741 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
742 if (rc != EOK) {
743 /*
744 * Rather than returning an error, simply skip the creation of
745 * these two entries.
746 */
747 goto skip_dots;
748 }
749 d = (fat_dentry_t *)b->data;
750 if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
751 str_cmp(d->name, FAT_NAME_DOT) == 0) {
752 memset(d, 0, sizeof(fat_dentry_t));
753 str_cpy(d->name, 8, FAT_NAME_DOT);
754 str_cpy(d->ext, 3, FAT_EXT_PAD);
755 d->attr = FAT_ATTR_SUBDIR;
756 d->firstc = host2uint16_t_le(childp->firstc);
757 /* TODO: initialize also the date/time members. */
758 }
759 d++;
760 if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
761 str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
762 memset(d, 0, sizeof(fat_dentry_t));
763 str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
764 str_cpy(d->ext, 3, FAT_EXT_PAD);
765 d->attr = FAT_ATTR_SUBDIR;
766 d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
767 host2uint16_t_le(FAT_CLST_RES0) :
768 host2uint16_t_le(parentp->firstc);
769 /* TODO: initialize also the date/time members. */
770 }
771 b->dirty = true; /* need to sync block */
772 /*
773 * Ignore the return value as we would have fallen through on error
774 * anyway.
775 */
776 (void) block_put(b);
777skip_dots:
778
779 childp->idx->pfc = parentp->firstc;
780 childp->idx->pdi = i * dps + j;
781 fibril_mutex_unlock(&childp->idx->lock);
782
783 fibril_mutex_lock(&childp->lock);
784 childp->lnkcnt = 1;
785 childp->dirty = true; /* need to sync node */
786 fibril_mutex_unlock(&childp->lock);
787
788 /*
789 * Hash in the index structure into the position hash.
790 */
791 fat_idx_hashin(childp->idx);
792
793 return EOK;
794}
795
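/** Remove the link between a parent directory and a child node.
 *
 * The child's dentry is marked as erased and the child's index structure is
 * removed from the position hash. A directory can only be unlinked if it
 * has no children.
 */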
796int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
797{
798 fat_node_t *parentp = FAT_NODE(pfn);
799 fat_node_t *childp = FAT_NODE(cfn);
800 fat_bs_t *bs;
801 fat_dentry_t *d;
802 uint16_t bps;
803 block_t *b;
804 bool has_children;
805 int rc;
806
807 if (!parentp)
808 return EBUSY;
809
810 rc = fat_has_children(&has_children, cfn);
811 if (rc != EOK)
812 return rc;
813 if (has_children)
814 return ENOTEMPTY;
815
816 fibril_mutex_lock(&parentp->lock);
817 fibril_mutex_lock(&childp->lock);
818 assert(childp->lnkcnt == 1);
819 fibril_mutex_lock(&childp->idx->lock);
820 bs = block_bb_get(childp->idx->dev_handle);
821 bps = uint16_t_le2host(bs->bps);
822
823 rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
824 (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
825 BLOCK_FLAGS_NONE);
826 if (rc != EOK)
827 goto error;
828 d = (fat_dentry_t *)b->data +
829 (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
830 /* mark the dentry as not-currently-used */
831 d->name[0] = FAT_DENTRY_ERASED;
832 b->dirty = true; /* need to sync block */
833 rc = block_put(b);
834 if (rc != EOK)
835 goto error;
836
837 /* remove the index structure from the position hash */
838 fat_idx_hashout(childp->idx);
839 /* clear position information */
840 childp->idx->pfc = FAT_CLST_RES0;
841 childp->idx->pdi = 0;
842 fibril_mutex_unlock(&childp->idx->lock);
843 childp->lnkcnt = 0;
844 childp->dirty = true;
845 fibril_mutex_unlock(&childp->lock);
846 fibril_mutex_unlock(&parentp->lock);
847
848 return EOK;
849
850error:
851 	fibril_mutex_unlock(&parentp->lock);
852 fibril_mutex_unlock(&childp->lock);
853 fibril_mutex_unlock(&childp->idx->lock);
854 return rc;
855}
856
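/** Determine whether a node has any children.
 *
 * Only directories can have children; a directory has children if it
 * contains at least one valid dentry.
 */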
857int fat_has_children(bool *has_children, fs_node_t *fn)
858{
859 fat_bs_t *bs;
860 fat_node_t *nodep = FAT_NODE(fn);
861 unsigned bps;
862 unsigned dps;
863 unsigned blocks;
864 block_t *b;
865 unsigned i, j;
866 int rc;
867
868 if (nodep->type != FAT_DIRECTORY) {
869 *has_children = false;
870 return EOK;
871 }
872
873 fibril_mutex_lock(&nodep->idx->lock);
874 bs = block_bb_get(nodep->idx->dev_handle);
875 bps = uint16_t_le2host(bs->bps);
876 dps = bps / sizeof(fat_dentry_t);
877
878 blocks = nodep->size / bps;
879
880 for (i = 0; i < blocks; i++) {
881 fat_dentry_t *d;
882
883 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
884 if (rc != EOK) {
885 fibril_mutex_unlock(&nodep->idx->lock);
886 return rc;
887 }
888 for (j = 0; j < dps; j++) {
889 d = ((fat_dentry_t *)b->data) + j;
890 switch (fat_classify_dentry(d)) {
891 case FAT_DENTRY_SKIP:
892 case FAT_DENTRY_FREE:
893 continue;
894 case FAT_DENTRY_LAST:
895 rc = block_put(b);
896 fibril_mutex_unlock(&nodep->idx->lock);
897 *has_children = false;
898 return rc;
899 default:
900 case FAT_DENTRY_VALID:
901 rc = block_put(b);
902 fibril_mutex_unlock(&nodep->idx->lock);
903 *has_children = true;
904 return rc;
905 }
906 }
907 rc = block_put(b);
908 if (rc != EOK) {
909 fibril_mutex_unlock(&nodep->idx->lock);
910 return rc;
911 }
912 }
913
914 fibril_mutex_unlock(&nodep->idx->lock);
915 *has_children = false;
916 return EOK;
917}
918
919
920fs_index_t fat_index_get(fs_node_t *fn)
921{
922 return FAT_NODE(fn)->idx->index;
923}
924
925size_t fat_size_get(fs_node_t *fn)
926{
927 return FAT_NODE(fn)->size;
928}
929
930unsigned fat_lnkcnt_get(fs_node_t *fn)
931{
932 return FAT_NODE(fn)->lnkcnt;
933}
934
935char fat_plb_get_char(unsigned pos)
936{
937 return fat_reg.plb_ro[pos % PLB_SIZE];
938}
939
940bool fat_is_directory(fs_node_t *fn)
941{
942 return FAT_NODE(fn)->type == FAT_DIRECTORY;
943}
944
945bool fat_is_file(fs_node_t *fn)
946{
947 return FAT_NODE(fn)->type == FAT_FILE;
948}
949
950dev_handle_t fat_device_get(fs_node_t *node)
951{
952 return 0;
953}
954
955/** libfs operations */
956libfs_ops_t fat_libfs_ops = {
957 .root_get = fat_root_get,
958 .match = fat_match,
959 .node_get = fat_node_get,
960 .node_open = fat_node_open,
961 .node_put = fat_node_put,
962 .create = fat_create_node,
963 .destroy = fat_destroy_node,
964 .link = fat_link,
965 .unlink = fat_unlink,
966 .has_children = fat_has_children,
967 .index_get = fat_index_get,
968 .size_get = fat_size_get,
969 .lnkcnt_get = fat_lnkcnt_get,
970 .plb_get_char = fat_plb_get_char,
971 .is_directory = fat_is_directory,
972 .is_file = fat_is_file,
973 .device_get = fat_device_get
974};
975
976/*
977 * VFS operations.
978 */
979
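/** Mount a FAT file system instance on the given device.
 *
 * The boot sector is read and sanity-checked, the block cache and the index
 * structures are initialized, and the in-core root directory node is
 * created. The root node index, size and link count are returned in the
 * answer.
 */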
980void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
981{
982 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
983 enum cache_mode cmode;
984 fat_bs_t *bs;
985 uint16_t bps;
986 uint16_t rde;
987 int rc;
988
989 /* accept the mount options */
990 ipc_callid_t callid;
991 size_t size;
992 if (!async_data_write_receive(&callid, &size)) {
993 ipc_answer_0(callid, EINVAL);
994 ipc_answer_0(rid, EINVAL);
995 return;
996 }
997 char *opts = malloc(size + 1);
998 if (!opts) {
999 ipc_answer_0(callid, ENOMEM);
1000 ipc_answer_0(rid, ENOMEM);
1001 return;
1002 }
1003 ipcarg_t retval = async_data_write_finalize(callid, opts, size);
1004 if (retval != EOK) {
1005 ipc_answer_0(rid, retval);
1006 free(opts);
1007 return;
1008 }
1009 opts[size] = '\0';
1010
1011 /* Check for option enabling write through. */
1012 if (str_cmp(opts, "wtcache") == 0)
1013 cmode = CACHE_MODE_WT;
1014 else
1015 cmode = CACHE_MODE_WB;
1016
1017 /* initialize libblock */
1018 rc = block_init(dev_handle, BS_SIZE);
1019 if (rc != EOK) {
1020 ipc_answer_0(rid, rc);
1021 return;
1022 }
1023
1024 /* prepare the boot block */
1025 rc = block_bb_read(dev_handle, BS_BLOCK);
1026 if (rc != EOK) {
1027 block_fini(dev_handle);
1028 ipc_answer_0(rid, rc);
1029 return;
1030 }
1031
1032 /* get the buffer with the boot sector */
1033 bs = block_bb_get(dev_handle);
1034
1035 /* Read the number of root directory entries. */
1036 bps = uint16_t_le2host(bs->bps);
1037 rde = uint16_t_le2host(bs->root_ent_max);
1038
1039 if (bps != BS_SIZE) {
1040 block_fini(dev_handle);
1041 ipc_answer_0(rid, ENOTSUP);
1042 return;
1043 }
1044
1045 /* Initialize the block cache */
1046 rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
1047 if (rc != EOK) {
1048 block_fini(dev_handle);
1049 ipc_answer_0(rid, rc);
1050 return;
1051 }
1052
1053 /* Do some simple sanity checks on the file system. */
1054 rc = fat_sanity_check(bs, dev_handle);
1055 if (rc != EOK) {
1056 block_fini(dev_handle);
1057 ipc_answer_0(rid, rc);
1058 return;
1059 }
1060
1061 rc = fat_idx_init_by_dev_handle(dev_handle);
1062 if (rc != EOK) {
1063 block_fini(dev_handle);
1064 ipc_answer_0(rid, rc);
1065 return;
1066 }
1067
1068 /* Initialize the root node. */
1069 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
1070 if (!rfn) {
1071 block_fini(dev_handle);
1072 fat_idx_fini_by_dev_handle(dev_handle);
1073 ipc_answer_0(rid, ENOMEM);
1074 return;
1075 }
1076 fs_node_initialize(rfn);
1077 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
1078 if (!rootp) {
1079 free(rfn);
1080 block_fini(dev_handle);
1081 fat_idx_fini_by_dev_handle(dev_handle);
1082 ipc_answer_0(rid, ENOMEM);
1083 return;
1084 }
1085 fat_node_initialize(rootp);
1086
1087 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
1088 if (!ridxp) {
1089 free(rfn);
1090 free(rootp);
1091 block_fini(dev_handle);
1092 fat_idx_fini_by_dev_handle(dev_handle);
1093 ipc_answer_0(rid, ENOMEM);
1094 return;
1095 }
1096 assert(ridxp->index == 0);
1097 /* ridxp->lock held */
1098
1099 rootp->type = FAT_DIRECTORY;
1100 rootp->firstc = FAT_CLST_ROOT;
1101 rootp->refcnt = 1;
1102 rootp->lnkcnt = 0; /* FS root is not linked */
1103 rootp->size = rde * sizeof(fat_dentry_t);
1104 rootp->idx = ridxp;
1105 ridxp->nodep = rootp;
1106 rootp->bp = rfn;
1107 rfn->data = rootp;
1108
1109 fibril_mutex_unlock(&ridxp->lock);
1110
1111 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
1112}
1113
1114void fat_mount(ipc_callid_t rid, ipc_call_t *request)
1115{
1116 libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1117}
1118
1119void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
1120{
1121 libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1122}
1123
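/** Read from a file or list the entries of a directory.
 *
 * For regular files, at most one block is read and possibly fewer bytes
 * than requested are returned. For directories, the position is an index
 * into the array of all dentries; the name of the next valid dentry is
 * returned and the number of consumed dentry positions is reported back.
 */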
1124void fat_read(ipc_callid_t rid, ipc_call_t *request)
1125{
1126 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1127 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1128 off_t pos = (off_t)IPC_GET_ARG3(*request);
1129 fs_node_t *fn;
1130 fat_node_t *nodep;
1131 fat_bs_t *bs;
1132 uint16_t bps;
1133 size_t bytes;
1134 block_t *b;
1135 int rc;
1136
1137 rc = fat_node_get(&fn, dev_handle, index);
1138 if (rc != EOK) {
1139 ipc_answer_0(rid, rc);
1140 return;
1141 }
1142 if (!fn) {
1143 ipc_answer_0(rid, ENOENT);
1144 return;
1145 }
1146 nodep = FAT_NODE(fn);
1147
1148 ipc_callid_t callid;
1149 size_t len;
1150 if (!async_data_read_receive(&callid, &len)) {
1151 fat_node_put(fn);
1152 ipc_answer_0(callid, EINVAL);
1153 ipc_answer_0(rid, EINVAL);
1154 return;
1155 }
1156
1157 bs = block_bb_get(dev_handle);
1158 bps = uint16_t_le2host(bs->bps);
1159
1160 if (nodep->type == FAT_FILE) {
1161 /*
1162 * Our strategy for regular file reads is to read one block at
1163 * most and make use of the possibility to return less data than
1164 * requested. This keeps the code very simple.
1165 */
1166 if (pos >= nodep->size) {
1167 /* reading beyond the EOF */
1168 bytes = 0;
1169 (void) async_data_read_finalize(callid, NULL, 0);
1170 } else {
1171 bytes = min(len, bps - pos % bps);
1172 bytes = min(bytes, nodep->size - pos);
1173 rc = fat_block_get(&b, bs, nodep, pos / bps,
1174 BLOCK_FLAGS_NONE);
1175 if (rc != EOK) {
1176 fat_node_put(fn);
1177 ipc_answer_0(callid, rc);
1178 ipc_answer_0(rid, rc);
1179 return;
1180 }
1181 (void) async_data_read_finalize(callid, b->data + pos % bps,
1182 bytes);
1183 rc = block_put(b);
1184 if (rc != EOK) {
1185 fat_node_put(fn);
1186 ipc_answer_0(rid, rc);
1187 return;
1188 }
1189 }
1190 } else {
1191 unsigned bnum;
1192 off_t spos = pos;
1193 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
1194 fat_dentry_t *d;
1195
1196 assert(nodep->type == FAT_DIRECTORY);
1197 assert(nodep->size % bps == 0);
1198 assert(bps % sizeof(fat_dentry_t) == 0);
1199
1200 /*
1201 * Our strategy for readdir() is to use the position pointer as
1202 * an index into the array of all dentries. On entry, it points
1203 * to the first unread dentry. If we skip any dentries, we bump
1204 * the position pointer accordingly.
1205 */
1206 bnum = (pos * sizeof(fat_dentry_t)) / bps;
1207 while (bnum < nodep->size / bps) {
1208 off_t o;
1209
1210 rc = fat_block_get(&b, bs, nodep, bnum,
1211 BLOCK_FLAGS_NONE);
1212 if (rc != EOK)
1213 goto err;
1214 for (o = pos % (bps / sizeof(fat_dentry_t));
1215 o < bps / sizeof(fat_dentry_t);
1216 o++, pos++) {
1217 d = ((fat_dentry_t *)b->data) + o;
1218 switch (fat_classify_dentry(d)) {
1219 case FAT_DENTRY_SKIP:
1220 case FAT_DENTRY_FREE:
1221 continue;
1222 case FAT_DENTRY_LAST:
1223 rc = block_put(b);
1224 if (rc != EOK)
1225 goto err;
1226 goto miss;
1227 default:
1228 case FAT_DENTRY_VALID:
1229 fat_dentry_name_get(d, name);
1230 rc = block_put(b);
1231 if (rc != EOK)
1232 goto err;
1233 goto hit;
1234 }
1235 }
1236 rc = block_put(b);
1237 if (rc != EOK)
1238 goto err;
1239 bnum++;
1240 }
1241miss:
1242 rc = fat_node_put(fn);
1243 ipc_answer_0(callid, rc != EOK ? rc : ENOENT);
1244 ipc_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
1245 return;
1246
1247err:
1248 (void) fat_node_put(fn);
1249 ipc_answer_0(callid, rc);
1250 ipc_answer_0(rid, rc);
1251 return;
1252
1253hit:
1254 (void) async_data_read_finalize(callid, name, str_size(name) + 1);
1255 bytes = (pos - spos) + 1;
1256 }
1257
1258 rc = fat_node_put(fn);
1259 ipc_answer_1(rid, rc, (ipcarg_t)bytes);
1260}
1261
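/** Write to a file.
 *
 * At most one block's worth of data is written per request. Writing past
 * the currently allocated clusters allocates a new zeroed cluster chain and
 * appends it to the node; any gap between the old and the new end of file
 * is zero-filled.
 */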
1262void fat_write(ipc_callid_t rid, ipc_call_t *request)
1263{
1264 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1265 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1266 off_t pos = (off_t)IPC_GET_ARG3(*request);
1267 fs_node_t *fn;
1268 fat_node_t *nodep;
1269 fat_bs_t *bs;
1270 size_t bytes, size;
1271 block_t *b;
1272 uint16_t bps;
1273 unsigned spc;
1274 unsigned bpc; /* bytes per cluster */
1275 off_t boundary;
1276 int flags = BLOCK_FLAGS_NONE;
1277 int rc;
1278
1279 rc = fat_node_get(&fn, dev_handle, index);
1280 if (rc != EOK) {
1281 ipc_answer_0(rid, rc);
1282 return;
1283 }
1284 if (!fn) {
1285 ipc_answer_0(rid, ENOENT);
1286 return;
1287 }
1288 nodep = FAT_NODE(fn);
1289
1290 ipc_callid_t callid;
1291 size_t len;
1292 if (!async_data_write_receive(&callid, &len)) {
1293 (void) fat_node_put(fn);
1294 ipc_answer_0(callid, EINVAL);
1295 ipc_answer_0(rid, EINVAL);
1296 return;
1297 }
1298
1299 bs = block_bb_get(dev_handle);
1300 bps = uint16_t_le2host(bs->bps);
1301 spc = bs->spc;
1302 bpc = bps * spc;
1303
1304 /*
1305 * In all scenarios, we will attempt to write out at most one block's
1306 * worth of data. There might be more efficient approaches, but this one
1307 * greatly simplifies fat_write(). Note that we can afford to do this
1308 * because the client must be ready to handle a return value signaling
1309 * that fewer bytes were written than requested.
1310 */
1311 bytes = min(len, bps - pos % bps);
1312 if (bytes == bps)
1313 flags |= BLOCK_FLAGS_NOREAD;
1314
1315 boundary = ROUND_UP(nodep->size, bpc);
1316 if (pos < boundary) {
1317 /*
1318 * This is the easier case - we are either overwriting already
1319 * existing contents or writing behind the EOF, but still within
1320 * the limits of the last cluster. The node size may grow to the
1321 * next block size boundary.
1322 */
1323 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
1324 if (rc != EOK) {
1325 (void) fat_node_put(fn);
1326 ipc_answer_0(callid, rc);
1327 ipc_answer_0(rid, rc);
1328 return;
1329 }
1330 rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
1331 if (rc != EOK) {
1332 (void) fat_node_put(fn);
1333 ipc_answer_0(callid, rc);
1334 ipc_answer_0(rid, rc);
1335 return;
1336 }
1337 (void) async_data_write_finalize(callid, b->data + pos % bps,
1338 bytes);
1339 b->dirty = true; /* need to sync block */
1340 rc = block_put(b);
1341 if (rc != EOK) {
1342 (void) fat_node_put(fn);
1343 ipc_answer_0(rid, rc);
1344 return;
1345 }
1346 if (pos + bytes > nodep->size) {
1347 nodep->size = pos + bytes;
1348 nodep->dirty = true; /* need to sync node */
1349 }
1350 size = nodep->size;
1351 rc = fat_node_put(fn);
1352 		ipc_answer_2(rid, rc, bytes, size);
1353 return;
1354 } else {
1355 /*
1356 * This is the more difficult case. We must allocate new
1357 * clusters for the node and zero them out.
1358 */
1359 unsigned nclsts;
1360 fat_cluster_t mcl, lcl;
1361
1362 nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
1363 /* create an independent chain of nclsts clusters in all FATs */
1364 rc = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
1365 if (rc != EOK) {
1366 /* could not allocate a chain of nclsts clusters */
1367 (void) fat_node_put(fn);
1368 ipc_answer_0(callid, rc);
1369 ipc_answer_0(rid, rc);
1370 return;
1371 }
1372 /* zero fill any gaps */
1373 rc = fat_fill_gap(bs, nodep, mcl, pos);
1374 if (rc != EOK) {
1375 (void) fat_free_clusters(bs, dev_handle, mcl);
1376 (void) fat_node_put(fn);
1377 ipc_answer_0(callid, rc);
1378 ipc_answer_0(rid, rc);
1379 return;
1380 }
1381 rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
1382 flags);
1383 if (rc != EOK) {
1384 (void) fat_free_clusters(bs, dev_handle, mcl);
1385 (void) fat_node_put(fn);
1386 ipc_answer_0(callid, rc);
1387 ipc_answer_0(rid, rc);
1388 return;
1389 }
1390 (void) async_data_write_finalize(callid, b->data + pos % bps,
1391 bytes);
1392 b->dirty = true; /* need to sync block */
1393 rc = block_put(b);
1394 if (rc != EOK) {
1395 (void) fat_free_clusters(bs, dev_handle, mcl);
1396 (void) fat_node_put(fn);
1397 ipc_answer_0(rid, rc);
1398 return;
1399 }
1400 /*
1401 * Append the cluster chain starting in mcl to the end of the
1402 * node's cluster chain.
1403 */
1404 rc = fat_append_clusters(bs, nodep, mcl);
1405 if (rc != EOK) {
1406 (void) fat_free_clusters(bs, dev_handle, mcl);
1407 (void) fat_node_put(fn);
1408 ipc_answer_0(rid, rc);
1409 return;
1410 }
1411 nodep->size = size = pos + bytes;
1412 nodep->dirty = true; /* need to sync node */
1413 rc = fat_node_put(fn);
1414 ipc_answer_2(rid, rc, bytes, size);
1415 return;
1416 }
1417}
1418
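/** Truncate a file to the given size.
 *
 * Growing a node this way is not supported and yields EINVAL; shrinking
 * deallocates any clusters that lie entirely beyond the new size.
 */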
1419void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1420{
1421 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1422 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1423 size_t size = (off_t)IPC_GET_ARG3(*request);
1424 fs_node_t *fn;
1425 fat_node_t *nodep;
1426 fat_bs_t *bs;
1427 uint16_t bps;
1428 uint8_t spc;
1429 unsigned bpc; /* bytes per cluster */
1430 int rc;
1431
1432 rc = fat_node_get(&fn, dev_handle, index);
1433 if (rc != EOK) {
1434 ipc_answer_0(rid, rc);
1435 return;
1436 }
1437 if (!fn) {
1438 ipc_answer_0(rid, ENOENT);
1439 return;
1440 }
1441 nodep = FAT_NODE(fn);
1442
1443 bs = block_bb_get(dev_handle);
1444 bps = uint16_t_le2host(bs->bps);
1445 spc = bs->spc;
1446 bpc = bps * spc;
1447
1448 if (nodep->size == size) {
1449 rc = EOK;
1450 } else if (nodep->size < size) {
1451 /*
1452 * The standard says we have the freedom to grow the node.
1453 * For now, we simply return an error.
1454 */
1455 rc = EINVAL;
1456 } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
1457 /*
1458 * The node will be shrunk, but no clusters will be deallocated.
1459 */
1460 nodep->size = size;
1461 nodep->dirty = true; /* need to sync node */
1462 rc = EOK;
1463 } else {
1464 /*
1465 * The node will be shrunk, clusters will be deallocated.
1466 */
1467 if (size == 0) {
1468 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1469 if (rc != EOK)
1470 goto out;
1471 } else {
1472 fat_cluster_t lastc;
1473 rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
1474 &lastc, NULL, (size - 1) / bpc);
1475 if (rc != EOK)
1476 goto out;
1477 rc = fat_chop_clusters(bs, nodep, lastc);
1478 if (rc != EOK)
1479 goto out;
1480 }
1481 nodep->size = size;
1482 nodep->dirty = true; /* need to sync node */
1483 rc = EOK;
1484 }
1485out:
1486 fat_node_put(fn);
1487 ipc_answer_0(rid, rc);
1488 return;
1489}
1490
1491void fat_close(ipc_callid_t rid, ipc_call_t *request)
1492{
1493 ipc_answer_0(rid, EOK);
1494}
1495
1496void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1497{
1498 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1499 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1500 fs_node_t *fn;
1501 int rc;
1502
1503 rc = fat_node_get(&fn, dev_handle, index);
1504 if (rc != EOK) {
1505 ipc_answer_0(rid, rc);
1506 return;
1507 }
1508 if (!fn) {
1509 ipc_answer_0(rid, ENOENT);
1510 return;
1511 }
1512
1513 rc = fat_destroy_node(fn);
1514 ipc_answer_0(rid, rc);
1515}
1516
1517void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
1518{
1519 libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1520}
1521
1522void fat_stat(ipc_callid_t rid, ipc_call_t *request)
1523{
1524 libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1525}
1526
1527void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1528{
1529 /* Dummy implementation */
1530 ipc_answer_0(rid, EOK);
1531}
1532
1533/**
1534 * @}
1535 */