source: mainline/uspace/srv/fs/fat/fat_ops.c@ 7a23d60

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 7a23d60 was 7a23d60, checked in by Jakub Jermar <jakub@…>, 15 years ago

Use convenience macros for accessing FAT boot sector in fat_ops.c as well.

  • Property mode set to 100644
File size: 36.8 KB
RevLine 
[be815bc]1/*
[a2aa1dec]2 * Copyright (c) 2008 Jakub Jermar
[be815bc]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
[033ef7d3]39#include "fat_dentry.h"
40#include "fat_fat.h"
[6364d3c]41#include "../../vfs/vfs.h"
[a2aa1dec]42#include <libfs.h>
[fc840d9]43#include <libblock.h>
[be815bc]44#include <ipc/ipc.h>
[7a35204a]45#include <ipc/services.h>
46#include <ipc/devmap.h>
[ed903174]47#include <macros.h>
[be815bc]48#include <async.h>
49#include <errno.h>
[19f857a]50#include <str.h>
[776f2e6]51#include <byteorder.h>
[d9c8c81]52#include <adt/hash_table.h>
53#include <adt/list.h>
[e1e3b26]54#include <assert.h>
[1e4cada]55#include <fibril_synch.h>
[7a35204a]56#include <sys/mman.h>
[8d32152]57#include <align.h>
[e1e3b26]58
/** Extract the FAT-specific node from a generic VFS node (NULL-safe). */
#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
/** Get the generic VFS wrapper of a FAT node (NULL-safe). */
#define FS_NODE(node) ((node) ? (node)->bp : NULL)

/** Directory entries per sector, derived from the boot sector. */
#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
/** Bytes per cluster, derived from the boot sector. */
#define BPC(bs) (BPS((bs)) * SPC((bs)))
64
[6ebe721]65/** Mutex protecting the list of cached free FAT nodes. */
66static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
[add5835]67
68/** List of cached free FAT nodes. */
69static LIST_INITIALIZE(ffn_head);
[6364d3c]70
[0fc1e5d]71/*
72 * Forward declarations of FAT libfs operations.
73 */
74static int fat_root_get(fs_node_t **, dev_handle_t);
75static int fat_match(fs_node_t **, fs_node_t *, const char *);
76static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
[1313ee9]77static int fat_node_open(fs_node_t *);
[0fc1e5d]78static int fat_node_put(fs_node_t *);
79static int fat_create_node(fs_node_t **, dev_handle_t, int);
80static int fat_destroy_node(fs_node_t *);
81static int fat_link(fs_node_t *, fs_node_t *, const char *);
82static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
83static int fat_has_children(bool *, fs_node_t *);
84static fs_index_t fat_index_get(fs_node_t *);
[ed903174]85static aoff64_t fat_size_get(fs_node_t *);
[0fc1e5d]86static unsigned fat_lnkcnt_get(fs_node_t *);
87static char fat_plb_get_char(unsigned);
88static bool fat_is_directory(fs_node_t *);
89static bool fat_is_file(fs_node_t *node);
[1313ee9]90static dev_handle_t fat_device_get(fs_node_t *node);
[0fc1e5d]91
92/*
93 * Helper functions.
94 */
[e1e3b26]95static void fat_node_initialize(fat_node_t *node)
[a2aa1dec]96{
[6ebe721]97 fibril_mutex_initialize(&node->lock);
[b6035ba]98 node->bp = NULL;
[869e546]99 node->idx = NULL;
[e1e3b26]100 node->type = 0;
101 link_initialize(&node->ffn_link);
102 node->size = 0;
103 node->lnkcnt = 0;
104 node->refcnt = 0;
105 node->dirty = false;
[377cce8]106 node->lastc_cached_valid = false;
107 node->lastc_cached_value = FAT_CLST_LAST1;
[e1e3b26]108}
109
[4098e38]110static int fat_node_sync(fat_node_t *node)
[e1e3b26]111{
[7858bc5f]112 block_t *b;
113 fat_bs_t *bs;
[beb17734]114 fat_dentry_t *d;
[c91f2d1b]115 int rc;
[beb17734]116
117 assert(node->dirty);
118
[7858bc5f]119 bs = block_bb_get(node->idx->dev_handle);
[beb17734]120
121 /* Read the block that contains the dentry of interest. */
[684b655]122 rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
[7a23d60]123 (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
124 BLOCK_FLAGS_NONE);
[4098e38]125 if (rc != EOK)
126 return rc;
[beb17734]127
[7a23d60]128 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));
[beb17734]129
130 d->firstc = host2uint16_t_le(node->firstc);
[a5da446]131 if (node->type == FAT_FILE) {
[beb17734]132 d->size = host2uint32_t_le(node->size);
[a5da446]133 } else if (node->type == FAT_DIRECTORY) {
134 d->attr = FAT_ATTR_SUBDIR;
135 }
136
137 /* TODO: update other fields? (e.g time fields) */
[beb17734]138
139 b->dirty = true; /* need to sync block */
[c91f2d1b]140 rc = block_put(b);
[4098e38]141 return rc;
[e1e3b26]142}
143
[430de97]144static int fat_node_fini_by_dev_handle(dev_handle_t dev_handle)
145{
146 link_t *lnk;
147 fat_node_t *nodep;
148 int rc;
149
150 /*
151 * We are called from fat_unmounted() and assume that there are already
152 * no nodes belonging to this instance with non-zero refcount. Therefore
153 * it is sufficient to clean up only the FAT free node list.
154 */
155
156restart:
157 fibril_mutex_lock(&ffn_mutex);
158 for (lnk = ffn_head.next; lnk != &ffn_head; lnk = lnk->next) {
159 nodep = list_get_instance(lnk, fat_node_t, ffn_link);
160 if (!fibril_mutex_trylock(&nodep->lock)) {
161 fibril_mutex_unlock(&ffn_mutex);
162 goto restart;
163 }
164 if (!fibril_mutex_trylock(&nodep->idx->lock)) {
165 fibril_mutex_unlock(&nodep->lock);
166 fibril_mutex_unlock(&ffn_mutex);
167 goto restart;
168 }
169 if (nodep->idx->dev_handle != dev_handle) {
170 fibril_mutex_unlock(&nodep->idx->lock);
171 fibril_mutex_unlock(&nodep->lock);
172 continue;
173 }
174
175 list_remove(&nodep->ffn_link);
176 fibril_mutex_unlock(&ffn_mutex);
177
178 /*
179 * We can unlock the node and its index structure because we are
180 * the last player on this playground and VFS is preventing new
181 * players from entering.
182 */
183 fibril_mutex_unlock(&nodep->idx->lock);
184 fibril_mutex_unlock(&nodep->lock);
185
186 if (nodep->dirty) {
187 rc = fat_node_sync(nodep);
188 if (rc != EOK)
189 return rc;
190 }
191 nodep->idx->nodep = NULL;
192 free(nodep->bp);
193 free(nodep);
194
195 /* Need to restart because we changed the ffn_head list. */
196 goto restart;
197 }
198 fibril_mutex_unlock(&ffn_mutex);
199
200 return EOK;
201}
202
[17bf658]203static int fat_node_get_new(fat_node_t **nodepp)
[9a3d5f0]204{
[b6035ba]205 fs_node_t *fn;
[9a3d5f0]206 fat_node_t *nodep;
[4098e38]207 int rc;
[9a3d5f0]208
[6ebe721]209 fibril_mutex_lock(&ffn_mutex);
[9a3d5f0]210 if (!list_empty(&ffn_head)) {
211 /* Try to use a cached free node structure. */
212 fat_idx_t *idxp_tmp;
213 nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
[6ebe721]214 if (!fibril_mutex_trylock(&nodep->lock))
[9a3d5f0]215 goto skip_cache;
216 idxp_tmp = nodep->idx;
[6ebe721]217 if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
218 fibril_mutex_unlock(&nodep->lock);
[9a3d5f0]219 goto skip_cache;
220 }
221 list_remove(&nodep->ffn_link);
[6ebe721]222 fibril_mutex_unlock(&ffn_mutex);
[4098e38]223 if (nodep->dirty) {
224 rc = fat_node_sync(nodep);
[17bf658]225 if (rc != EOK) {
226 idxp_tmp->nodep = NULL;
227 fibril_mutex_unlock(&nodep->lock);
228 fibril_mutex_unlock(&idxp_tmp->lock);
229 free(nodep->bp);
230 free(nodep);
231 return rc;
232 }
[4098e38]233 }
[9a3d5f0]234 idxp_tmp->nodep = NULL;
[6ebe721]235 fibril_mutex_unlock(&nodep->lock);
236 fibril_mutex_unlock(&idxp_tmp->lock);
[b6035ba]237 fn = FS_NODE(nodep);
[9a3d5f0]238 } else {
239skip_cache:
240 /* Try to allocate a new node structure. */
[6ebe721]241 fibril_mutex_unlock(&ffn_mutex);
[b6035ba]242 fn = (fs_node_t *)malloc(sizeof(fs_node_t));
243 if (!fn)
[17bf658]244 return ENOMEM;
[9a3d5f0]245 nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
[b6035ba]246 if (!nodep) {
247 free(fn);
[17bf658]248 return ENOMEM;
[b6035ba]249 }
[9a3d5f0]250 }
251 fat_node_initialize(nodep);
[83937ccd]252 fs_node_initialize(fn);
[b6035ba]253 fn->data = nodep;
254 nodep->bp = fn;
[9a3d5f0]255
[17bf658]256 *nodepp = nodep;
257 return EOK;
[9a3d5f0]258}
259
/** Internal version of fat_node_get().
 *
 * Returns the in-core node for a locked index structure, instantiating it
 * from the on-disk dentry if it is not already in memory.  A directory's
 * size is computed by walking its FAT chain because the dentry size field
 * is undefined for directories.
 *
 * @param nodepp Output: node with refcnt incremented.
 * @param idxp   Locked index structure.
 *
 * @return EOK on success or an error from the block/FAT layers.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			/* First reference: take it off the free-node list. */
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->dev_handle);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
	    (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint16_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
		    uint16_t_le2host(d->firstc));
		if (rc != EOK) {
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = BPS(bs) * SPC(bs) * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}
350
[50e5b25]351/*
352 * FAT libfs operations.
353 */
354
[073f550]355int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
356{
357 return fat_node_get(rfn, dev_handle, 0);
358}
359
360int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
361{
362 fat_bs_t *bs;
363 fat_node_t *parentp = FAT_NODE(pfn);
364 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
365 unsigned i, j;
366 unsigned blocks;
367 fat_dentry_t *d;
368 block_t *b;
369 int rc;
370
371 fibril_mutex_lock(&parentp->idx->lock);
372 bs = block_bb_get(parentp->idx->dev_handle);
[7a23d60]373 blocks = parentp->size / BPS(bs);
[073f550]374 for (i = 0; i < blocks; i++) {
375 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
376 if (rc != EOK) {
377 fibril_mutex_unlock(&parentp->idx->lock);
378 return rc;
379 }
[7a23d60]380 for (j = 0; j < DPS(bs); j++) {
[073f550]381 d = ((fat_dentry_t *)b->data) + j;
382 switch (fat_classify_dentry(d)) {
383 case FAT_DENTRY_SKIP:
384 case FAT_DENTRY_FREE:
385 continue;
386 case FAT_DENTRY_LAST:
[8810c63]387 /* miss */
[073f550]388 rc = block_put(b);
389 fibril_mutex_unlock(&parentp->idx->lock);
390 *rfn = NULL;
[8810c63]391 return rc;
[073f550]392 default:
393 case FAT_DENTRY_VALID:
394 fat_dentry_name_get(d, name);
395 break;
396 }
397 if (fat_dentry_namecmp(name, component) == 0) {
398 /* hit */
399 fat_node_t *nodep;
400 /*
401 * Assume tree hierarchy for locking. We
402 * already have the parent and now we are going
403 * to lock the child. Never lock in the oposite
404 * order.
405 */
406 fat_idx_t *idx = fat_idx_get_by_pos(
407 parentp->idx->dev_handle, parentp->firstc,
[7a23d60]408 i * DPS(bs) + j);
[073f550]409 fibril_mutex_unlock(&parentp->idx->lock);
410 if (!idx) {
411 /*
412 * Can happen if memory is low or if we
413 * run out of 32-bit indices.
414 */
415 rc = block_put(b);
[8810c63]416 return (rc == EOK) ? ENOMEM : rc;
[073f550]417 }
[0fc1e5d]418 rc = fat_node_get_core(&nodep, idx);
[073f550]419 fibril_mutex_unlock(&idx->lock);
[1647323]420 if (rc != EOK) {
421 (void) block_put(b);
422 return rc;
423 }
[073f550]424 *rfn = FS_NODE(nodep);
[1647323]425 rc = block_put(b);
426 if (rc != EOK)
427 (void) fat_node_put(*rfn);
428 return rc;
[073f550]429 }
430 }
431 rc = block_put(b);
[8810c63]432 if (rc != EOK) {
433 fibril_mutex_unlock(&parentp->idx->lock);
434 return rc;
435 }
[073f550]436 }
437
438 fibril_mutex_unlock(&parentp->idx->lock);
439 *rfn = NULL;
440 return EOK;
441}
442
/** Instantiate a FAT in-core node.
 *
 * Resolves @a index to an index structure and delegates to
 * fat_node_get_core().  A missing index is not an error: *rfn is set to
 * NULL and EOK is returned.
 *
 * @param rfn        Output: the node, or NULL if the index is unknown.
 * @param dev_handle Device the node lives on.
 * @param index      Global node index within the instance.
 *
 * @return EOK on success or an error from fat_node_get_core().
 */
int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
{
	fat_node_t *nodep;
	fat_idx_t *idxp;
	int rc;

	idxp = fat_idx_get_by_index(dev_handle, index);
	if (!idxp) {
		*rfn = NULL;
		return EOK;
	}
	/* idxp->lock held */
	rc = fat_node_get_core(&nodep, idxp);
	fibril_mutex_unlock(&idxp->lock);
	if (rc == EOK)
		*rfn = FS_NODE(nodep);
	return rc;
}
462
[1313ee9]463int fat_node_open(fs_node_t *fn)
464{
465 /*
466 * Opening a file is stateless, nothing
467 * to be done here.
468 */
469 return EOK;
470}
471
[073f550]472int fat_node_put(fs_node_t *fn)
[06901c6b]473{
[b6035ba]474 fat_node_t *nodep = FAT_NODE(fn);
[6571b78]475 bool destroy = false;
[34b3ce3]476
[6ebe721]477 fibril_mutex_lock(&nodep->lock);
[34b3ce3]478 if (!--nodep->refcnt) {
[6571b78]479 if (nodep->idx) {
[6ebe721]480 fibril_mutex_lock(&ffn_mutex);
[6571b78]481 list_append(&nodep->ffn_link, &ffn_head);
[6ebe721]482 fibril_mutex_unlock(&ffn_mutex);
[6571b78]483 } else {
484 /*
485 * The node does not have any index structure associated
486 * with itself. This can only mean that we are releasing
487 * the node after a failed attempt to allocate the index
488 * structure for it.
489 */
490 destroy = true;
491 }
[34b3ce3]492 }
[6ebe721]493 fibril_mutex_unlock(&nodep->lock);
[b6035ba]494 if (destroy) {
495 free(nodep->bp);
496 free(nodep);
497 }
[073f550]498 return EOK;
[06901c6b]499}
500
[073f550]501int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
[80e8482]502{
[6571b78]503 fat_idx_t *idxp;
504 fat_node_t *nodep;
[49df572]505 fat_bs_t *bs;
506 fat_cluster_t mcl, lcl;
507 int rc;
508
509 bs = block_bb_get(dev_handle);
510 if (flags & L_DIRECTORY) {
511 /* allocate a cluster */
512 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
[073f550]513 if (rc != EOK)
514 return rc;
515 /* populate the new cluster with unused dentries */
516 rc = fat_zero_cluster(bs, dev_handle, mcl);
517 if (rc != EOK) {
518 (void) fat_free_clusters(bs, dev_handle, mcl);
519 return rc;
520 }
[49df572]521 }
[6571b78]522
[17bf658]523 rc = fat_node_get_new(&nodep);
524 if (rc != EOK) {
[cca29e3c]525 (void) fat_free_clusters(bs, dev_handle, mcl);
[17bf658]526 return rc;
[49df572]527 }
[9a15176]528 rc = fat_idx_get_new(&idxp, dev_handle);
529 if (rc != EOK) {
[cca29e3c]530 (void) fat_free_clusters(bs, dev_handle, mcl);
[073f550]531 (void) fat_node_put(FS_NODE(nodep));
[9a15176]532 return rc;
[6571b78]533 }
534 /* idxp->lock held */
535 if (flags & L_DIRECTORY) {
536 nodep->type = FAT_DIRECTORY;
[49df572]537 nodep->firstc = mcl;
[7a23d60]538 nodep->size = BPS(bs) * SPC(bs);
[6571b78]539 } else {
540 nodep->type = FAT_FILE;
[49df572]541 nodep->firstc = FAT_CLST_RES0;
542 nodep->size = 0;
[6571b78]543 }
544 nodep->lnkcnt = 0; /* not linked anywhere */
545 nodep->refcnt = 1;
[49df572]546 nodep->dirty = true;
[6571b78]547
548 nodep->idx = idxp;
549 idxp->nodep = nodep;
550
[6ebe721]551 fibril_mutex_unlock(&idxp->lock);
[073f550]552 *rfn = FS_NODE(nodep);
553 return EOK;
[80e8482]554}
555
[b6035ba]556int fat_destroy_node(fs_node_t *fn)
[80e8482]557{
[b6035ba]558 fat_node_t *nodep = FAT_NODE(fn);
[50e5b25]559 fat_bs_t *bs;
[073f550]560 bool has_children;
561 int rc;
[50e5b25]562
563 /*
564 * The node is not reachable from the file system. This means that the
565 * link count should be zero and that the index structure cannot be
566 * found in the position hash. Obviously, we don't need to lock the node
567 * nor its index structure.
568 */
569 assert(nodep->lnkcnt == 0);
570
571 /*
572 * The node may not have any children.
573 */
[073f550]574 rc = fat_has_children(&has_children, fn);
575 if (rc != EOK)
576 return rc;
577 assert(!has_children);
[50e5b25]578
579 bs = block_bb_get(nodep->idx->dev_handle);
580 if (nodep->firstc != FAT_CLST_RES0) {
581 assert(nodep->size);
582 /* Free all clusters allocated to the node. */
[cca29e3c]583 rc = fat_free_clusters(bs, nodep->idx->dev_handle,
584 nodep->firstc);
[50e5b25]585 }
586
587 fat_idx_destroy(nodep->idx);
[b6035ba]588 free(nodep->bp);
[50e5b25]589 free(nodep);
[cca29e3c]590 return rc;
[80e8482]591}
592
[b6035ba]593int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
[80e8482]594{
[b6035ba]595 fat_node_t *parentp = FAT_NODE(pfn);
596 fat_node_t *childp = FAT_NODE(cfn);
[0fdd6bb]597 fat_dentry_t *d;
598 fat_bs_t *bs;
599 block_t *b;
[a405563]600 unsigned i, j;
[0fdd6bb]601 unsigned blocks;
[e32b65a]602 fat_cluster_t mcl, lcl;
603 int rc;
[0fdd6bb]604
[6ebe721]605 fibril_mutex_lock(&childp->lock);
[0fdd6bb]606 if (childp->lnkcnt == 1) {
607 /*
608 * On FAT, we don't support multiple hard links.
609 */
[6ebe721]610 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]611 return EMLINK;
612 }
613 assert(childp->lnkcnt == 0);
[6ebe721]614 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]615
616 if (!fat_dentry_name_verify(name)) {
617 /*
618 * Attempt to create unsupported name.
619 */
620 return ENOTSUP;
621 }
622
623 /*
624 * Get us an unused parent node's dentry or grow the parent and allocate
625 * a new one.
626 */
627
[6ebe721]628 fibril_mutex_lock(&parentp->idx->lock);
[0fdd6bb]629 bs = block_bb_get(parentp->idx->dev_handle);
630
[7a23d60]631 blocks = parentp->size / BPS(bs);
[0fdd6bb]632
633 for (i = 0; i < blocks; i++) {
[684b655]634 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
[4b4668e]635 if (rc != EOK) {
636 fibril_mutex_unlock(&parentp->idx->lock);
637 return rc;
638 }
[7a23d60]639 for (j = 0; j < DPS(bs); j++) {
[0fdd6bb]640 d = ((fat_dentry_t *)b->data) + j;
641 switch (fat_classify_dentry(d)) {
642 case FAT_DENTRY_SKIP:
643 case FAT_DENTRY_VALID:
644 /* skipping used and meta entries */
645 continue;
646 case FAT_DENTRY_FREE:
647 case FAT_DENTRY_LAST:
648 /* found an empty slot */
649 goto hit;
650 }
651 }
[c91f2d1b]652 rc = block_put(b);
[4b4668e]653 if (rc != EOK) {
654 fibril_mutex_unlock(&parentp->idx->lock);
655 return rc;
656 }
[0fdd6bb]657 }
[699743c]658 j = 0;
[0fdd6bb]659
660 /*
661 * We need to grow the parent in order to create a new unused dentry.
662 */
[b713492b]663 if (parentp->firstc == FAT_CLST_ROOT) {
[e32b65a]664 /* Can't grow the root directory. */
[6ebe721]665 fibril_mutex_unlock(&parentp->idx->lock);
[e32b65a]666 return ENOSPC;
667 }
668 rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
669 if (rc != EOK) {
[6ebe721]670 fibril_mutex_unlock(&parentp->idx->lock);
[e32b65a]671 return rc;
672 }
[cca29e3c]673 rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
[4b4668e]674 if (rc != EOK) {
[073f550]675 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
[4b4668e]676 fibril_mutex_unlock(&parentp->idx->lock);
677 return rc;
678 }
[377cce8]679 rc = fat_append_clusters(bs, parentp, mcl, lcl);
[4b4668e]680 if (rc != EOK) {
[073f550]681 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
[4b4668e]682 fibril_mutex_unlock(&parentp->idx->lock);
683 return rc;
684 }
[7a23d60]685 parentp->size += BPS(bs) * SPC(bs);
[d44aabd]686 parentp->dirty = true; /* need to sync node */
[684b655]687 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
[4b4668e]688 if (rc != EOK) {
689 fibril_mutex_unlock(&parentp->idx->lock);
690 return rc;
691 }
[e32b65a]692 d = (fat_dentry_t *)b->data;
[0fdd6bb]693
694hit:
695 /*
696 * At this point we only establish the link between the parent and the
697 * child. The dentry, except of the name and the extension, will remain
[e32b65a]698 * uninitialized until the corresponding node is synced. Thus the valid
699 * dentry data is kept in the child node structure.
[0fdd6bb]700 */
701 memset(d, 0, sizeof(fat_dentry_t));
702 fat_dentry_name_set(d, name);
703 b->dirty = true; /* need to sync block */
[c91f2d1b]704 rc = block_put(b);
[6ebe721]705 fibril_mutex_unlock(&parentp->idx->lock);
[4b4668e]706 if (rc != EOK)
707 return rc;
[0fdd6bb]708
[6ebe721]709 fibril_mutex_lock(&childp->idx->lock);
[1baec4b]710
[24a2517]711 if (childp->type == FAT_DIRECTORY) {
[4b4668e]712 /*
[24a2517]713 * If possible, create the Sub-directory Identifier Entry and
714 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
715 * These entries are not mandatory according to Standard
716 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
717 * rather a sign of our good will.
[4b4668e]718 */
[24a2517]719 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
720 if (rc != EOK) {
721 /*
722 * Rather than returning an error, simply skip the
723 * creation of these two entries.
724 */
725 goto skip_dots;
726 }
[ed903174]727 d = (fat_dentry_t *) b->data;
728 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
729 (str_cmp((char *) d->name, FAT_NAME_DOT)) == 0) {
[24a2517]730 memset(d, 0, sizeof(fat_dentry_t));
[ed903174]731 str_cpy((char *) d->name, 8, FAT_NAME_DOT);
732 str_cpy((char *) d->ext, 3, FAT_EXT_PAD);
[24a2517]733 d->attr = FAT_ATTR_SUBDIR;
734 d->firstc = host2uint16_t_le(childp->firstc);
735 /* TODO: initialize also the date/time members. */
736 }
737 d++;
[ed903174]738 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
739 (str_cmp((char *) d->name, FAT_NAME_DOT_DOT) == 0)) {
[24a2517]740 memset(d, 0, sizeof(fat_dentry_t));
[ed903174]741 str_cpy((char *) d->name, 8, FAT_NAME_DOT_DOT);
742 str_cpy((char *) d->ext, 3, FAT_EXT_PAD);
[24a2517]743 d->attr = FAT_ATTR_SUBDIR;
744 d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
745 host2uint16_t_le(FAT_CLST_RES0) :
746 host2uint16_t_le(parentp->firstc);
747 /* TODO: initialize also the date/time members. */
748 }
749 b->dirty = true; /* need to sync block */
750 /*
751 * Ignore the return value as we would have fallen through on error
752 * anyway.
753 */
754 (void) block_put(b);
[1baec4b]755 }
[4b4668e]756skip_dots:
[1baec4b]757
[0fdd6bb]758 childp->idx->pfc = parentp->firstc;
[7a23d60]759 childp->idx->pdi = i * DPS(bs) + j;
[6ebe721]760 fibril_mutex_unlock(&childp->idx->lock);
[0fdd6bb]761
[6ebe721]762 fibril_mutex_lock(&childp->lock);
[0fdd6bb]763 childp->lnkcnt = 1;
764 childp->dirty = true; /* need to sync node */
[6ebe721]765 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]766
767 /*
768 * Hash in the index structure into the position hash.
769 */
770 fat_idx_hashin(childp->idx);
771
772 return EOK;
[80e8482]773}
774
[cf95bc0]775int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
[80e8482]776{
[b6035ba]777 fat_node_t *parentp = FAT_NODE(pfn);
778 fat_node_t *childp = FAT_NODE(cfn);
[a31c1ccf]779 fat_bs_t *bs;
780 fat_dentry_t *d;
781 block_t *b;
[073f550]782 bool has_children;
[c91f2d1b]783 int rc;
[a31c1ccf]784
[770d281]785 if (!parentp)
786 return EBUSY;
[0be3e8b]787
[073f550]788 rc = fat_has_children(&has_children, cfn);
789 if (rc != EOK)
790 return rc;
791 if (has_children)
[0be3e8b]792 return ENOTEMPTY;
[770d281]793
[6ebe721]794 fibril_mutex_lock(&parentp->lock);
795 fibril_mutex_lock(&childp->lock);
[a31c1ccf]796 assert(childp->lnkcnt == 1);
[6ebe721]797 fibril_mutex_lock(&childp->idx->lock);
[a31c1ccf]798 bs = block_bb_get(childp->idx->dev_handle);
799
[684b655]800 rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
[7a23d60]801 (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
[a31c1ccf]802 BLOCK_FLAGS_NONE);
[46c0498]803 if (rc != EOK)
804 goto error;
[a31c1ccf]805 d = (fat_dentry_t *)b->data +
[7a23d60]806 (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
[a31c1ccf]807 /* mark the dentry as not-currently-used */
808 d->name[0] = FAT_DENTRY_ERASED;
809 b->dirty = true; /* need to sync block */
[c91f2d1b]810 rc = block_put(b);
[46c0498]811 if (rc != EOK)
812 goto error;
[a31c1ccf]813
814 /* remove the index structure from the position hash */
815 fat_idx_hashout(childp->idx);
816 /* clear position information */
817 childp->idx->pfc = FAT_CLST_RES0;
818 childp->idx->pdi = 0;
[6ebe721]819 fibril_mutex_unlock(&childp->idx->lock);
[a31c1ccf]820 childp->lnkcnt = 0;
821 childp->dirty = true;
[6ebe721]822 fibril_mutex_unlock(&childp->lock);
823 fibril_mutex_unlock(&parentp->lock);
[a31c1ccf]824
825 return EOK;
[46c0498]826
827error:
828 fibril_mutex_unlock(&parentp->idx->lock);
829 fibril_mutex_unlock(&childp->lock);
830 fibril_mutex_unlock(&childp->idx->lock);
831 return rc;
[80e8482]832}
833
[073f550]834int fat_has_children(bool *has_children, fs_node_t *fn)
[32fb10ed]835{
[7858bc5f]836 fat_bs_t *bs;
[b6035ba]837 fat_node_t *nodep = FAT_NODE(fn);
[32fb10ed]838 unsigned blocks;
[7858bc5f]839 block_t *b;
[32fb10ed]840 unsigned i, j;
[c91f2d1b]841 int rc;
[32fb10ed]842
[073f550]843 if (nodep->type != FAT_DIRECTORY) {
844 *has_children = false;
845 return EOK;
846 }
[b0247bac]847
[6ebe721]848 fibril_mutex_lock(&nodep->idx->lock);
[7858bc5f]849 bs = block_bb_get(nodep->idx->dev_handle);
[32fb10ed]850
[7a23d60]851 blocks = nodep->size / BPS(bs);
[32fb10ed]852
853 for (i = 0; i < blocks; i++) {
854 fat_dentry_t *d;
855
[684b655]856 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
[073f550]857 if (rc != EOK) {
858 fibril_mutex_unlock(&nodep->idx->lock);
859 return rc;
860 }
[7a23d60]861 for (j = 0; j < DPS(bs); j++) {
[32fb10ed]862 d = ((fat_dentry_t *)b->data) + j;
863 switch (fat_classify_dentry(d)) {
864 case FAT_DENTRY_SKIP:
[0fdd6bb]865 case FAT_DENTRY_FREE:
[32fb10ed]866 continue;
867 case FAT_DENTRY_LAST:
[c91f2d1b]868 rc = block_put(b);
[6ebe721]869 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]870 *has_children = false;
[8810c63]871 return rc;
[32fb10ed]872 default:
873 case FAT_DENTRY_VALID:
[c91f2d1b]874 rc = block_put(b);
[6ebe721]875 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]876 *has_children = true;
[8810c63]877 return rc;
[32fb10ed]878 }
879 }
[c91f2d1b]880 rc = block_put(b);
[8810c63]881 if (rc != EOK) {
882 fibril_mutex_unlock(&nodep->idx->lock);
883 return rc;
884 }
[32fb10ed]885 }
886
[6ebe721]887 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]888 *has_children = false;
889 return EOK;
890}
891
892
893fs_index_t fat_index_get(fs_node_t *fn)
894{
895 return FAT_NODE(fn)->idx->index;
896}
897
[ed903174]898aoff64_t fat_size_get(fs_node_t *fn)
[073f550]899{
900 return FAT_NODE(fn)->size;
[32fb10ed]901}
902
[073f550]903unsigned fat_lnkcnt_get(fs_node_t *fn)
[74ea3c6]904{
[073f550]905 return FAT_NODE(fn)->lnkcnt;
[74ea3c6]906}
907
[50e5b25]908char fat_plb_get_char(unsigned pos)
[74ea3c6]909{
910 return fat_reg.plb_ro[pos % PLB_SIZE];
911}
912
[b6035ba]913bool fat_is_directory(fs_node_t *fn)
[e1e3b26]914{
[b6035ba]915 return FAT_NODE(fn)->type == FAT_DIRECTORY;
[e1e3b26]916}
917
[b6035ba]918bool fat_is_file(fs_node_t *fn)
[e1e3b26]919{
[b6035ba]920 return FAT_NODE(fn)->type == FAT_FILE;
[e1e3b26]921}
922
[1313ee9]923dev_handle_t fat_device_get(fs_node_t *node)
924{
925 return 0;
926}
927
/** libfs operations vtable exported to the libfs framework. */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_open = fat_node_open,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file,
	.device_get = fat_device_get
};
948
[0013b9ce]949/*
950 * VFS operations.
951 */
952
[cde485d]953void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
954{
955 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
[1fbe064b]956 enum cache_mode cmode;
[7858bc5f]957 fat_bs_t *bs;
[472c09d]958
959 /* Accept the mount options */
960 char *opts;
[4cac2d69]961 int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);
[472c09d]962
963 if (rc != EOK) {
964 ipc_answer_0(rid, rc);
[594303b]965 return;
966 }
967
[1fbe064b]968 /* Check for option enabling write through. */
969 if (str_cmp(opts, "wtcache") == 0)
970 cmode = CACHE_MODE_WT;
971 else
972 cmode = CACHE_MODE_WB;
973
[64aed80]974 free(opts);
975
[7858bc5f]976 /* initialize libblock */
[6284978]977 rc = block_init(dev_handle, BS_SIZE);
[7a35204a]978 if (rc != EOK) {
[6284978]979 ipc_answer_0(rid, rc);
980 return;
981 }
982
983 /* prepare the boot block */
[1ee00b7]984 rc = block_bb_read(dev_handle, BS_BLOCK);
[6284978]985 if (rc != EOK) {
986 block_fini(dev_handle);
987 ipc_answer_0(rid, rc);
[7a35204a]988 return;
989 }
990
[7858bc5f]991 /* get the buffer with the boot sector */
992 bs = block_bb_get(dev_handle);
993
[7a23d60]994 if (BPS(bs) != BS_SIZE) {
[7858bc5f]995 block_fini(dev_handle);
[7a35204a]996 ipc_answer_0(rid, ENOTSUP);
997 return;
998 }
999
[f1ba5d6]1000 /* Initialize the block cache */
[7a23d60]1001 rc = block_cache_init(dev_handle, BPS(bs), 0 /* XXX */, cmode);
[f1ba5d6]1002 if (rc != EOK) {
1003 block_fini(dev_handle);
1004 ipc_answer_0(rid, rc);
1005 return;
1006 }
1007
[2ffaab5]1008 /* Do some simple sanity checks on the file system. */
[711e1f32]1009 rc = fat_sanity_check(bs, dev_handle);
1010 if (rc != EOK) {
[430de97]1011 (void) block_cache_fini(dev_handle);
[711e1f32]1012 block_fini(dev_handle);
1013 ipc_answer_0(rid, rc);
1014 return;
1015 }
1016
[cde485d]1017 rc = fat_idx_init_by_dev_handle(dev_handle);
1018 if (rc != EOK) {
[430de97]1019 (void) block_cache_fini(dev_handle);
[7858bc5f]1020 block_fini(dev_handle);
[cde485d]1021 ipc_answer_0(rid, rc);
1022 return;
1023 }
1024
[689f036]1025 /* Initialize the root node. */
[b6035ba]1026 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
1027 if (!rfn) {
[430de97]1028 (void) block_cache_fini(dev_handle);
[b6035ba]1029 block_fini(dev_handle);
1030 fat_idx_fini_by_dev_handle(dev_handle);
1031 ipc_answer_0(rid, ENOMEM);
1032 return;
1033 }
[83937ccd]1034 fs_node_initialize(rfn);
[689f036]1035 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
1036 if (!rootp) {
[b6035ba]1037 free(rfn);
[430de97]1038 (void) block_cache_fini(dev_handle);
[7858bc5f]1039 block_fini(dev_handle);
[689f036]1040 fat_idx_fini_by_dev_handle(dev_handle);
1041 ipc_answer_0(rid, ENOMEM);
1042 return;
1043 }
1044 fat_node_initialize(rootp);
1045
1046 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
1047 if (!ridxp) {
[b6035ba]1048 free(rfn);
[689f036]1049 free(rootp);
[430de97]1050 (void) block_cache_fini(dev_handle);
[b6035ba]1051 block_fini(dev_handle);
[689f036]1052 fat_idx_fini_by_dev_handle(dev_handle);
1053 ipc_answer_0(rid, ENOMEM);
1054 return;
1055 }
1056 assert(ridxp->index == 0);
1057 /* ridxp->lock held */
1058
1059 rootp->type = FAT_DIRECTORY;
1060 rootp->firstc = FAT_CLST_ROOT;
1061 rootp->refcnt = 1;
[5ab597d]1062 rootp->lnkcnt = 0; /* FS root is not linked */
[7a23d60]1063 rootp->size = RDE(bs) * sizeof(fat_dentry_t);
[689f036]1064 rootp->idx = ridxp;
1065 ridxp->nodep = rootp;
[b6035ba]1066 rootp->bp = rfn;
1067 rfn->data = rootp;
[689f036]1068
[6ebe721]1069 fibril_mutex_unlock(&ridxp->lock);
[689f036]1070
[5ab597d]1071 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
[cde485d]1072}
1073
/** Handle the VFS mount request by delegating to the libfs framework. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1078
[3c11713]1079void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
1080{
[430de97]1081 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
1082 fs_node_t *fn;
1083 fat_node_t *nodep;
1084 int rc;
1085
1086 rc = fat_root_get(&fn, dev_handle);
1087 if (rc != EOK) {
1088 ipc_answer_0(rid, rc);
1089 return;
1090 }
1091 nodep = FAT_NODE(fn);
1092
1093 /*
1094 * We expect exactly two references on the root node. One for the
1095 * fat_root_get() above and one created in fat_mounted().
1096 */
1097 if (nodep->refcnt != 2) {
1098 (void) fat_node_put(fn);
1099 ipc_answer_0(rid, EBUSY);
1100 return;
1101 }
1102
1103 /*
1104 * Put the root node and force it to the FAT free node list.
1105 */
1106 (void) fat_node_put(fn);
1107 (void) fat_node_put(fn);
1108
1109 /*
1110 * Perform cleanup of the node structures, index structures and
1111 * associated data. Write back this file system's dirty blocks and
1112 * stop using libblock for this instance.
1113 */
1114 (void) fat_node_fini_by_dev_handle(dev_handle);
1115 fat_idx_fini_by_dev_handle(dev_handle);
1116 (void) block_cache_fini(dev_handle);
1117 block_fini(dev_handle);
1118
1119 ipc_answer_0(rid, EOK);
[3c11713]1120}
1121
/** Handle the VFS unmount request by delegating to the libfs framework. */
void fat_unmount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_unmount(&fat_libfs_ops, rid, request);
}
1126
/** Handle the VFS lookup request by delegating to the libfs framework. */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1131
/** Handle the VFS read request.
 *
 * For regular files, at most one block worth of data is returned per call;
 * the client is expected to cope with short reads. For directories, the
 * position is interpreted as an index into the array of directory entries
 * and a single entry name is returned per call.
 *
 * Answers the data-read IPC transaction and then the request itself with
 * the number of bytes (files) or dentries (directories) consumed.
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
	aoff64_t pos =
	    (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, dev_handle, index);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	/* Receive the companion data-read transfer from the client. */
	ipc_callid_t callid;
	size_t len;
	if (!async_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) async_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to one block and to the end of the file. */
			bytes = min(len, BPS(bs) - pos % BPS(bs));
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
			    BLOCK_FLAGS_NONE);
			if (rc != EOK) {
				fat_node_put(fn);
				ipc_answer_0(callid, rc);
				ipc_answer_0(rid, rc);
				return;
			}
			(void) async_data_read_finalize(callid,
			    b->data + pos % BPS(bs), bytes);
			rc = block_put(b);
			if (rc != EOK) {
				fat_node_put(fn);
				ipc_answer_0(rid, rc);
				return;
			}
		}
	} else {
		unsigned bnum;
		aoff64_t spos = pos;	/* starting position, for dentry count */
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % BPS(bs) == 0);
		assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs);
		while (bnum < nodep->size / BPS(bs)) {
			aoff64_t o;

			rc = fat_block_get(&b, bs, nodep, bnum,
			    BLOCK_FLAGS_NONE);
			if (rc != EOK)
				goto err;
			/* Scan the dentries of this block, starting at pos. */
			for (o = pos % (BPS(bs) / sizeof(fat_dentry_t));
			    o < BPS(bs) / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
				case FAT_DENTRY_FREE:
					continue;
				case FAT_DENTRY_LAST:
					/* No more entries follow. */
					rc = block_put(b);
					if (rc != EOK)
						goto err;
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					fat_dentry_name_get(d, name);
					rc = block_put(b);
					if (rc != EOK)
						goto err;
					goto hit;
				}
			}
			rc = block_put(b);
			if (rc != EOK)
				goto err;
			bnum++;
		}
miss:
		/* No valid entry found at or after pos. */
		rc = fat_node_put(fn);
		ipc_answer_0(callid, rc != EOK ? rc : ENOENT);
		ipc_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
		return;

err:
		(void) fat_node_put(fn);
		ipc_answer_0(callid, rc);
		ipc_answer_0(rid, rc);
		return;

hit:
		/* Hand the entry name over, including the terminating NUL. */
		(void) async_data_read_finalize(callid, name, str_size(name) + 1);
		bytes = (pos - spos) + 1;
	}

	rc = fat_node_put(fn);
	ipc_answer_1(rid, rc, (ipcarg_t)bytes);
}
1268
[c947dda]1269void fat_write(ipc_callid_t rid, ipc_call_t *request)
1270{
[ed903174]1271 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
1272 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1273 aoff64_t pos =
1274 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1275 fs_node_t *fn;
[b6035ba]1276 fat_node_t *nodep;
[7858bc5f]1277 fat_bs_t *bs;
[dfddfcd]1278 size_t bytes, size;
[7858bc5f]1279 block_t *b;
[ed903174]1280 aoff64_t boundary;
[1d8cdb1]1281 int flags = BLOCK_FLAGS_NONE;
[c91f2d1b]1282 int rc;
[8d32152]1283
[073f550]1284 rc = fat_node_get(&fn, dev_handle, index);
1285 if (rc != EOK) {
1286 ipc_answer_0(rid, rc);
1287 return;
1288 }
[b6035ba]1289 if (!fn) {
[8d32152]1290 ipc_answer_0(rid, ENOENT);
1291 return;
1292 }
[b6035ba]1293 nodep = FAT_NODE(fn);
[8d32152]1294
1295 ipc_callid_t callid;
1296 size_t len;
[0da4e41]1297 if (!async_data_write_receive(&callid, &len)) {
[dfddfcd]1298 (void) fat_node_put(fn);
[8d32152]1299 ipc_answer_0(callid, EINVAL);
1300 ipc_answer_0(rid, EINVAL);
1301 return;
1302 }
1303
[913a821c]1304 bs = block_bb_get(dev_handle);
1305
[8d32152]1306 /*
1307 * In all scenarios, we will attempt to write out only one block worth
1308 * of data at maximum. There might be some more efficient approaches,
1309 * but this one greatly simplifies fat_write(). Note that we can afford
1310 * to do this because the client must be ready to handle the return
1311 * value signalizing a smaller number of bytes written.
1312 */
[7a23d60]1313 bytes = min(len, BPS(bs) - pos % BPS(bs));
1314 if (bytes == BPS(bs))
[1d8cdb1]1315 flags |= BLOCK_FLAGS_NOREAD;
[8d32152]1316
[7a23d60]1317 boundary = ROUND_UP(nodep->size, BPC(bs));
[b4b7187]1318 if (pos < boundary) {
[8d32152]1319 /*
1320 * This is the easier case - we are either overwriting already
1321 * existing contents or writing behind the EOF, but still within
1322 * the limits of the last cluster. The node size may grow to the
1323 * next block size boundary.
1324 */
[cca29e3c]1325 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
[dfddfcd]1326 if (rc != EOK) {
1327 (void) fat_node_put(fn);
1328 ipc_answer_0(callid, rc);
1329 ipc_answer_0(rid, rc);
1330 return;
1331 }
[7a23d60]1332 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
[dfddfcd]1333 if (rc != EOK) {
1334 (void) fat_node_put(fn);
1335 ipc_answer_0(callid, rc);
1336 ipc_answer_0(rid, rc);
1337 return;
1338 }
[7a23d60]1339 (void) async_data_write_finalize(callid,
1340 b->data + pos % BPS(bs), bytes);
[8d32152]1341 b->dirty = true; /* need to sync block */
[c91f2d1b]1342 rc = block_put(b);
[dfddfcd]1343 if (rc != EOK) {
1344 (void) fat_node_put(fn);
1345 ipc_answer_0(rid, rc);
1346 return;
1347 }
[8d32152]1348 if (pos + bytes > nodep->size) {
1349 nodep->size = pos + bytes;
1350 nodep->dirty = true; /* need to sync node */
1351 }
[dfddfcd]1352 size = nodep->size;
1353 rc = fat_node_put(fn);
1354 ipc_answer_2(rid, rc, bytes, nodep->size);
[8d32152]1355 return;
1356 } else {
1357 /*
1358 * This is the more difficult case. We must allocate new
1359 * clusters for the node and zero them out.
1360 */
1361 unsigned nclsts;
[8334a427]1362 fat_cluster_t mcl, lcl;
1363
[7a23d60]1364 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
[6f2dfd1]1365 /* create an independent chain of nclsts clusters in all FATs */
[dfddfcd]1366 rc = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
1367 if (rc != EOK) {
[6f2dfd1]1368 /* could not allocate a chain of nclsts clusters */
[dfddfcd]1369 (void) fat_node_put(fn);
1370 ipc_answer_0(callid, rc);
1371 ipc_answer_0(rid, rc);
[6f2dfd1]1372 return;
1373 }
1374 /* zero fill any gaps */
[cca29e3c]1375 rc = fat_fill_gap(bs, nodep, mcl, pos);
[dfddfcd]1376 if (rc != EOK) {
1377 (void) fat_free_clusters(bs, dev_handle, mcl);
1378 (void) fat_node_put(fn);
1379 ipc_answer_0(callid, rc);
1380 ipc_answer_0(rid, rc);
1381 return;
1382 }
[7a23d60]1383 rc = _fat_block_get(&b, bs, dev_handle, lcl,
1384 (pos / BPS(bs)) % SPC(bs), flags);
[dfddfcd]1385 if (rc != EOK) {
1386 (void) fat_free_clusters(bs, dev_handle, mcl);
1387 (void) fat_node_put(fn);
1388 ipc_answer_0(callid, rc);
1389 ipc_answer_0(rid, rc);
1390 return;
1391 }
[7a23d60]1392 (void) async_data_write_finalize(callid,
1393 b->data + pos % BPS(bs), bytes);
[b4b7187]1394 b->dirty = true; /* need to sync block */
[c91f2d1b]1395 rc = block_put(b);
[dfddfcd]1396 if (rc != EOK) {
1397 (void) fat_free_clusters(bs, dev_handle, mcl);
1398 (void) fat_node_put(fn);
1399 ipc_answer_0(rid, rc);
1400 return;
1401 }
[6f2dfd1]1402 /*
1403 * Append the cluster chain starting in mcl to the end of the
1404 * node's cluster chain.
1405 */
[377cce8]1406 rc = fat_append_clusters(bs, nodep, mcl, lcl);
[dfddfcd]1407 if (rc != EOK) {
1408 (void) fat_free_clusters(bs, dev_handle, mcl);
1409 (void) fat_node_put(fn);
1410 ipc_answer_0(rid, rc);
1411 return;
1412 }
1413 nodep->size = size = pos + bytes;
[b4b7187]1414 nodep->dirty = true; /* need to sync node */
[dfddfcd]1415 rc = fat_node_put(fn);
1416 ipc_answer_2(rid, rc, bytes, size);
[6f2dfd1]1417 return;
[8d32152]1418 }
[c947dda]1419}
1420
/** Handle the VFS truncate request.
 *
 * Only shrinking is supported; an attempt to grow the node answers EINVAL.
 * When the new size stays within the currently allocated clusters, only the
 * size is updated; otherwise the surplus clusters are chopped off the chain.
 */
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
	aoff64_t size =
	    (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	int rc;

	rc = fat_node_get(&fn, dev_handle, index);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	bs = block_bb_get(dev_handle);

	if (nodep->size == size) {
		/* Nothing to do. */
		rc = EOK;
	} else if (nodep->size < size) {
		/*
		 * The standard says we have the freedom to grow the node.
		 * For now, we simply return an error.
		 */
		rc = EINVAL;
	} else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
		/*
		 * The node will be shrunk, but no clusters will be deallocated.
		 */
		nodep->size = size;
		nodep->dirty = true;	/* need to sync node */
		rc = EOK;
	} else {
		/*
		 * The node will be shrunk, clusters will be deallocated.
		 */
		if (size == 0) {
			/* Drop the whole cluster chain. */
			rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
			if (rc != EOK)
				goto out;
		} else {
			/* Find the last cluster to keep, chop off the rest. */
			fat_cluster_t lastc;
			rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
			    &lastc, NULL, (size - 1) / BPC(bs));
			if (rc != EOK)
				goto out;
			rc = fat_chop_clusters(bs, nodep, lastc);
			if (rc != EOK)
				goto out;
		}
		nodep->size = size;
		nodep->dirty = true;	/* need to sync node */
		rc = EOK;
	}
out:
	fat_node_put(fn);
	ipc_answer_0(rid, rc);
	return;
}
1487
/** Handle the VFS close request; FAT keeps no per-open state, so just EOK. */
void fat_close(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_answer_0(rid, EOK);
}
1492
[50e5b25]1493void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1494{
1495 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1496 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
[073f550]1497 fs_node_t *fn;
[50e5b25]1498 int rc;
1499
[073f550]1500 rc = fat_node_get(&fn, dev_handle, index);
1501 if (rc != EOK) {
1502 ipc_answer_0(rid, rc);
1503 return;
1504 }
[b6035ba]1505 if (!fn) {
[50e5b25]1506 ipc_answer_0(rid, ENOENT);
1507 return;
1508 }
1509
[b6035ba]1510 rc = fat_destroy_node(fn);
[50e5b25]1511 ipc_answer_0(rid, rc);
1512}
1513
/** Handle the VFS open-node request by delegating to the libfs framework. */
void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1518
/** Handle the VFS stat request by delegating to the libfs framework. */
void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1523
/** Handle the VFS sync request.
 *
 * No explicit write-back is performed here; dirty state is handled by the
 * block cache according to its cache mode.
 */
void fat_sync(ipc_callid_t rid, ipc_call_t *request)
{
	/* Dummy implementation */
	ipc_answer_0(rid, EOK);
}
1529
[be815bc]1530/**
1531 * @}
[c20aa06]1532 */
Note: See TracBrowser for help on using the repository browser.