source: mainline/uspace/srv/fs/fat/fat_ops.c@ 1313ee9

Last change on this file was 1313ee9, checked in by Martin Decky <martin@…>, 16 years ago

introduce device namespaces

  • add support for explicit open in libfs (needed by devfs, but also possibly for other filesystems which need to track some stateful information)
  • extend libfs to be more generic, make proper adjustments to libc, tmpfs and fat
  • various updates to make use of the device namespaces
  • code cleanup
File size: 34.7 KB
[be815bc]1/*
[a2aa1dec]2 * Copyright (c) 2008 Jakub Jermar
[be815bc]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
[033ef7d3]39#include "fat_dentry.h"
40#include "fat_fat.h"
[6364d3c]41#include "../../vfs/vfs.h"
[a2aa1dec]42#include <libfs.h>
[fc840d9]43#include <libblock.h>
[be815bc]44#include <ipc/ipc.h>
[7a35204a]45#include <ipc/services.h>
46#include <ipc/devmap.h>
[be815bc]47#include <async.h>
48#include <errno.h>
[a2aa1dec]49#include <string.h>
[776f2e6]50#include <byteorder.h>
[d9c8c81]51#include <adt/hash_table.h>
52#include <adt/list.h>
[e1e3b26]53#include <assert.h>
[1e4cada]54#include <fibril_synch.h>
[7a35204a]55#include <sys/mman.h>
[8d32152]56#include <align.h>
[e1e3b26]57
[b6035ba]58#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
59#define FS_NODE(node) ((node) ? (node)->bp : NULL)
60
[6ebe721]61/** Mutex protecting the list of cached free FAT nodes. */
62static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
[add5835]63
64/** List of cached free FAT nodes. */
65static LIST_INITIALIZE(ffn_head);
[6364d3c]66
[0fc1e5d]67/*
68 * Forward declarations of FAT libfs operations.
69 */
70static int fat_root_get(fs_node_t **, dev_handle_t);
71static int fat_match(fs_node_t **, fs_node_t *, const char *);
72static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
[1313ee9]73static int fat_node_open(fs_node_t *);
[0fc1e5d]74static int fat_node_put(fs_node_t *);
75static int fat_create_node(fs_node_t **, dev_handle_t, int);
76static int fat_destroy_node(fs_node_t *);
77static int fat_link(fs_node_t *, fs_node_t *, const char *);
78static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
79static int fat_has_children(bool *, fs_node_t *);
80static fs_index_t fat_index_get(fs_node_t *);
81static size_t fat_size_get(fs_node_t *);
82static unsigned fat_lnkcnt_get(fs_node_t *);
83static char fat_plb_get_char(unsigned);
84static bool fat_is_directory(fs_node_t *);
85static bool fat_is_file(fs_node_t *node);
[1313ee9]86static dev_handle_t fat_device_get(fs_node_t *node);
[0fc1e5d]87
88/*
89 * Helper functions.
90 */
[e1e3b26]91static void fat_node_initialize(fat_node_t *node)
[a2aa1dec]92{
[6ebe721]93 fibril_mutex_initialize(&node->lock);
[b6035ba]94 node->bp = NULL;
[869e546]95 node->idx = NULL;
[e1e3b26]96 node->type = 0;
97 link_initialize(&node->ffn_link);
98 node->size = 0;
99 node->lnkcnt = 0;
100 node->refcnt = 0;
101 node->dirty = false;
102}
103
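/** Synchronize a dirty in-core node with its on-disk dentry.
 *
 * @param node Node to synchronize.
 *
 * @return EOK on success or an error code.
 */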
[4098e38]104static int fat_node_sync(fat_node_t *node)
[e1e3b26]105{
[7858bc5f]106 block_t *b;
107 fat_bs_t *bs;
[beb17734]108 fat_dentry_t *d;
109 uint16_t bps;
110 unsigned dps;
[c91f2d1b]111 int rc;
[beb17734]112
113 assert(node->dirty);
114
[7858bc5f]115 bs = block_bb_get(node->idx->dev_handle);
116 bps = uint16_t_le2host(bs->bps);
[beb17734]117 dps = bps / sizeof(fat_dentry_t);
118
119 /* Read the block that contains the dentry of interest. */
[684b655]120 rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
[1d8cdb1]121 (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
[4098e38]122 if (rc != EOK)
123 return rc;
[beb17734]124
125 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);
126
127 d->firstc = host2uint16_t_le(node->firstc);
[a5da446]128 if (node->type == FAT_FILE) {
[beb17734]129 d->size = host2uint32_t_le(node->size);
[a5da446]130 } else if (node->type == FAT_DIRECTORY) {
131 d->attr = FAT_ATTR_SUBDIR;
132 }
133
134 /* TODO: update other fields? (e.g. time fields) */
[beb17734]135
136 b->dirty = true; /* need to sync block */
[c91f2d1b]137 rc = block_put(b);
[4098e38]138 return rc;
[e1e3b26]139}
140
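/** Obtain a free in-core node structure.
 *
 * Reuses a cached free node if possible (syncing it first if dirty),
 * otherwise allocates a fresh fs_node_t / fat_node_t pair.
 *
 * @param nodepp Where to store the pointer to the node.
 *
 * @return EOK on success or an error code.
 */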
[17bf658]141static int fat_node_get_new(fat_node_t **nodepp)
[9a3d5f0]142{
[b6035ba]143 fs_node_t *fn;
[9a3d5f0]144 fat_node_t *nodep;
[4098e38]145 int rc;
[9a3d5f0]146
[6ebe721]147 fibril_mutex_lock(&ffn_mutex);
[9a3d5f0]148 if (!list_empty(&ffn_head)) {
149 /* Try to use a cached free node structure. */
150 fat_idx_t *idxp_tmp;
151 nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
[6ebe721]152 if (!fibril_mutex_trylock(&nodep->lock))
[9a3d5f0]153 goto skip_cache;
154 idxp_tmp = nodep->idx;
[6ebe721]155 if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
156 fibril_mutex_unlock(&nodep->lock);
[9a3d5f0]157 goto skip_cache;
158 }
159 list_remove(&nodep->ffn_link);
[6ebe721]160 fibril_mutex_unlock(&ffn_mutex);
[4098e38]161 if (nodep->dirty) {
162 rc = fat_node_sync(nodep);
[17bf658]163 if (rc != EOK) {
164 idxp_tmp->nodep = NULL;
165 fibril_mutex_unlock(&nodep->lock);
166 fibril_mutex_unlock(&idxp_tmp->lock);
167 free(nodep->bp);
168 free(nodep);
169 return rc;
170 }
[4098e38]171 }
[9a3d5f0]172 idxp_tmp->nodep = NULL;
[6ebe721]173 fibril_mutex_unlock(&nodep->lock);
174 fibril_mutex_unlock(&idxp_tmp->lock);
[b6035ba]175 fn = FS_NODE(nodep);
[9a3d5f0]176 } else {
177skip_cache:
178 /* Try to allocate a new node structure. */
[6ebe721]179 fibril_mutex_unlock(&ffn_mutex);
[b6035ba]180 fn = (fs_node_t *)malloc(sizeof(fs_node_t));
181 if (!fn)
[17bf658]182 return ENOMEM;
[9a3d5f0]183 nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
[b6035ba]184 if (!nodep) {
185 free(fn);
[17bf658]186 return ENOMEM;
[b6035ba]187 }
[9a3d5f0]188 }
189 fat_node_initialize(nodep);
[83937ccd]190 fs_node_initialize(fn);
[b6035ba]191 fn->data = nodep;
192 nodep->bp = fn;
[9a3d5f0]193
[17bf658]194 *nodepp = nodep;
195 return EOK;
[9a3d5f0]196}
197
[add5835]198/** Internal version of fat_node_get().
199 *
200 * @param idxp Locked index structure.
201 */
[0fc1e5d]202static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
[e1e3b26]203{
[7858bc5f]204 block_t *b;
205 fat_bs_t *bs;
[4573a79]206 fat_dentry_t *d;
[c06dbf9]207 fat_node_t *nodep = NULL;
[4573a79]208 unsigned bps;
[4f1c0b4]209 unsigned spc;
[4573a79]210 unsigned dps;
[c91f2d1b]211 int rc;
[4573a79]212
[add5835]213 if (idxp->nodep) {
[4573a79]214 /*
215 * We are lucky.
216 * The node is already instantiated in memory.
217 */
[6ebe721]218 fibril_mutex_lock(&idxp->nodep->lock);
[e6bc3a5]219 if (!idxp->nodep->refcnt++) {
220 fibril_mutex_lock(&ffn_mutex);
[c06dbf9]221 list_remove(&idxp->nodep->ffn_link);
[e6bc3a5]222 fibril_mutex_unlock(&ffn_mutex);
223 }
[6ebe721]224 fibril_mutex_unlock(&idxp->nodep->lock);
[0fc1e5d]225 *nodepp = idxp->nodep;
226 return EOK;
[4573a79]227 }
228
229 /*
230 * We must instantiate the node from the file system.
231 */
232
[add5835]233 assert(idxp->pfc);
[4573a79]234
[17bf658]235 rc = fat_node_get_new(&nodep);
236 if (rc != EOK)
[0fc1e5d]237 return rc;
[4573a79]238
[7858bc5f]239 bs = block_bb_get(idxp->dev_handle);
240 bps = uint16_t_le2host(bs->bps);
[4f1c0b4]241 spc = bs->spc;
[4573a79]242 dps = bps / sizeof(fat_dentry_t);
243
[2c4bbcde]244 /* Read the block that contains the dentry of interest. */
[684b655]245 rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
[1d8cdb1]246 (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
[0fc1e5d]247 if (rc != EOK) {
248 (void) fat_node_put(FS_NODE(nodep));
249 return rc;
250 }
[4573a79]251
[add5835]252 d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
[2c4bbcde]253 if (d->attr & FAT_ATTR_SUBDIR) {
254 /*
255 * The only directory which does not have this bit set is the
256 * root directory itself. The root directory node is handled
257 * and initialized elsewhere.
258 */
259 nodep->type = FAT_DIRECTORY;
[2ab1023]260 /*
[e2115311]261 * Unfortunately, the 'size' field of the FAT dentry is not
262 * defined for the directory entry type. We must determine the
263 * size of the directory by walking the FAT.
[2ab1023]264 */
[e402382]265 uint16_t clusters;
266 rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
[4f1c0b4]267 uint16_t_le2host(d->firstc));
[0fc1e5d]268 if (rc != EOK) {
269 (void) fat_node_put(FS_NODE(nodep));
270 return rc;
271 }
[e402382]272 nodep->size = bps * spc * clusters;
[2c4bbcde]273 } else {
274 nodep->type = FAT_FILE;
[2ab1023]275 nodep->size = uint32_t_le2host(d->size);
[2c4bbcde]276 }
277 nodep->firstc = uint16_t_le2host(d->firstc);
278 nodep->lnkcnt = 1;
279 nodep->refcnt = 1;
280
[c91f2d1b]281 rc = block_put(b);
[0fc1e5d]282 if (rc != EOK) {
283 (void) fat_node_put(FS_NODE(nodep));
284 return rc;
285 }
[2c4bbcde]286
287 /* Link the idx structure with the node structure. */
[add5835]288 nodep->idx = idxp;
289 idxp->nodep = nodep;
[2c4bbcde]290
[0fc1e5d]291 *nodepp = nodep;
292 return EOK;
[a2aa1dec]293}
294
[50e5b25]295/*
296 * FAT libfs operations.
297 */
298
[073f550]299int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
300{
301 return fat_node_get(rfn, dev_handle, 0);
302}
303
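/** Look up a directory entry by name.
 *
 * Scans the dentries of the parent directory and, on a match,
 * instantiates the corresponding child node.
 *
 * @param rfn       Where to store the child node (NULL if not found).
 * @param pfn       Parent directory node.
 * @param component Name to look up.
 *
 * @return EOK on success or an error code.
 */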
304int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
305{
306 fat_bs_t *bs;
307 fat_node_t *parentp = FAT_NODE(pfn);
308 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
309 unsigned i, j;
310 unsigned bps; /* bytes per sector */
311 unsigned dps; /* dentries per sector */
312 unsigned blocks;
313 fat_dentry_t *d;
314 block_t *b;
315 int rc;
316
317 fibril_mutex_lock(&parentp->idx->lock);
318 bs = block_bb_get(parentp->idx->dev_handle);
319 bps = uint16_t_le2host(bs->bps);
320 dps = bps / sizeof(fat_dentry_t);
321 blocks = parentp->size / bps;
322 for (i = 0; i < blocks; i++) {
323 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
324 if (rc != EOK) {
325 fibril_mutex_unlock(&parentp->idx->lock);
326 return rc;
327 }
328 for (j = 0; j < dps; j++) {
329 d = ((fat_dentry_t *)b->data) + j;
330 switch (fat_classify_dentry(d)) {
331 case FAT_DENTRY_SKIP:
332 case FAT_DENTRY_FREE:
333 continue;
334 case FAT_DENTRY_LAST:
[8810c63]335 /* miss */
[073f550]336 rc = block_put(b);
337 fibril_mutex_unlock(&parentp->idx->lock);
338 *rfn = NULL;
[8810c63]339 return rc;
[073f550]340 default:
341 case FAT_DENTRY_VALID:
342 fat_dentry_name_get(d, name);
343 break;
344 }
345 if (fat_dentry_namecmp(name, component) == 0) {
346 /* hit */
347 fat_node_t *nodep;
348 /*
349 * Assume tree hierarchy for locking. We
350 * already have the parent and now we are going
351 * to lock the child. Never lock in the opposite
352 * order.
353 */
354 fat_idx_t *idx = fat_idx_get_by_pos(
355 parentp->idx->dev_handle, parentp->firstc,
356 i * dps + j);
357 fibril_mutex_unlock(&parentp->idx->lock);
358 if (!idx) {
359 /*
360 * Can happen if memory is low or if we
361 * run out of 32-bit indices.
362 */
363 rc = block_put(b);
[8810c63]364 return (rc == EOK) ? ENOMEM : rc;
[073f550]365 }
[0fc1e5d]366 rc = fat_node_get_core(&nodep, idx);
[073f550]367 fibril_mutex_unlock(&idx->lock);
[1647323]368 if (rc != EOK) {
369 (void) block_put(b);
370 return rc;
371 }
[073f550]372 *rfn = FS_NODE(nodep);
[1647323]373 rc = block_put(b);
374 if (rc != EOK)
375 (void) fat_node_put(*rfn);
376 return rc;
[073f550]377 }
378 }
379 rc = block_put(b);
[8810c63]380 if (rc != EOK) {
381 fibril_mutex_unlock(&parentp->idx->lock);
382 return rc;
383 }
[073f550]384 }
385
386 fibril_mutex_unlock(&parentp->idx->lock);
387 *rfn = NULL;
388 return EOK;
389}
390
[add5835]391/** Instantiate a FAT in-core node. */
[073f550]392int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
[add5835]393{
[b6035ba]394 fat_node_t *nodep;
[add5835]395 fat_idx_t *idxp;
[0fc1e5d]396 int rc;
[add5835]397
398 idxp = fat_idx_get_by_index(dev_handle, index);
[073f550]399 if (!idxp) {
400 *rfn = NULL;
401 return EOK;
402 }
[add5835]403 /* idxp->lock held */
[0fc1e5d]404 rc = fat_node_get_core(&nodep, idxp);
[6ebe721]405 fibril_mutex_unlock(&idxp->lock);
[0fc1e5d]406 if (rc == EOK)
407 *rfn = FS_NODE(nodep);
408 return rc;
[add5835]409}
410
[1313ee9]411int fat_node_open(fs_node_t *fn)
412{
413 /*
414 * Opening a file is stateless, nothing
415 * to be done here.
416 */
417 return EOK;
418}
419
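/** Release a reference to an in-core node.
 *
 * When the last reference is dropped, the node is either put on the list
 * of cached free nodes or, if it has no index structure, freed outright.
 */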
[073f550]420int fat_node_put(fs_node_t *fn)
[06901c6b]421{
[b6035ba]422 fat_node_t *nodep = FAT_NODE(fn);
[6571b78]423 bool destroy = false;
[34b3ce3]424
[6ebe721]425 fibril_mutex_lock(&nodep->lock);
[34b3ce3]426 if (!--nodep->refcnt) {
[6571b78]427 if (nodep->idx) {
[6ebe721]428 fibril_mutex_lock(&ffn_mutex);
[6571b78]429 list_append(&nodep->ffn_link, &ffn_head);
[6ebe721]430 fibril_mutex_unlock(&ffn_mutex);
[6571b78]431 } else {
432 /*
433 * The node does not have any index structure associated
434 * with it. This can only mean that we are releasing
435 * the node after a failed attempt to allocate the index
436 * structure for it.
437 */
438 destroy = true;
439 }
[34b3ce3]440 }
[6ebe721]441 fibril_mutex_unlock(&nodep->lock);
[b6035ba]442 if (destroy) {
443 free(nodep->bp);
444 free(nodep);
445 }
[073f550]446 return EOK;
[06901c6b]447}
448
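/** Create a new file or directory node.
 *
 * For directories, a single cluster is allocated and zeroed out.
 * The new node is not linked into any directory yet.
 */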
[073f550]449int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
[80e8482]450{
[6571b78]451 fat_idx_t *idxp;
452 fat_node_t *nodep;
[49df572]453 fat_bs_t *bs;
454 fat_cluster_t mcl, lcl;
455 uint16_t bps;
456 int rc;
457
458 bs = block_bb_get(dev_handle);
459 bps = uint16_t_le2host(bs->bps);
460 if (flags & L_DIRECTORY) {
461 /* allocate a cluster */
462 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
[073f550]463 if (rc != EOK)
464 return rc;
465 /* populate the new cluster with unused dentries */
466 rc = fat_zero_cluster(bs, dev_handle, mcl);
467 if (rc != EOK) {
468 (void) fat_free_clusters(bs, dev_handle, mcl);
469 return rc;
470 }
[49df572]471 }
[6571b78]472
[17bf658]473 rc = fat_node_get_new(&nodep);
474 if (rc != EOK) {
[cca29e3c]475 (void) fat_free_clusters(bs, dev_handle, mcl);
[17bf658]476 return rc;
[49df572]477 }
[9a15176]478 rc = fat_idx_get_new(&idxp, dev_handle);
479 if (rc != EOK) {
[cca29e3c]480 (void) fat_free_clusters(bs, dev_handle, mcl);
[073f550]481 (void) fat_node_put(FS_NODE(nodep));
[9a15176]482 return rc;
[6571b78]483 }
484 /* idxp->lock held */
485 if (flags & L_DIRECTORY) {
486 nodep->type = FAT_DIRECTORY;
[49df572]487 nodep->firstc = mcl;
488 nodep->size = bps * bs->spc;
[6571b78]489 } else {
490 nodep->type = FAT_FILE;
[49df572]491 nodep->firstc = FAT_CLST_RES0;
492 nodep->size = 0;
[6571b78]493 }
494 nodep->lnkcnt = 0; /* not linked anywhere */
495 nodep->refcnt = 1;
[49df572]496 nodep->dirty = true;
[6571b78]497
498 nodep->idx = idxp;
499 idxp->nodep = nodep;
500
[6ebe721]501 fibril_mutex_unlock(&idxp->lock);
[073f550]502 *rfn = FS_NODE(nodep);
503 return EOK;
[80e8482]504}
505
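/** Destroy an unlinked node and free all clusters allocated to it. */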
[b6035ba]506int fat_destroy_node(fs_node_t *fn)
[80e8482]507{
[b6035ba]508 fat_node_t *nodep = FAT_NODE(fn);
[50e5b25]509 fat_bs_t *bs;
[073f550]510 bool has_children;
511 int rc;
[50e5b25]512
513 /*
514 * The node is not reachable from the file system. This means that the
515 * link count should be zero and that the index structure cannot be
516 * found in the position hash. Obviously, we need to lock neither the node
517 * nor its index structure.
518 */
519 assert(nodep->lnkcnt == 0);
520
521 /*
522 * The node must not have any children.
523 */
[073f550]524 rc = fat_has_children(&has_children, fn);
525 if (rc != EOK)
526 return rc;
527 assert(!has_children);
[50e5b25]528
529 bs = block_bb_get(nodep->idx->dev_handle);
530 if (nodep->firstc != FAT_CLST_RES0) {
531 assert(nodep->size);
532 /* Free all clusters allocated to the node. */
[cca29e3c]533 rc = fat_free_clusters(bs, nodep->idx->dev_handle,
534 nodep->firstc);
[50e5b25]535 }
536
537 fat_idx_destroy(nodep->idx);
[b6035ba]538 free(nodep->bp);
[50e5b25]539 free(nodep);
[cca29e3c]540 return rc;
[80e8482]541}
542
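/** Link a child node into a parent directory under the given name.
 *
 * Finds or creates an unused dentry in the parent, writes the name into
 * it and, for directories, attempts to create the "." and ".." entries.
 */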
[b6035ba]543int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
[80e8482]544{
[b6035ba]545 fat_node_t *parentp = FAT_NODE(pfn);
546 fat_node_t *childp = FAT_NODE(cfn);
[0fdd6bb]547 fat_dentry_t *d;
548 fat_bs_t *bs;
549 block_t *b;
[a405563]550 unsigned i, j;
[0fdd6bb]551 uint16_t bps;
552 unsigned dps;
553 unsigned blocks;
[e32b65a]554 fat_cluster_t mcl, lcl;
555 int rc;
[0fdd6bb]556
[6ebe721]557 fibril_mutex_lock(&childp->lock);
[0fdd6bb]558 if (childp->lnkcnt == 1) {
559 /*
560 * On FAT, we don't support multiple hard links.
561 */
[6ebe721]562 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]563 return EMLINK;
564 }
565 assert(childp->lnkcnt == 0);
[6ebe721]566 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]567
568 if (!fat_dentry_name_verify(name)) {
569 /*
570 * Attempt to create unsupported name.
571 */
572 return ENOTSUP;
573 }
574
575 /*
576 * Get us an unused parent node's dentry or grow the parent and allocate
577 * a new one.
578 */
579
[6ebe721]580 fibril_mutex_lock(&parentp->idx->lock);
[0fdd6bb]581 bs = block_bb_get(parentp->idx->dev_handle);
582 bps = uint16_t_le2host(bs->bps);
583 dps = bps / sizeof(fat_dentry_t);
584
585 blocks = parentp->size / bps;
586
587 for (i = 0; i < blocks; i++) {
[684b655]588 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
[4b4668e]589 if (rc != EOK) {
590 fibril_mutex_unlock(&parentp->idx->lock);
591 return rc;
592 }
[0fdd6bb]593 for (j = 0; j < dps; j++) {
594 d = ((fat_dentry_t *)b->data) + j;
595 switch (fat_classify_dentry(d)) {
596 case FAT_DENTRY_SKIP:
597 case FAT_DENTRY_VALID:
598 /* skipping used and meta entries */
599 continue;
600 case FAT_DENTRY_FREE:
601 case FAT_DENTRY_LAST:
602 /* found an empty slot */
603 goto hit;
604 }
605 }
[c91f2d1b]606 rc = block_put(b);
[4b4668e]607 if (rc != EOK) {
608 fibril_mutex_unlock(&parentp->idx->lock);
609 return rc;
610 }
[0fdd6bb]611 }
[699743c]612 j = 0;
[0fdd6bb]613
614 /*
615 * We need to grow the parent in order to create a new unused dentry.
616 */
[b713492b]617 if (parentp->firstc == FAT_CLST_ROOT) {
[e32b65a]618 /* Can't grow the root directory. */
[6ebe721]619 fibril_mutex_unlock(&parentp->idx->lock);
[e32b65a]620 return ENOSPC;
621 }
622 rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
623 if (rc != EOK) {
[6ebe721]624 fibril_mutex_unlock(&parentp->idx->lock);
[e32b65a]625 return rc;
626 }
[cca29e3c]627 rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
[4b4668e]628 if (rc != EOK) {
[073f550]629 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
[4b4668e]630 fibril_mutex_unlock(&parentp->idx->lock);
631 return rc;
632 }
[cca29e3c]633 rc = fat_append_clusters(bs, parentp, mcl);
[4b4668e]634 if (rc != EOK) {
[073f550]635 (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
[4b4668e]636 fibril_mutex_unlock(&parentp->idx->lock);
637 return rc;
638 }
[d44aabd]639 parentp->size += bps * bs->spc;
640 parentp->dirty = true; /* need to sync node */
[684b655]641 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
[4b4668e]642 if (rc != EOK) {
643 fibril_mutex_unlock(&parentp->idx->lock);
644 return rc;
645 }
[e32b65a]646 d = (fat_dentry_t *)b->data;
[0fdd6bb]647
648hit:
649 /*
650 * At this point we only establish the link between the parent and the
651 * child. The dentry, except for the name and the extension, will remain
[e32b65a]652 * uninitialized until the corresponding node is synced. Thus the valid
653 * dentry data is kept in the child node structure.
[0fdd6bb]654 */
655 memset(d, 0, sizeof(fat_dentry_t));
656 fat_dentry_name_set(d, name);
657 b->dirty = true; /* need to sync block */
[c91f2d1b]658 rc = block_put(b);
[6ebe721]659 fibril_mutex_unlock(&parentp->idx->lock);
[4b4668e]660 if (rc != EOK)
661 return rc;
[0fdd6bb]662
[6ebe721]663 fibril_mutex_lock(&childp->idx->lock);
[1baec4b]664
665 /*
666 * If possible, create the Sub-directory Identifier Entry and the
667 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
668 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
669 * not use them anyway, so this is rather a sign of our good will.
670 */
[684b655]671 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
[4b4668e]672 if (rc != EOK) {
673 /*
674 * Rather than returning an error, simply skip the creation of
675 * these two entries.
676 */
677 goto skip_dots;
678 }
[1baec4b]679 d = (fat_dentry_t *)b->data;
680 if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
[92fd52d7]681 str_cmp(d->name, FAT_NAME_DOT) == 0) {
[1baec4b]682 memset(d, 0, sizeof(fat_dentry_t));
[6eb2e96]683 str_cpy(d->name, 8, FAT_NAME_DOT);
684 str_cpy(d->ext, 3, FAT_EXT_PAD);
[1baec4b]685 d->attr = FAT_ATTR_SUBDIR;
686 d->firstc = host2uint16_t_le(childp->firstc);
687 /* TODO: initialize also the date/time members. */
688 }
689 d++;
690 if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
[92fd52d7]691 str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
[1baec4b]692 memset(d, 0, sizeof(fat_dentry_t));
[6eb2e96]693 str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
694 str_cpy(d->ext, 3, FAT_EXT_PAD);
[1baec4b]695 d->attr = FAT_ATTR_SUBDIR;
696 d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
697 host2uint16_t_le(FAT_CLST_RES0) :
698 host2uint16_t_le(parentp->firstc);
699 /* TODO: initialize also the date/time members. */
700 }
701 b->dirty = true; /* need to sync block */
[4b4668e]702 /*
703 * Ignore the return value as we would have fallen through on error
704 * anyway.
705 */
706 (void) block_put(b);
707skip_dots:
[1baec4b]708
[0fdd6bb]709 childp->idx->pfc = parentp->firstc;
710 childp->idx->pdi = i * dps + j;
[6ebe721]711 fibril_mutex_unlock(&childp->idx->lock);
[0fdd6bb]712
[6ebe721]713 fibril_mutex_lock(&childp->lock);
[0fdd6bb]714 childp->lnkcnt = 1;
715 childp->dirty = true; /* need to sync node */
[6ebe721]716 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]717
718 /*
719 * Hash in the index structure into the position hash.
720 */
721 fat_idx_hashin(childp->idx);
722
723 return EOK;
[80e8482]724}
725
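/** Remove the directory entry that links the child node to its parent. */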
[cf95bc0]726int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
[80e8482]727{
[b6035ba]728 fat_node_t *parentp = FAT_NODE(pfn);
729 fat_node_t *childp = FAT_NODE(cfn);
[a31c1ccf]730 fat_bs_t *bs;
731 fat_dentry_t *d;
732 uint16_t bps;
733 block_t *b;
[073f550]734 bool has_children;
[c91f2d1b]735 int rc;
[a31c1ccf]736
[770d281]737 if (!parentp)
738 return EBUSY;
[0be3e8b]739
[073f550]740 rc = fat_has_children(&has_children, cfn);
741 if (rc != EOK)
742 return rc;
743 if (has_children)
[0be3e8b]744 return ENOTEMPTY;
[770d281]745
[6ebe721]746 fibril_mutex_lock(&parentp->lock);
747 fibril_mutex_lock(&childp->lock);
[a31c1ccf]748 assert(childp->lnkcnt == 1);
[6ebe721]749 fibril_mutex_lock(&childp->idx->lock);
[a31c1ccf]750 bs = block_bb_get(childp->idx->dev_handle);
751 bps = uint16_t_le2host(bs->bps);
752
[684b655]753 rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
[a31c1ccf]754 (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
755 BLOCK_FLAGS_NONE);
[46c0498]756 if (rc != EOK)
757 goto error;
[a31c1ccf]758 d = (fat_dentry_t *)b->data +
759 (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
760 /* mark the dentry as not-currently-used */
761 d->name[0] = FAT_DENTRY_ERASED;
762 b->dirty = true; /* need to sync block */
[c91f2d1b]763 rc = block_put(b);
[46c0498]764 if (rc != EOK)
765 goto error;
[a31c1ccf]766
767 /* remove the index structure from the position hash */
768 fat_idx_hashout(childp->idx);
769 /* clear position information */
770 childp->idx->pfc = FAT_CLST_RES0;
771 childp->idx->pdi = 0;
[6ebe721]772 fibril_mutex_unlock(&childp->idx->lock);
[a31c1ccf]773 childp->lnkcnt = 0;
774 childp->dirty = true;
[6ebe721]775 fibril_mutex_unlock(&childp->lock);
776 fibril_mutex_unlock(&parentp->lock);
[a31c1ccf]777
778 return EOK;
[46c0498]779
780error:
781 fibril_mutex_unlock(&childp->idx->lock);
782 fibril_mutex_unlock(&childp->lock);
783 fibril_mutex_unlock(&parentp->lock);
784 return rc;
[80e8482]785}
786
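/** Determine whether a directory node contains any valid dentries.
 *
 * @param has_children Where to store the result.
 * @param fn           Node to examine.
 *
 * @return EOK on success or an error code.
 */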
[073f550]787int fat_has_children(bool *has_children, fs_node_t *fn)
[32fb10ed]788{
[7858bc5f]789 fat_bs_t *bs;
[b6035ba]790 fat_node_t *nodep = FAT_NODE(fn);
[32fb10ed]791 unsigned bps;
792 unsigned dps;
793 unsigned blocks;
[7858bc5f]794 block_t *b;
[32fb10ed]795 unsigned i, j;
[c91f2d1b]796 int rc;
[32fb10ed]797
[073f550]798 if (nodep->type != FAT_DIRECTORY) {
799 *has_children = false;
800 return EOK;
801 }
[b0247bac]802
[6ebe721]803 fibril_mutex_lock(&nodep->idx->lock);
[7858bc5f]804 bs = block_bb_get(nodep->idx->dev_handle);
805 bps = uint16_t_le2host(bs->bps);
[32fb10ed]806 dps = bps / sizeof(fat_dentry_t);
807
[b0247bac]808 blocks = nodep->size / bps;
[32fb10ed]809
810 for (i = 0; i < blocks; i++) {
811 fat_dentry_t *d;
812
[684b655]813 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
[073f550]814 if (rc != EOK) {
815 fibril_mutex_unlock(&nodep->idx->lock);
816 return rc;
817 }
[b0247bac]818 for (j = 0; j < dps; j++) {
[32fb10ed]819 d = ((fat_dentry_t *)b->data) + j;
820 switch (fat_classify_dentry(d)) {
821 case FAT_DENTRY_SKIP:
[0fdd6bb]822 case FAT_DENTRY_FREE:
[32fb10ed]823 continue;
824 case FAT_DENTRY_LAST:
[c91f2d1b]825 rc = block_put(b);
[6ebe721]826 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]827 *has_children = false;
[8810c63]828 return rc;
[32fb10ed]829 default:
830 case FAT_DENTRY_VALID:
[c91f2d1b]831 rc = block_put(b);
[6ebe721]832 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]833 *has_children = true;
[8810c63]834 return rc;
[32fb10ed]835 }
836 }
[c91f2d1b]837 rc = block_put(b);
[8810c63]838 if (rc != EOK) {
839 fibril_mutex_unlock(&nodep->idx->lock);
840 return rc;
841 }
[32fb10ed]842 }
843
[6ebe721]844 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]845 *has_children = false;
846 return EOK;
847}
848
849
850fs_index_t fat_index_get(fs_node_t *fn)
851{
852 return FAT_NODE(fn)->idx->index;
853}
854
855size_t fat_size_get(fs_node_t *fn)
856{
857 return FAT_NODE(fn)->size;
[32fb10ed]858}
859
[073f550]860unsigned fat_lnkcnt_get(fs_node_t *fn)
[74ea3c6]861{
[073f550]862 return FAT_NODE(fn)->lnkcnt;
[74ea3c6]863}
864
[50e5b25]865char fat_plb_get_char(unsigned pos)
[74ea3c6]866{
867 return fat_reg.plb_ro[pos % PLB_SIZE];
868}
869
[b6035ba]870bool fat_is_directory(fs_node_t *fn)
[e1e3b26]871{
[b6035ba]872 return FAT_NODE(fn)->type == FAT_DIRECTORY;
[e1e3b26]873}
874
[b6035ba]875bool fat_is_file(fs_node_t *fn)
[e1e3b26]876{
[b6035ba]877 return FAT_NODE(fn)->type == FAT_FILE;
[e1e3b26]878}
879
[1313ee9]880dev_handle_t fat_device_get(fs_node_t *node)
881{
882 return 0;
883}
884
[a2aa1dec]885/** libfs operations */
886libfs_ops_t fat_libfs_ops = {
[073f550]887 .root_get = fat_root_get,
[a2aa1dec]888 .match = fat_match,
889 .node_get = fat_node_get,
[1313ee9]890 .node_open = fat_node_open,
[06901c6b]891 .node_put = fat_node_put,
[6571b78]892 .create = fat_create_node,
893 .destroy = fat_destroy_node,
[80e8482]894 .link = fat_link,
895 .unlink = fat_unlink,
[073f550]896 .has_children = fat_has_children,
[e1e3b26]897 .index_get = fat_index_get,
898 .size_get = fat_size_get,
899 .lnkcnt_get = fat_lnkcnt_get,
[1313ee9]900 .plb_get_char = fat_plb_get_char,
[e1e3b26]901 .is_directory = fat_is_directory,
[1313ee9]902 .is_file = fat_is_file,
903 .device_get = fat_device_get
[a2aa1dec]904};
905
[0013b9ce]906/*
907 * VFS operations.
908 */
909
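/** Mount a FAT file system instance on a block device.
 *
 * Accepts the mount options, initializes libblock and the block cache,
 * reads the boot sector and sets up the index structures and root node.
 */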
[cde485d]910void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
911{
912 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
[1fbe064b]913 enum cache_mode cmode;
[7858bc5f]914 fat_bs_t *bs;
[7a35204a]915 uint16_t bps;
[689f036]916 uint16_t rde;
[cde485d]917 int rc;
918
[594303b]919 /* accept the mount options */
920 ipc_callid_t callid;
921 size_t size;
[0da4e41]922 if (!async_data_write_receive(&callid, &size)) {
[594303b]923 ipc_answer_0(callid, EINVAL);
924 ipc_answer_0(rid, EINVAL);
925 return;
926 }
927 char *opts = malloc(size + 1);
928 if (!opts) {
929 ipc_answer_0(callid, ENOMEM);
930 ipc_answer_0(rid, ENOMEM);
931 return;
932 }
[0da4e41]933 ipcarg_t retval = async_data_write_finalize(callid, opts, size);
[594303b]934 if (retval != EOK) {
935 ipc_answer_0(rid, retval);
936 free(opts);
937 return;
938 }
939 opts[size] = '\0';
940
[1fbe064b]941 /* Check for option enabling write through. */
942 if (str_cmp(opts, "wtcache") == 0)
943 cmode = CACHE_MODE_WT;
944 else
945 cmode = CACHE_MODE_WB;
946
[7858bc5f]947 /* initialize libblock */
[6284978]948 rc = block_init(dev_handle, BS_SIZE);
[7a35204a]949 if (rc != EOK) {
[6284978]950 ipc_answer_0(rid, rc);
951 return;
952 }
953
954 /* prepare the boot block */
[1ee00b7]955 rc = block_bb_read(dev_handle, BS_BLOCK);
[6284978]956 if (rc != EOK) {
957 block_fini(dev_handle);
958 ipc_answer_0(rid, rc);
[7a35204a]959 return;
960 }
961
[7858bc5f]962 /* get the buffer with the boot sector */
963 bs = block_bb_get(dev_handle);
964
[689f036]965 /* Read the number of root directory entries. */
[7858bc5f]966 bps = uint16_t_le2host(bs->bps);
967 rde = uint16_t_le2host(bs->root_ent_max);
[689f036]968
[7a35204a]969 if (bps != BS_SIZE) {
[7858bc5f]970 block_fini(dev_handle);
[7a35204a]971 ipc_answer_0(rid, ENOTSUP);
972 return;
973 }
974
[f1ba5d6]975 /* Initialize the block cache */
[1fbe064b]976 rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
[f1ba5d6]977 if (rc != EOK) {
978 block_fini(dev_handle);
979 ipc_answer_0(rid, rc);
980 return;
981 }
982
[cde485d]983 rc = fat_idx_init_by_dev_handle(dev_handle);
984 if (rc != EOK) {
[7858bc5f]985 block_fini(dev_handle);
[cde485d]986 ipc_answer_0(rid, rc);
987 return;
988 }
989
[689f036]990 /* Initialize the root node. */
[b6035ba]991 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
992 if (!rfn) {
993 block_fini(dev_handle);
994 fat_idx_fini_by_dev_handle(dev_handle);
995 ipc_answer_0(rid, ENOMEM);
996 return;
997 }
[83937ccd]998 fs_node_initialize(rfn);
[689f036]999 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
1000 if (!rootp) {
[b6035ba]1001 free(rfn);
[7858bc5f]1002 block_fini(dev_handle);
[689f036]1003 fat_idx_fini_by_dev_handle(dev_handle);
1004 ipc_answer_0(rid, ENOMEM);
1005 return;
1006 }
1007 fat_node_initialize(rootp);
1008
1009 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
1010 if (!ridxp) {
[b6035ba]1011 free(rfn);
[689f036]1012 free(rootp);
[b6035ba]1013 block_fini(dev_handle);
[689f036]1014 fat_idx_fini_by_dev_handle(dev_handle);
1015 ipc_answer_0(rid, ENOMEM);
1016 return;
1017 }
1018 assert(ridxp->index == 0);
1019 /* ridxp->lock held */
1020
1021 rootp->type = FAT_DIRECTORY;
1022 rootp->firstc = FAT_CLST_ROOT;
1023 rootp->refcnt = 1;
[5ab597d]1024 rootp->lnkcnt = 0; /* FS root is not linked */
[689f036]1025 rootp->size = rde * sizeof(fat_dentry_t);
1026 rootp->idx = ridxp;
1027 ridxp->nodep = rootp;
[b6035ba]1028 rootp->bp = rfn;
1029 rfn->data = rootp;
[689f036]1030
[6ebe721]1031 fibril_mutex_unlock(&ridxp->lock);
[689f036]1032
[5ab597d]1033 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
[cde485d]1034}
1035
1036void fat_mount(ipc_callid_t rid, ipc_call_t *request)
1037{
[16d17ca]1038 libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[cde485d]1039}
1040
[be815bc]1041void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
1042{
[a2aa1dec]1043 libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[be815bc]1044}
1045
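/** Read bytes from a file or list entries of a directory.
 *
 * For regular files, at most one block worth of data is returned. For
 * directories, the position argument is used as an index into the array
 * of dentries and a single entry name is returned per call.
 */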
[4bf40f6]1046void fat_read(ipc_callid_t rid, ipc_call_t *request)
1047{
1048 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1049 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1050 off_t pos = (off_t)IPC_GET_ARG3(*request);
[073f550]1051 fs_node_t *fn;
[b6035ba]1052 fat_node_t *nodep;
[7858bc5f]1053 fat_bs_t *bs;
[cb682eb]1054 uint16_t bps;
[79d031b]1055 size_t bytes;
[7858bc5f]1056 block_t *b;
[c91f2d1b]1057 int rc;
[79d031b]1058
[073f550]1059 rc = fat_node_get(&fn, dev_handle, index);
1060 if (rc != EOK) {
1061 ipc_answer_0(rid, rc);
1062 return;
1063 }
[b6035ba]1064 if (!fn) {
[4bf40f6]1065 ipc_answer_0(rid, ENOENT);
1066 return;
1067 }
[b6035ba]1068 nodep = FAT_NODE(fn);
[4bf40f6]1069
1070 ipc_callid_t callid;
1071 size_t len;
[0da4e41]1072 if (!async_data_read_receive(&callid, &len)) {
[b6035ba]1073 fat_node_put(fn);
[4bf40f6]1074 ipc_answer_0(callid, EINVAL);
1075 ipc_answer_0(rid, EINVAL);
1076 return;
1077 }
1078
[7858bc5f]1079 bs = block_bb_get(dev_handle);
1080 bps = uint16_t_le2host(bs->bps);
[cb682eb]1081
[4bf40f6]1082 if (nodep->type == FAT_FILE) {
[ddd1219]1083 /*
1084 * Our strategy for regular file reads is to read one block at
1085 * most and make use of the possibility to return less data than
1086 * requested. This keeps the code very simple.
1087 */
[0d974d8]1088 if (pos >= nodep->size) {
[7d861950]1089 /* reading beyond the EOF */
1090 bytes = 0;
[0da4e41]1091 (void) async_data_read_finalize(callid, NULL, 0);
[0d974d8]1092 } else {
1093 bytes = min(len, bps - pos % bps);
1094 bytes = min(bytes, nodep->size - pos);
[684b655]1095 rc = fat_block_get(&b, bs, nodep, pos / bps,
[1d8cdb1]1096 BLOCK_FLAGS_NONE);
[453f2e75]1097 if (rc != EOK) {
1098 fat_node_put(fn);
1099 ipc_answer_0(callid, rc);
1100 ipc_answer_0(rid, rc);
1101 return;
1102 }
[0da4e41]1103 (void) async_data_read_finalize(callid, b->data + pos % bps,
[0d974d8]1104 bytes);
[c91f2d1b]1105 rc = block_put(b);
[453f2e75]1106 if (rc != EOK) {
1107 fat_node_put(fn);
1108 ipc_answer_0(rid, rc);
1109 return;
1110 }
[0d974d8]1111 }
[4bf40f6]1112 } else {
[ddd1219]1113 unsigned bnum;
1114 off_t spos = pos;
1115 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
1116 fat_dentry_t *d;
1117
[4bf40f6]1118 assert(nodep->type == FAT_DIRECTORY);
[ddd1219]1119 assert(nodep->size % bps == 0);
1120 assert(bps % sizeof(fat_dentry_t) == 0);
1121
1122 /*
1123 * Our strategy for readdir() is to use the position pointer as
1124 * an index into the array of all dentries. On entry, it points
1125 * to the first unread dentry. If we skip any dentries, we bump
1126 * the position pointer accordingly.
1127 */
1128 bnum = (pos * sizeof(fat_dentry_t)) / bps;
1129 while (bnum < nodep->size / bps) {
1130 off_t o;
1131
[684b655]1132 rc = fat_block_get(&b, bs, nodep, bnum,
1133 BLOCK_FLAGS_NONE);
[453f2e75]1134 if (rc != EOK)
1135 goto err;
[ddd1219]1136 for (o = pos % (bps / sizeof(fat_dentry_t));
1137 o < bps / sizeof(fat_dentry_t);
1138 o++, pos++) {
1139 d = ((fat_dentry_t *)b->data) + o;
1140 switch (fat_classify_dentry(d)) {
1141 case FAT_DENTRY_SKIP:
[0fdd6bb]1142 case FAT_DENTRY_FREE:
[ddd1219]1143 continue;
1144 case FAT_DENTRY_LAST:
[c91f2d1b]1145 rc = block_put(b);
[453f2e75]1146 if (rc != EOK)
1147 goto err;
[ddd1219]1148 goto miss;
1149 default:
1150 case FAT_DENTRY_VALID:
[0fdd6bb]1151 fat_dentry_name_get(d, name);
[073f550]1152 rc = block_put(b);
[453f2e75]1153 if (rc != EOK)
1154 goto err;
[ddd1219]1155 goto hit;
1156 }
1157 }
[c91f2d1b]1158 rc = block_put(b);
[453f2e75]1159 if (rc != EOK)
1160 goto err;
[ddd1219]1161 bnum++;
1162 }
1163miss:
[453f2e75]1164 rc = fat_node_put(fn);
1165 ipc_answer_0(callid, rc != EOK ? rc : ENOENT);
1166 ipc_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
[4bf40f6]1167 return;
[453f2e75]1168
1169err:
1170 (void) fat_node_put(fn);
1171 ipc_answer_0(callid, rc);
1172 ipc_answer_0(rid, rc);
1173 return;
1174
[ddd1219]1175hit:
[0da4e41]1176 (void) async_data_read_finalize(callid, name, str_size(name) + 1);
[ddd1219]1177 bytes = (pos - spos) + 1;
[4bf40f6]1178 }
1179
[453f2e75]1180 rc = fat_node_put(fn);
1181 ipc_answer_1(rid, rc, (ipcarg_t)bytes);
[4bf40f6]1182}
1183
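/** Write bytes to a file.
 *
 * At most one block worth of data is written per call. If the write
 * extends past the last allocated cluster, new clusters are allocated
 * and any gap is zero-filled.
 */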
[c947dda]1184void fat_write(ipc_callid_t rid, ipc_call_t *request)
1185{
[8d32152]1186 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1187 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1188 off_t pos = (off_t)IPC_GET_ARG3(*request);
[073f550]1189 fs_node_t *fn;
[b6035ba]1190 fat_node_t *nodep;
[7858bc5f]1191 fat_bs_t *bs;
[dfddfcd]1192 size_t bytes, size;
[7858bc5f]1193 block_t *b;
[8d32152]1194 uint16_t bps;
1195 unsigned spc;
[913a821c]1196 unsigned bpc; /* bytes per cluster */
[b4b7187]1197 off_t boundary;
[1d8cdb1]1198 int flags = BLOCK_FLAGS_NONE;
[c91f2d1b]1199 int rc;
[8d32152]1200
[073f550]1201 rc = fat_node_get(&fn, dev_handle, index);
1202 if (rc != EOK) {
1203 ipc_answer_0(rid, rc);
1204 return;
1205 }
[b6035ba]1206 if (!fn) {
[8d32152]1207 ipc_answer_0(rid, ENOENT);
1208 return;
1209 }
[b6035ba]1210 nodep = FAT_NODE(fn);
[8d32152]1211
1212 ipc_callid_t callid;
1213 size_t len;
[0da4e41]1214 if (!async_data_write_receive(&callid, &len)) {
[dfddfcd]1215 (void) fat_node_put(fn);
[8d32152]1216 ipc_answer_0(callid, EINVAL);
1217 ipc_answer_0(rid, EINVAL);
1218 return;
1219 }
1220
[913a821c]1221 bs = block_bb_get(dev_handle);
1222 bps = uint16_t_le2host(bs->bps);
1223 spc = bs->spc;
1224 bpc = bps * spc;
1225
[8d32152]1226 /*
1227 * In all scenarios, we will attempt to write out at most one block worth
1228 * of data. There might be more efficient approaches, but this one
1229 * greatly simplifies fat_write(). Note that we can afford to do this
1230 * because the client must be ready to handle the return value
1231 * signaling a smaller number of bytes written.
1232 */
1233 bytes = min(len, bps - pos % bps);
[1d8cdb1]1234 if (bytes == bps)
1235 flags |= BLOCK_FLAGS_NOREAD;
[8d32152]1236
[913a821c]1237 boundary = ROUND_UP(nodep->size, bpc);
[b4b7187]1238 if (pos < boundary) {
[8d32152]1239 /*
1240 * This is the easier case - we are either overwriting already
1241 * existing contents or writing beyond the EOF, but still within
1242 * the limits of the last cluster. The node size may grow to the
1243 * next block size boundary.
1244 */
[cca29e3c]1245 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
[dfddfcd]1246 if (rc != EOK) {
1247 (void) fat_node_put(fn);
1248 ipc_answer_0(callid, rc);
1249 ipc_answer_0(rid, rc);
1250 return;
1251 }
[684b655]1252 rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
[dfddfcd]1253 if (rc != EOK) {
1254 (void) fat_node_put(fn);
1255 ipc_answer_0(callid, rc);
1256 ipc_answer_0(rid, rc);
1257 return;
1258 }
[0da4e41]1259 (void) async_data_write_finalize(callid, b->data + pos % bps,
[8d32152]1260 bytes);
1261 b->dirty = true; /* need to sync block */
[c91f2d1b]1262 rc = block_put(b);
[dfddfcd]1263 if (rc != EOK) {
1264 (void) fat_node_put(fn);
1265 ipc_answer_0(rid, rc);
1266 return;
1267 }
[8d32152]1268 if (pos + bytes > nodep->size) {
1269 nodep->size = pos + bytes;
1270 nodep->dirty = true; /* need to sync node */
1271 }
[dfddfcd]1272 size = nodep->size;
1273 rc = fat_node_put(fn);
1274 ipc_answer_2(rid, rc, bytes, size);
[8d32152]1275 return;
1276 } else {
1277 /*
1278 * This is the more difficult case. We must allocate new
1279 * clusters for the node and zero them out.
1280 */
1281 unsigned nclsts;
[8334a427]1282 fat_cluster_t mcl, lcl;
1283
[913a821c]1284 nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
[6f2dfd1]1285 /* create an independent chain of nclsts clusters in all FATs */
[dfddfcd]1286 rc = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
1287 if (rc != EOK) {
[6f2dfd1]1288 /* could not allocate a chain of nclsts clusters */
[dfddfcd]1289 (void) fat_node_put(fn);
1290 ipc_answer_0(callid, rc);
1291 ipc_answer_0(rid, rc);
[6f2dfd1]1292 return;
1293 }
1294 /* zero fill any gaps */
[cca29e3c]1295 rc = fat_fill_gap(bs, nodep, mcl, pos);
[dfddfcd]1296 if (rc != EOK) {
1297 (void) fat_free_clusters(bs, dev_handle, mcl);
1298 (void) fat_node_put(fn);
1299 ipc_answer_0(callid, rc);
1300 ipc_answer_0(rid, rc);
1301 return;
1302 }
[684b655]1303 rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
[1d8cdb1]1304 flags);
[dfddfcd]1305 if (rc != EOK) {
1306 (void) fat_free_clusters(bs, dev_handle, mcl);
1307 (void) fat_node_put(fn);
1308 ipc_answer_0(callid, rc);
1309 ipc_answer_0(rid, rc);
1310 return;
1311 }
[0da4e41]1312 (void) async_data_write_finalize(callid, b->data + pos % bps,
[6f2dfd1]1313 bytes);
[b4b7187]1314 b->dirty = true; /* need to sync block */
[c91f2d1b]1315 rc = block_put(b);
[dfddfcd]1316 if (rc != EOK) {
1317 (void) fat_free_clusters(bs, dev_handle, mcl);
1318 (void) fat_node_put(fn);
1319 ipc_answer_0(rid, rc);
1320 return;
1321 }
[6f2dfd1]1322 /*
1323 * Append the cluster chain starting in mcl to the end of the
1324 * node's cluster chain.
1325 */
[cca29e3c]1326 rc = fat_append_clusters(bs, nodep, mcl);
[dfddfcd]1327 if (rc != EOK) {
1328 (void) fat_free_clusters(bs, dev_handle, mcl);
1329 (void) fat_node_put(fn);
1330 ipc_answer_0(rid, rc);
1331 return;
1332 }
1333 nodep->size = size = pos + bytes;
[b4b7187]1334 nodep->dirty = true; /* need to sync node */
[dfddfcd]1335 rc = fat_node_put(fn);
1336 ipc_answer_2(rid, rc, bytes, size);
[6f2dfd1]1337 return;
[8d32152]1338 }
[c947dda]1339}
1340
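/** Truncate a file to the requested size.
 *
 * Growing a node is not supported; shrinking deallocates any clusters
 * past the new end of file.
 */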
[6c71a1f]1341void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1342{
[8334a427]1343 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1344 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
1345 size_t size = (off_t)IPC_GET_ARG3(*request);
[073f550]1346 fs_node_t *fn;
[b6035ba]1347 fat_node_t *nodep;
[913a821c]1348 fat_bs_t *bs;
1349 uint16_t bps;
1350 uint8_t spc;
1351 unsigned bpc; /* bytes per cluster */
[8334a427]1352 int rc;
1353
[073f550]1354 rc = fat_node_get(&fn, dev_handle, index);
1355 if (rc != EOK) {
1356 ipc_answer_0(rid, rc);
1357 return;
1358 }
[b6035ba]1359 if (!fn) {
[8334a427]1360 ipc_answer_0(rid, ENOENT);
1361 return;
1362 }
[b6035ba]1363 nodep = FAT_NODE(fn);
[8334a427]1364
[913a821c]1365 bs = block_bb_get(dev_handle);
1366 bps = uint16_t_le2host(bs->bps);
1367 spc = bs->spc;
1368 bpc = bps * spc;
1369
[8334a427]1370 if (nodep->size == size) {
1371 rc = EOK;
1372 } else if (nodep->size < size) {
1373 /*
[913a821c]1374 * The standard says we have the freedom to grow the node.
[8334a427]1375 * For now, we simply return an error.
1376 */
1377 rc = EINVAL;
[913a821c]1378 } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
1379 /*
1380 * The node will be shrunk, but no clusters will be deallocated.
1381 */
1382 nodep->size = size;
1383 nodep->dirty = true; /* need to sync node */
1384 rc = EOK;
[8334a427]1385 } else {
1386 /*
[913a821c]1387 * The node will be shrunk, clusters will be deallocated.
[8334a427]1388 */
[913a821c]1389 if (size == 0) {
[cca29e3c]1390 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1391 if (rc != EOK)
1392 goto out;
[913a821c]1393 } else {
1394 fat_cluster_t lastc;
[e402382]1395 rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
1396 &lastc, NULL, (size - 1) / bpc);
1397 if (rc != EOK)
1398 goto out;
[cca29e3c]1399 rc = fat_chop_clusters(bs, nodep, lastc);
1400 if (rc != EOK)
1401 goto out;
[913a821c]1402 }
1403 nodep->size = size;
1404 nodep->dirty = true; /* need to sync node */
1405 rc = EOK;
[8334a427]1406 }
[e402382]1407out:
[b6035ba]1408 fat_node_put(fn);
[8334a427]1409 ipc_answer_0(rid, rc);
1410 return;
[6c71a1f]1411}
1412
[c20aa06]1413void fat_close(ipc_callid_t rid, ipc_call_t *request)
1414{
1415 ipc_answer_0(rid, EOK);
1416}
1417
[50e5b25]1418void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1419{
1420 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
1421 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
[073f550]1422 fs_node_t *fn;
[50e5b25]1423 int rc;
1424
[073f550]1425 rc = fat_node_get(&fn, dev_handle, index);
1426 if (rc != EOK) {
1427 ipc_answer_0(rid, rc);
1428 return;
1429 }
[b6035ba]1430 if (!fn) {
[50e5b25]1431 ipc_answer_0(rid, ENOENT);
1432 return;
1433 }
1434
[b6035ba]1435 rc = fat_destroy_node(fn);
[50e5b25]1436 ipc_answer_0(rid, rc);
1437}
1438
[c20aa06]1439void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
1440{
1441 libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1442}
1443
[852b801]1444void fat_stat(ipc_callid_t rid, ipc_call_t *request)
[c20aa06]1445{
[75160a6]1446 libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[c20aa06]1447}
1448
1449void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1450{
1451 /* Dummy implementation */
1452 ipc_answer_0(rid, EOK);
1453}
1454
[be815bc]1455/**
1456 * @}
[c20aa06]1457 */