source: mainline/uspace/srv/fs/fat/fat_ops.c@ 30a4301

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 30a4301 was ffa2c8ef, checked in by Martin Decky <martin@…>, 15 years ago

do not intermix low-level IPC methods with async framework methods

  • Property mode set to 100644
File size: 37.4 KB
RevLine 
[be815bc]1/*
[a2aa1dec]2 * Copyright (c) 2008 Jakub Jermar
[be815bc]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
[033ef7d3]39#include "fat_dentry.h"
40#include "fat_fat.h"
[6364d3c]41#include "../../vfs/vfs.h"
[a2aa1dec]42#include <libfs.h>
[fc840d9]43#include <libblock.h>
[7a35204a]44#include <ipc/services.h>
45#include <ipc/devmap.h>
[ed903174]46#include <macros.h>
[be815bc]47#include <async.h>
48#include <errno.h>
[19f857a]49#include <str.h>
[776f2e6]50#include <byteorder.h>
[d9c8c81]51#include <adt/hash_table.h>
52#include <adt/list.h>
[e1e3b26]53#include <assert.h>
[1e4cada]54#include <fibril_synch.h>
[7a35204a]55#include <sys/mman.h>
[8d32152]56#include <align.h>
[e1e3b26]57
[b6035ba]58#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
59#define FS_NODE(node) ((node) ? (node)->bp : NULL)
60
[7a23d60]61#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
62#define BPC(bs) (BPS((bs)) * SPC((bs)))
63
[6ebe721]64/** Mutex protecting the list of cached free FAT nodes. */
65static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
[add5835]66
67/** List of cached free FAT nodes. */
68static LIST_INITIALIZE(ffn_head);
[6364d3c]69
[0fc1e5d]70/*
71 * Forward declarations of FAT libfs operations.
72 */
[991f645]73static int fat_root_get(fs_node_t **, devmap_handle_t);
[0fc1e5d]74static int fat_match(fs_node_t **, fs_node_t *, const char *);
[991f645]75static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);
[1313ee9]76static int fat_node_open(fs_node_t *);
[0fc1e5d]77static int fat_node_put(fs_node_t *);
[991f645]78static int fat_create_node(fs_node_t **, devmap_handle_t, int);
[0fc1e5d]79static int fat_destroy_node(fs_node_t *);
80static int fat_link(fs_node_t *, fs_node_t *, const char *);
81static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
82static int fat_has_children(bool *, fs_node_t *);
83static fs_index_t fat_index_get(fs_node_t *);
[ed903174]84static aoff64_t fat_size_get(fs_node_t *);
[0fc1e5d]85static unsigned fat_lnkcnt_get(fs_node_t *);
86static char fat_plb_get_char(unsigned);
87static bool fat_is_directory(fs_node_t *);
88static bool fat_is_file(fs_node_t *node);
[991f645]89static devmap_handle_t fat_device_get(fs_node_t *node);
[0fc1e5d]90
91/*
92 * Helper functions.
93 */
[e1e3b26]94static void fat_node_initialize(fat_node_t *node)
[a2aa1dec]95{
[6ebe721]96 fibril_mutex_initialize(&node->lock);
[b6035ba]97 node->bp = NULL;
[869e546]98 node->idx = NULL;
[e1e3b26]99 node->type = 0;
100 link_initialize(&node->ffn_link);
101 node->size = 0;
102 node->lnkcnt = 0;
103 node->refcnt = 0;
104 node->dirty = false;
[377cce8]105 node->lastc_cached_valid = false;
106 node->lastc_cached_value = FAT_CLST_LAST1;
[dba4a23]107 node->currc_cached_valid = false;
108 node->currc_cached_bn = 0;
109 node->currc_cached_value = FAT_CLST_LAST1;
[e1e3b26]110}
111
[4098e38]112static int fat_node_sync(fat_node_t *node)
[e1e3b26]113{
[7858bc5f]114 block_t *b;
115 fat_bs_t *bs;
[beb17734]116 fat_dentry_t *d;
[c91f2d1b]117 int rc;
[beb17734]118
119 assert(node->dirty);
120
[991f645]121 bs = block_bb_get(node->idx->devmap_handle);
[beb17734]122
123 /* Read the block that contains the dentry of interest. */
[991f645]124 rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
[6da81e0]125 NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
[7a23d60]126 BLOCK_FLAGS_NONE);
[4098e38]127 if (rc != EOK)
128 return rc;
[beb17734]129
[7a23d60]130 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));
[beb17734]131
132 d->firstc = host2uint16_t_le(node->firstc);
[a5da446]133 if (node->type == FAT_FILE) {
[beb17734]134 d->size = host2uint32_t_le(node->size);
[a5da446]135 } else if (node->type == FAT_DIRECTORY) {
136 d->attr = FAT_ATTR_SUBDIR;
137 }
138
139 /* TODO: update other fields? (e.g time fields) */
[beb17734]140
141 b->dirty = true; /* need to sync block */
[c91f2d1b]142 rc = block_put(b);
[4098e38]143 return rc;
[e1e3b26]144}
145
[991f645]146static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle)
[430de97]147{
148 link_t *lnk;
149 fat_node_t *nodep;
150 int rc;
151
152 /*
153 * We are called from fat_unmounted() and assume that there are already
154 * no nodes belonging to this instance with non-zero refcount. Therefore
155 * it is sufficient to clean up only the FAT free node list.
156 */
157
158restart:
159 fibril_mutex_lock(&ffn_mutex);
160 for (lnk = ffn_head.next; lnk != &ffn_head; lnk = lnk->next) {
161 nodep = list_get_instance(lnk, fat_node_t, ffn_link);
162 if (!fibril_mutex_trylock(&nodep->lock)) {
163 fibril_mutex_unlock(&ffn_mutex);
164 goto restart;
165 }
166 if (!fibril_mutex_trylock(&nodep->idx->lock)) {
167 fibril_mutex_unlock(&nodep->lock);
168 fibril_mutex_unlock(&ffn_mutex);
169 goto restart;
170 }
[991f645]171 if (nodep->idx->devmap_handle != devmap_handle) {
[430de97]172 fibril_mutex_unlock(&nodep->idx->lock);
173 fibril_mutex_unlock(&nodep->lock);
174 continue;
175 }
176
177 list_remove(&nodep->ffn_link);
178 fibril_mutex_unlock(&ffn_mutex);
179
180 /*
181 * We can unlock the node and its index structure because we are
182 * the last player on this playground and VFS is preventing new
183 * players from entering.
184 */
185 fibril_mutex_unlock(&nodep->idx->lock);
186 fibril_mutex_unlock(&nodep->lock);
187
188 if (nodep->dirty) {
189 rc = fat_node_sync(nodep);
190 if (rc != EOK)
191 return rc;
192 }
193 nodep->idx->nodep = NULL;
194 free(nodep->bp);
195 free(nodep);
196
197 /* Need to restart because we changed the ffn_head list. */
198 goto restart;
199 }
200 fibril_mutex_unlock(&ffn_mutex);
201
202 return EOK;
203}
204
[17bf658]205static int fat_node_get_new(fat_node_t **nodepp)
[9a3d5f0]206{
[b6035ba]207 fs_node_t *fn;
[9a3d5f0]208 fat_node_t *nodep;
[4098e38]209 int rc;
[9a3d5f0]210
[6ebe721]211 fibril_mutex_lock(&ffn_mutex);
[9a3d5f0]212 if (!list_empty(&ffn_head)) {
213 /* Try to use a cached free node structure. */
214 fat_idx_t *idxp_tmp;
215 nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
[6ebe721]216 if (!fibril_mutex_trylock(&nodep->lock))
[9a3d5f0]217 goto skip_cache;
218 idxp_tmp = nodep->idx;
[6ebe721]219 if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
220 fibril_mutex_unlock(&nodep->lock);
[9a3d5f0]221 goto skip_cache;
222 }
223 list_remove(&nodep->ffn_link);
[6ebe721]224 fibril_mutex_unlock(&ffn_mutex);
[4098e38]225 if (nodep->dirty) {
226 rc = fat_node_sync(nodep);
[17bf658]227 if (rc != EOK) {
228 idxp_tmp->nodep = NULL;
229 fibril_mutex_unlock(&nodep->lock);
230 fibril_mutex_unlock(&idxp_tmp->lock);
231 free(nodep->bp);
232 free(nodep);
233 return rc;
234 }
[4098e38]235 }
[9a3d5f0]236 idxp_tmp->nodep = NULL;
[6ebe721]237 fibril_mutex_unlock(&nodep->lock);
238 fibril_mutex_unlock(&idxp_tmp->lock);
[b6035ba]239 fn = FS_NODE(nodep);
[9a3d5f0]240 } else {
241skip_cache:
242 /* Try to allocate a new node structure. */
[6ebe721]243 fibril_mutex_unlock(&ffn_mutex);
[b6035ba]244 fn = (fs_node_t *)malloc(sizeof(fs_node_t));
245 if (!fn)
[17bf658]246 return ENOMEM;
[9a3d5f0]247 nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
[b6035ba]248 if (!nodep) {
249 free(fn);
[17bf658]250 return ENOMEM;
[b6035ba]251 }
[9a3d5f0]252 }
253 fat_node_initialize(nodep);
[83937ccd]254 fs_node_initialize(fn);
[b6035ba]255 fn->data = nodep;
256 nodep->bp = fn;
[9a3d5f0]257
[17bf658]258 *nodepp = nodep;
259 return EOK;
[9a3d5f0]260}
261
[add5835]262/** Internal version of fat_node_get().
263 *
264 * @param idxp Locked index structure.
265 */
[0fc1e5d]266static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
[e1e3b26]267{
[7858bc5f]268 block_t *b;
269 fat_bs_t *bs;
[4573a79]270 fat_dentry_t *d;
[c06dbf9]271 fat_node_t *nodep = NULL;
[c91f2d1b]272 int rc;
[4573a79]273
[add5835]274 if (idxp->nodep) {
[4573a79]275 /*
276 * We are lucky.
277 * The node is already instantiated in memory.
278 */
[6ebe721]279 fibril_mutex_lock(&idxp->nodep->lock);
[e6bc3a5]280 if (!idxp->nodep->refcnt++) {
281 fibril_mutex_lock(&ffn_mutex);
[c06dbf9]282 list_remove(&idxp->nodep->ffn_link);
[e6bc3a5]283 fibril_mutex_unlock(&ffn_mutex);
284 }
[6ebe721]285 fibril_mutex_unlock(&idxp->nodep->lock);
[0fc1e5d]286 *nodepp = idxp->nodep;
287 return EOK;
[4573a79]288 }
289
290 /*
291 * We must instantiate the node from the file system.
292 */
293
[add5835]294 assert(idxp->pfc);
[4573a79]295
[17bf658]296 rc = fat_node_get_new(&nodep);
297 if (rc != EOK)
[0fc1e5d]298 return rc;
[4573a79]299
[991f645]300 bs = block_bb_get(idxp->devmap_handle);
[4573a79]301
[2c4bbcde]302 /* Read the block that contains the dentry of interest. */
[991f645]303 rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL,
[7a23d60]304 (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
[0fc1e5d]305 if (rc != EOK) {
306 (void) fat_node_put(FS_NODE(nodep));
307 return rc;
308 }
[4573a79]309
[7a23d60]310 d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
[2c4bbcde]311 if (d->attr & FAT_ATTR_SUBDIR) {
312 /*
313 * The only directory which does not have this bit set is the
314 * root directory itself. The root directory node is handled
315 * and initialized elsewhere.
316 */
317 nodep->type = FAT_DIRECTORY;
[2ab1023]318 /*
[e2115311]319 * Unfortunately, the 'size' field of the FAT dentry is not
320 * defined for the directory entry type. We must determine the
321 * size of the directory by walking the FAT.
[2ab1023]322 */
[e402382]323 uint16_t clusters;
[991f645]324 rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle,
[4f1c0b4]325 uint16_t_le2host(d->firstc));
[0fc1e5d]326 if (rc != EOK) {
327 (void) fat_node_put(FS_NODE(nodep));
328 return rc;
329 }
[7a23d60]330 nodep->size = BPS(bs) * SPC(bs) * clusters;
[2c4bbcde]331 } else {
332 nodep->type = FAT_FILE;
[2ab1023]333 nodep->size = uint32_t_le2host(d->size);
[2c4bbcde]334 }
335 nodep->firstc = uint16_t_le2host(d->firstc);
336 nodep->lnkcnt = 1;
337 nodep->refcnt = 1;
338
[c91f2d1b]339 rc = block_put(b);
[0fc1e5d]340 if (rc != EOK) {
341 (void) fat_node_put(FS_NODE(nodep));
342 return rc;
343 }
[2c4bbcde]344
345 /* Link the idx structure with the node structure. */
[add5835]346 nodep->idx = idxp;
347 idxp->nodep = nodep;
[2c4bbcde]348
[0fc1e5d]349 *nodepp = nodep;
350 return EOK;
[a2aa1dec]351}
352
[50e5b25]353/*
354 * FAT libfs operations.
355 */
356
[991f645]357int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)
[073f550]358{
[991f645]359 return fat_node_get(rfn, devmap_handle, 0);
[073f550]360}
361
362int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
363{
364 fat_bs_t *bs;
365 fat_node_t *parentp = FAT_NODE(pfn);
366 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
367 unsigned i, j;
368 unsigned blocks;
369 fat_dentry_t *d;
[991f645]370 devmap_handle_t devmap_handle;
[073f550]371 block_t *b;
372 int rc;
373
374 fibril_mutex_lock(&parentp->idx->lock);
[991f645]375 devmap_handle = parentp->idx->devmap_handle;
[a93d79a]376 fibril_mutex_unlock(&parentp->idx->lock);
377
[991f645]378 bs = block_bb_get(devmap_handle);
[7a23d60]379 blocks = parentp->size / BPS(bs);
[073f550]380 for (i = 0; i < blocks; i++) {
381 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
[a93d79a]382 if (rc != EOK)
[073f550]383 return rc;
[7a23d60]384 for (j = 0; j < DPS(bs); j++) {
[073f550]385 d = ((fat_dentry_t *)b->data) + j;
386 switch (fat_classify_dentry(d)) {
387 case FAT_DENTRY_SKIP:
388 case FAT_DENTRY_FREE:
389 continue;
390 case FAT_DENTRY_LAST:
[8810c63]391 /* miss */
[073f550]392 rc = block_put(b);
393 *rfn = NULL;
[8810c63]394 return rc;
[073f550]395 default:
396 case FAT_DENTRY_VALID:
397 fat_dentry_name_get(d, name);
398 break;
399 }
400 if (fat_dentry_namecmp(name, component) == 0) {
401 /* hit */
402 fat_node_t *nodep;
[991f645]403 fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,
[a93d79a]404 parentp->firstc, i * DPS(bs) + j);
[073f550]405 if (!idx) {
406 /*
407 * Can happen if memory is low or if we
408 * run out of 32-bit indices.
409 */
410 rc = block_put(b);
[8810c63]411 return (rc == EOK) ? ENOMEM : rc;
[073f550]412 }
[0fc1e5d]413 rc = fat_node_get_core(&nodep, idx);
[073f550]414 fibril_mutex_unlock(&idx->lock);
[1647323]415 if (rc != EOK) {
416 (void) block_put(b);
417 return rc;
418 }
[073f550]419 *rfn = FS_NODE(nodep);
[1647323]420 rc = block_put(b);
421 if (rc != EOK)
422 (void) fat_node_put(*rfn);
423 return rc;
[073f550]424 }
425 }
426 rc = block_put(b);
[a93d79a]427 if (rc != EOK)
[8810c63]428 return rc;
[073f550]429 }
430
431 *rfn = NULL;
432 return EOK;
433}
434
[add5835]435/** Instantiate a FAT in-core node. */
[991f645]436int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)
[add5835]437{
[b6035ba]438 fat_node_t *nodep;
[add5835]439 fat_idx_t *idxp;
[0fc1e5d]440 int rc;
[add5835]441
[991f645]442 idxp = fat_idx_get_by_index(devmap_handle, index);
[073f550]443 if (!idxp) {
444 *rfn = NULL;
445 return EOK;
446 }
[add5835]447 /* idxp->lock held */
[0fc1e5d]448 rc = fat_node_get_core(&nodep, idxp);
[6ebe721]449 fibril_mutex_unlock(&idxp->lock);
[0fc1e5d]450 if (rc == EOK)
451 *rfn = FS_NODE(nodep);
452 return rc;
[add5835]453}
454
[1313ee9]455int fat_node_open(fs_node_t *fn)
456{
457 /*
458 * Opening a file is stateless, nothing
459 * to be done here.
460 */
461 return EOK;
462}
463
[073f550]464int fat_node_put(fs_node_t *fn)
[06901c6b]465{
[b6035ba]466 fat_node_t *nodep = FAT_NODE(fn);
[6571b78]467 bool destroy = false;
[34b3ce3]468
[6ebe721]469 fibril_mutex_lock(&nodep->lock);
[34b3ce3]470 if (!--nodep->refcnt) {
[6571b78]471 if (nodep->idx) {
[6ebe721]472 fibril_mutex_lock(&ffn_mutex);
[6571b78]473 list_append(&nodep->ffn_link, &ffn_head);
[6ebe721]474 fibril_mutex_unlock(&ffn_mutex);
[6571b78]475 } else {
476 /*
477 * The node does not have any index structure associated
478 * with itself. This can only mean that we are releasing
479 * the node after a failed attempt to allocate the index
480 * structure for it.
481 */
482 destroy = true;
483 }
[34b3ce3]484 }
[6ebe721]485 fibril_mutex_unlock(&nodep->lock);
[b6035ba]486 if (destroy) {
487 free(nodep->bp);
488 free(nodep);
489 }
[073f550]490 return EOK;
[06901c6b]491}
492
[991f645]493int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags)
[80e8482]494{
[6571b78]495 fat_idx_t *idxp;
496 fat_node_t *nodep;
[49df572]497 fat_bs_t *bs;
498 fat_cluster_t mcl, lcl;
499 int rc;
500
[991f645]501 bs = block_bb_get(devmap_handle);
[49df572]502 if (flags & L_DIRECTORY) {
503 /* allocate a cluster */
[991f645]504 rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl);
[073f550]505 if (rc != EOK)
506 return rc;
507 /* populate the new cluster with unused dentries */
[991f645]508 rc = fat_zero_cluster(bs, devmap_handle, mcl);
[073f550]509 if (rc != EOK) {
[991f645]510 (void) fat_free_clusters(bs, devmap_handle, mcl);
[073f550]511 return rc;
512 }
[49df572]513 }
[6571b78]514
[17bf658]515 rc = fat_node_get_new(&nodep);
516 if (rc != EOK) {
[991f645]517 (void) fat_free_clusters(bs, devmap_handle, mcl);
[17bf658]518 return rc;
[49df572]519 }
[991f645]520 rc = fat_idx_get_new(&idxp, devmap_handle);
[9a15176]521 if (rc != EOK) {
[991f645]522 (void) fat_free_clusters(bs, devmap_handle, mcl);
[073f550]523 (void) fat_node_put(FS_NODE(nodep));
[9a15176]524 return rc;
[6571b78]525 }
526 /* idxp->lock held */
527 if (flags & L_DIRECTORY) {
528 nodep->type = FAT_DIRECTORY;
[49df572]529 nodep->firstc = mcl;
[7a23d60]530 nodep->size = BPS(bs) * SPC(bs);
[6571b78]531 } else {
532 nodep->type = FAT_FILE;
[49df572]533 nodep->firstc = FAT_CLST_RES0;
534 nodep->size = 0;
[6571b78]535 }
536 nodep->lnkcnt = 0; /* not linked anywhere */
537 nodep->refcnt = 1;
[49df572]538 nodep->dirty = true;
[6571b78]539
540 nodep->idx = idxp;
541 idxp->nodep = nodep;
542
[6ebe721]543 fibril_mutex_unlock(&idxp->lock);
[073f550]544 *rfn = FS_NODE(nodep);
545 return EOK;
[80e8482]546}
547
[b6035ba]548int fat_destroy_node(fs_node_t *fn)
[80e8482]549{
[b6035ba]550 fat_node_t *nodep = FAT_NODE(fn);
[50e5b25]551 fat_bs_t *bs;
[073f550]552 bool has_children;
553 int rc;
[50e5b25]554
555 /*
556 * The node is not reachable from the file system. This means that the
557 * link count should be zero and that the index structure cannot be
558 * found in the position hash. Obviously, we don't need to lock the node
559 * nor its index structure.
560 */
561 assert(nodep->lnkcnt == 0);
562
563 /*
564 * The node may not have any children.
565 */
[073f550]566 rc = fat_has_children(&has_children, fn);
567 if (rc != EOK)
568 return rc;
569 assert(!has_children);
[50e5b25]570
[991f645]571 bs = block_bb_get(nodep->idx->devmap_handle);
[50e5b25]572 if (nodep->firstc != FAT_CLST_RES0) {
573 assert(nodep->size);
574 /* Free all clusters allocated to the node. */
[991f645]575 rc = fat_free_clusters(bs, nodep->idx->devmap_handle,
[cca29e3c]576 nodep->firstc);
[50e5b25]577 }
578
579 fat_idx_destroy(nodep->idx);
[b6035ba]580 free(nodep->bp);
[50e5b25]581 free(nodep);
[cca29e3c]582 return rc;
[80e8482]583}
584
[b6035ba]585int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
[80e8482]586{
[b6035ba]587 fat_node_t *parentp = FAT_NODE(pfn);
588 fat_node_t *childp = FAT_NODE(cfn);
[0fdd6bb]589 fat_dentry_t *d;
590 fat_bs_t *bs;
591 block_t *b;
[a405563]592 unsigned i, j;
[0fdd6bb]593 unsigned blocks;
[e32b65a]594 fat_cluster_t mcl, lcl;
595 int rc;
[0fdd6bb]596
[6ebe721]597 fibril_mutex_lock(&childp->lock);
[0fdd6bb]598 if (childp->lnkcnt == 1) {
599 /*
600 * On FAT, we don't support multiple hard links.
601 */
[6ebe721]602 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]603 return EMLINK;
604 }
605 assert(childp->lnkcnt == 0);
[6ebe721]606 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]607
608 if (!fat_dentry_name_verify(name)) {
609 /*
610 * Attempt to create unsupported name.
611 */
612 return ENOTSUP;
613 }
614
615 /*
616 * Get us an unused parent node's dentry or grow the parent and allocate
617 * a new one.
618 */
619
[6ebe721]620 fibril_mutex_lock(&parentp->idx->lock);
[991f645]621 bs = block_bb_get(parentp->idx->devmap_handle);
[0fdd6bb]622
[7a23d60]623 blocks = parentp->size / BPS(bs);
[0fdd6bb]624
625 for (i = 0; i < blocks; i++) {
[684b655]626 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
[4b4668e]627 if (rc != EOK) {
628 fibril_mutex_unlock(&parentp->idx->lock);
629 return rc;
630 }
[7a23d60]631 for (j = 0; j < DPS(bs); j++) {
[0fdd6bb]632 d = ((fat_dentry_t *)b->data) + j;
633 switch (fat_classify_dentry(d)) {
634 case FAT_DENTRY_SKIP:
635 case FAT_DENTRY_VALID:
636 /* skipping used and meta entries */
637 continue;
638 case FAT_DENTRY_FREE:
639 case FAT_DENTRY_LAST:
640 /* found an empty slot */
641 goto hit;
642 }
643 }
[c91f2d1b]644 rc = block_put(b);
[4b4668e]645 if (rc != EOK) {
646 fibril_mutex_unlock(&parentp->idx->lock);
647 return rc;
648 }
[0fdd6bb]649 }
[699743c]650 j = 0;
[0fdd6bb]651
652 /*
653 * We need to grow the parent in order to create a new unused dentry.
654 */
[b713492b]655 if (parentp->firstc == FAT_CLST_ROOT) {
[e32b65a]656 /* Can't grow the root directory. */
[6ebe721]657 fibril_mutex_unlock(&parentp->idx->lock);
[e32b65a]658 return ENOSPC;
659 }
[991f645]660 rc = fat_alloc_clusters(bs, parentp->idx->devmap_handle, 1, &mcl, &lcl);
[e32b65a]661 if (rc != EOK) {
[6ebe721]662 fibril_mutex_unlock(&parentp->idx->lock);
[e32b65a]663 return rc;
664 }
[991f645]665 rc = fat_zero_cluster(bs, parentp->idx->devmap_handle, mcl);
[4b4668e]666 if (rc != EOK) {
[991f645]667 (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
[4b4668e]668 fibril_mutex_unlock(&parentp->idx->lock);
669 return rc;
670 }
[377cce8]671 rc = fat_append_clusters(bs, parentp, mcl, lcl);
[4b4668e]672 if (rc != EOK) {
[991f645]673 (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
[4b4668e]674 fibril_mutex_unlock(&parentp->idx->lock);
675 return rc;
676 }
[7a23d60]677 parentp->size += BPS(bs) * SPC(bs);
[d44aabd]678 parentp->dirty = true; /* need to sync node */
[684b655]679 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
[4b4668e]680 if (rc != EOK) {
681 fibril_mutex_unlock(&parentp->idx->lock);
682 return rc;
683 }
[e32b65a]684 d = (fat_dentry_t *)b->data;
[0fdd6bb]685
686hit:
687 /*
688 * At this point we only establish the link between the parent and the
689 * child. The dentry, except of the name and the extension, will remain
[e32b65a]690 * uninitialized until the corresponding node is synced. Thus the valid
691 * dentry data is kept in the child node structure.
[0fdd6bb]692 */
693 memset(d, 0, sizeof(fat_dentry_t));
694 fat_dentry_name_set(d, name);
695 b->dirty = true; /* need to sync block */
[c91f2d1b]696 rc = block_put(b);
[6ebe721]697 fibril_mutex_unlock(&parentp->idx->lock);
[4b4668e]698 if (rc != EOK)
699 return rc;
[0fdd6bb]700
[6ebe721]701 fibril_mutex_lock(&childp->idx->lock);
[1baec4b]702
[24a2517]703 if (childp->type == FAT_DIRECTORY) {
[4b4668e]704 /*
[24a2517]705 * If possible, create the Sub-directory Identifier Entry and
706 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
707 * These entries are not mandatory according to Standard
708 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
709 * rather a sign of our good will.
[4b4668e]710 */
[24a2517]711 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
712 if (rc != EOK) {
713 /*
714 * Rather than returning an error, simply skip the
715 * creation of these two entries.
716 */
717 goto skip_dots;
718 }
[ed903174]719 d = (fat_dentry_t *) b->data;
720 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
721 (str_cmp((char *) d->name, FAT_NAME_DOT)) == 0) {
[24a2517]722 memset(d, 0, sizeof(fat_dentry_t));
[ed903174]723 str_cpy((char *) d->name, 8, FAT_NAME_DOT);
724 str_cpy((char *) d->ext, 3, FAT_EXT_PAD);
[24a2517]725 d->attr = FAT_ATTR_SUBDIR;
726 d->firstc = host2uint16_t_le(childp->firstc);
727 /* TODO: initialize also the date/time members. */
728 }
729 d++;
[ed903174]730 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
731 (str_cmp((char *) d->name, FAT_NAME_DOT_DOT) == 0)) {
[24a2517]732 memset(d, 0, sizeof(fat_dentry_t));
[ed903174]733 str_cpy((char *) d->name, 8, FAT_NAME_DOT_DOT);
734 str_cpy((char *) d->ext, 3, FAT_EXT_PAD);
[24a2517]735 d->attr = FAT_ATTR_SUBDIR;
736 d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
737 host2uint16_t_le(FAT_CLST_RES0) :
738 host2uint16_t_le(parentp->firstc);
739 /* TODO: initialize also the date/time members. */
740 }
741 b->dirty = true; /* need to sync block */
742 /*
743 * Ignore the return value as we would have fallen through on error
744 * anyway.
745 */
746 (void) block_put(b);
[1baec4b]747 }
[4b4668e]748skip_dots:
[1baec4b]749
[0fdd6bb]750 childp->idx->pfc = parentp->firstc;
[7a23d60]751 childp->idx->pdi = i * DPS(bs) + j;
[6ebe721]752 fibril_mutex_unlock(&childp->idx->lock);
[0fdd6bb]753
[6ebe721]754 fibril_mutex_lock(&childp->lock);
[0fdd6bb]755 childp->lnkcnt = 1;
756 childp->dirty = true; /* need to sync node */
[6ebe721]757 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]758
759 /*
760 * Hash in the index structure into the position hash.
761 */
762 fat_idx_hashin(childp->idx);
763
764 return EOK;
[80e8482]765}
766
[cf95bc0]767int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
[80e8482]768{
[b6035ba]769 fat_node_t *parentp = FAT_NODE(pfn);
770 fat_node_t *childp = FAT_NODE(cfn);
[a31c1ccf]771 fat_bs_t *bs;
772 fat_dentry_t *d;
773 block_t *b;
[073f550]774 bool has_children;
[c91f2d1b]775 int rc;
[a31c1ccf]776
[770d281]777 if (!parentp)
778 return EBUSY;
[0be3e8b]779
[073f550]780 rc = fat_has_children(&has_children, cfn);
781 if (rc != EOK)
782 return rc;
783 if (has_children)
[0be3e8b]784 return ENOTEMPTY;
[770d281]785
[6ebe721]786 fibril_mutex_lock(&parentp->lock);
787 fibril_mutex_lock(&childp->lock);
[a31c1ccf]788 assert(childp->lnkcnt == 1);
[6ebe721]789 fibril_mutex_lock(&childp->idx->lock);
[991f645]790 bs = block_bb_get(childp->idx->devmap_handle);
[a31c1ccf]791
[991f645]792 rc = _fat_block_get(&b, bs, childp->idx->devmap_handle, childp->idx->pfc,
[6da81e0]793 NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
[a31c1ccf]794 BLOCK_FLAGS_NONE);
[46c0498]795 if (rc != EOK)
796 goto error;
[a31c1ccf]797 d = (fat_dentry_t *)b->data +
[7a23d60]798 (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
[a31c1ccf]799 /* mark the dentry as not-currently-used */
800 d->name[0] = FAT_DENTRY_ERASED;
801 b->dirty = true; /* need to sync block */
[c91f2d1b]802 rc = block_put(b);
[46c0498]803 if (rc != EOK)
804 goto error;
[a31c1ccf]805
806 /* remove the index structure from the position hash */
807 fat_idx_hashout(childp->idx);
808 /* clear position information */
809 childp->idx->pfc = FAT_CLST_RES0;
810 childp->idx->pdi = 0;
[6ebe721]811 fibril_mutex_unlock(&childp->idx->lock);
[a31c1ccf]812 childp->lnkcnt = 0;
813 childp->dirty = true;
[6ebe721]814 fibril_mutex_unlock(&childp->lock);
815 fibril_mutex_unlock(&parentp->lock);
[a31c1ccf]816
817 return EOK;
[46c0498]818
819error:
820 fibril_mutex_unlock(&parentp->idx->lock);
821 fibril_mutex_unlock(&childp->lock);
822 fibril_mutex_unlock(&childp->idx->lock);
823 return rc;
[80e8482]824}
825
[073f550]826int fat_has_children(bool *has_children, fs_node_t *fn)
[32fb10ed]827{
[7858bc5f]828 fat_bs_t *bs;
[b6035ba]829 fat_node_t *nodep = FAT_NODE(fn);
[32fb10ed]830 unsigned blocks;
[7858bc5f]831 block_t *b;
[32fb10ed]832 unsigned i, j;
[c91f2d1b]833 int rc;
[32fb10ed]834
[073f550]835 if (nodep->type != FAT_DIRECTORY) {
836 *has_children = false;
837 return EOK;
838 }
[b0247bac]839
[6ebe721]840 fibril_mutex_lock(&nodep->idx->lock);
[991f645]841 bs = block_bb_get(nodep->idx->devmap_handle);
[32fb10ed]842
[7a23d60]843 blocks = nodep->size / BPS(bs);
[32fb10ed]844
845 for (i = 0; i < blocks; i++) {
846 fat_dentry_t *d;
847
[684b655]848 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
[073f550]849 if (rc != EOK) {
850 fibril_mutex_unlock(&nodep->idx->lock);
851 return rc;
852 }
[7a23d60]853 for (j = 0; j < DPS(bs); j++) {
[32fb10ed]854 d = ((fat_dentry_t *)b->data) + j;
855 switch (fat_classify_dentry(d)) {
856 case FAT_DENTRY_SKIP:
[0fdd6bb]857 case FAT_DENTRY_FREE:
[32fb10ed]858 continue;
859 case FAT_DENTRY_LAST:
[c91f2d1b]860 rc = block_put(b);
[6ebe721]861 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]862 *has_children = false;
[8810c63]863 return rc;
[32fb10ed]864 default:
865 case FAT_DENTRY_VALID:
[c91f2d1b]866 rc = block_put(b);
[6ebe721]867 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]868 *has_children = true;
[8810c63]869 return rc;
[32fb10ed]870 }
871 }
[c91f2d1b]872 rc = block_put(b);
[8810c63]873 if (rc != EOK) {
874 fibril_mutex_unlock(&nodep->idx->lock);
875 return rc;
876 }
[32fb10ed]877 }
878
[6ebe721]879 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]880 *has_children = false;
881 return EOK;
882}
883
884
885fs_index_t fat_index_get(fs_node_t *fn)
886{
887 return FAT_NODE(fn)->idx->index;
888}
889
[ed903174]890aoff64_t fat_size_get(fs_node_t *fn)
[073f550]891{
892 return FAT_NODE(fn)->size;
[32fb10ed]893}
894
[073f550]895unsigned fat_lnkcnt_get(fs_node_t *fn)
[74ea3c6]896{
[073f550]897 return FAT_NODE(fn)->lnkcnt;
[74ea3c6]898}
899
[50e5b25]900char fat_plb_get_char(unsigned pos)
[74ea3c6]901{
902 return fat_reg.plb_ro[pos % PLB_SIZE];
903}
904
[b6035ba]905bool fat_is_directory(fs_node_t *fn)
[e1e3b26]906{
[b6035ba]907 return FAT_NODE(fn)->type == FAT_DIRECTORY;
[e1e3b26]908}
909
[b6035ba]910bool fat_is_file(fs_node_t *fn)
[e1e3b26]911{
[b6035ba]912 return FAT_NODE(fn)->type == FAT_FILE;
[e1e3b26]913}
914
[991f645]915devmap_handle_t fat_device_get(fs_node_t *node)
[1313ee9]916{
917 return 0;
918}
919
[a2aa1dec]920/** libfs operations */
921libfs_ops_t fat_libfs_ops = {
[073f550]922 .root_get = fat_root_get,
[a2aa1dec]923 .match = fat_match,
924 .node_get = fat_node_get,
[1313ee9]925 .node_open = fat_node_open,
[06901c6b]926 .node_put = fat_node_put,
[6571b78]927 .create = fat_create_node,
928 .destroy = fat_destroy_node,
[80e8482]929 .link = fat_link,
930 .unlink = fat_unlink,
[073f550]931 .has_children = fat_has_children,
[e1e3b26]932 .index_get = fat_index_get,
933 .size_get = fat_size_get,
934 .lnkcnt_get = fat_lnkcnt_get,
[1313ee9]935 .plb_get_char = fat_plb_get_char,
[e1e3b26]936 .is_directory = fat_is_directory,
[1313ee9]937 .is_file = fat_is_file,
938 .device_get = fat_device_get
[a2aa1dec]939};
940
[0013b9ce]941/*
942 * VFS operations.
943 */
944
[cde485d]945void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
946{
[991f645]947 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[1fbe064b]948 enum cache_mode cmode;
[7858bc5f]949 fat_bs_t *bs;
[472c09d]950
951 /* Accept the mount options */
952 char *opts;
[4cac2d69]953 int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);
[472c09d]954
955 if (rc != EOK) {
[ffa2c8ef]956 async_answer_0(rid, rc);
[594303b]957 return;
958 }
959
[1fbe064b]960 /* Check for option enabling write through. */
961 if (str_cmp(opts, "wtcache") == 0)
962 cmode = CACHE_MODE_WT;
963 else
964 cmode = CACHE_MODE_WB;
965
[64aed80]966 free(opts);
967
[7858bc5f]968 /* initialize libblock */
[991f645]969 rc = block_init(devmap_handle, BS_SIZE);
[7a35204a]970 if (rc != EOK) {
[ffa2c8ef]971 async_answer_0(rid, rc);
[6284978]972 return;
973 }
974
975 /* prepare the boot block */
[991f645]976 rc = block_bb_read(devmap_handle, BS_BLOCK);
[6284978]977 if (rc != EOK) {
[991f645]978 block_fini(devmap_handle);
[ffa2c8ef]979 async_answer_0(rid, rc);
[7a35204a]980 return;
981 }
982
[7858bc5f]983 /* get the buffer with the boot sector */
[991f645]984 bs = block_bb_get(devmap_handle);
[7858bc5f]985
[7a23d60]986 if (BPS(bs) != BS_SIZE) {
[991f645]987 block_fini(devmap_handle);
[ffa2c8ef]988 async_answer_0(rid, ENOTSUP);
[7a35204a]989 return;
990 }
991
[f1ba5d6]992 /* Initialize the block cache */
[991f645]993 rc = block_cache_init(devmap_handle, BPS(bs), 0 /* XXX */, cmode);
[f1ba5d6]994 if (rc != EOK) {
[991f645]995 block_fini(devmap_handle);
[ffa2c8ef]996 async_answer_0(rid, rc);
[f1ba5d6]997 return;
998 }
999
[2ffaab5]1000 /* Do some simple sanity checks on the file system. */
[991f645]1001 rc = fat_sanity_check(bs, devmap_handle);
[711e1f32]1002 if (rc != EOK) {
[991f645]1003 (void) block_cache_fini(devmap_handle);
1004 block_fini(devmap_handle);
[ffa2c8ef]1005 async_answer_0(rid, rc);
[711e1f32]1006 return;
1007 }
1008
[991f645]1009 rc = fat_idx_init_by_devmap_handle(devmap_handle);
[cde485d]1010 if (rc != EOK) {
[991f645]1011 (void) block_cache_fini(devmap_handle);
1012 block_fini(devmap_handle);
[ffa2c8ef]1013 async_answer_0(rid, rc);
[cde485d]1014 return;
1015 }
1016
[689f036]1017 /* Initialize the root node. */
[b6035ba]1018 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
1019 if (!rfn) {
[991f645]1020 (void) block_cache_fini(devmap_handle);
1021 block_fini(devmap_handle);
1022 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1023 async_answer_0(rid, ENOMEM);
[b6035ba]1024 return;
1025 }
[83937ccd]1026 fs_node_initialize(rfn);
[689f036]1027 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
1028 if (!rootp) {
[b6035ba]1029 free(rfn);
[991f645]1030 (void) block_cache_fini(devmap_handle);
1031 block_fini(devmap_handle);
1032 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1033 async_answer_0(rid, ENOMEM);
[689f036]1034 return;
1035 }
1036 fat_node_initialize(rootp);
1037
[991f645]1038 fat_idx_t *ridxp = fat_idx_get_by_pos(devmap_handle, FAT_CLST_ROOTPAR, 0);
[689f036]1039 if (!ridxp) {
[b6035ba]1040 free(rfn);
[689f036]1041 free(rootp);
[991f645]1042 (void) block_cache_fini(devmap_handle);
1043 block_fini(devmap_handle);
1044 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1045 async_answer_0(rid, ENOMEM);
[689f036]1046 return;
1047 }
1048 assert(ridxp->index == 0);
1049 /* ridxp->lock held */
1050
1051 rootp->type = FAT_DIRECTORY;
1052 rootp->firstc = FAT_CLST_ROOT;
1053 rootp->refcnt = 1;
[5ab597d]1054 rootp->lnkcnt = 0; /* FS root is not linked */
[7a23d60]1055 rootp->size = RDE(bs) * sizeof(fat_dentry_t);
[689f036]1056 rootp->idx = ridxp;
1057 ridxp->nodep = rootp;
[b6035ba]1058 rootp->bp = rfn;
1059 rfn->data = rootp;
[689f036]1060
[6ebe721]1061 fibril_mutex_unlock(&ridxp->lock);
[689f036]1062
[ffa2c8ef]1063 async_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
[cde485d]1064}
1065
/** Handle the VFS_OUT_MOUNT request by delegating to the generic libfs code. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1070
[3c11713]1071void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
1072{
[991f645]1073 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[430de97]1074 fs_node_t *fn;
1075 fat_node_t *nodep;
1076 int rc;
1077
[991f645]1078 rc = fat_root_get(&fn, devmap_handle);
[430de97]1079 if (rc != EOK) {
[ffa2c8ef]1080 async_answer_0(rid, rc);
[430de97]1081 return;
1082 }
1083 nodep = FAT_NODE(fn);
1084
1085 /*
1086 * We expect exactly two references on the root node. One for the
1087 * fat_root_get() above and one created in fat_mounted().
1088 */
1089 if (nodep->refcnt != 2) {
1090 (void) fat_node_put(fn);
[ffa2c8ef]1091 async_answer_0(rid, EBUSY);
[430de97]1092 return;
1093 }
1094
1095 /*
1096 * Put the root node and force it to the FAT free node list.
1097 */
1098 (void) fat_node_put(fn);
1099 (void) fat_node_put(fn);
1100
1101 /*
1102 * Perform cleanup of the node structures, index structures and
1103 * associated data. Write back this file system's dirty blocks and
1104 * stop using libblock for this instance.
1105 */
[991f645]1106 (void) fat_node_fini_by_devmap_handle(devmap_handle);
1107 fat_idx_fini_by_devmap_handle(devmap_handle);
1108 (void) block_cache_fini(devmap_handle);
1109 block_fini(devmap_handle);
[430de97]1110
[ffa2c8ef]1111 async_answer_0(rid, EOK);
[3c11713]1112}
1113
/** Handle the VFS_OUT_UNMOUNT request by delegating to the generic libfs code. */
void fat_unmount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_unmount(&fat_libfs_ops, rid, request);
}
1118
[be815bc]1119void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
1120{
[a2aa1dec]1121 libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[be815bc]1122}
1123
[4bf40f6]1124void fat_read(ipc_callid_t rid, ipc_call_t *request)
1125{
[991f645]1126 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1127 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1128 aoff64_t pos =
1129 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1130 fs_node_t *fn;
[b6035ba]1131 fat_node_t *nodep;
[7858bc5f]1132 fat_bs_t *bs;
[79d031b]1133 size_t bytes;
[7858bc5f]1134 block_t *b;
[c91f2d1b]1135 int rc;
[79d031b]1136
[991f645]1137 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1138 if (rc != EOK) {
[ffa2c8ef]1139 async_answer_0(rid, rc);
[073f550]1140 return;
1141 }
[b6035ba]1142 if (!fn) {
[ffa2c8ef]1143 async_answer_0(rid, ENOENT);
[4bf40f6]1144 return;
1145 }
[b6035ba]1146 nodep = FAT_NODE(fn);
[4bf40f6]1147
1148 ipc_callid_t callid;
1149 size_t len;
[0da4e41]1150 if (!async_data_read_receive(&callid, &len)) {
[b6035ba]1151 fat_node_put(fn);
[ffa2c8ef]1152 async_answer_0(callid, EINVAL);
1153 async_answer_0(rid, EINVAL);
[4bf40f6]1154 return;
1155 }
1156
[991f645]1157 bs = block_bb_get(devmap_handle);
[cb682eb]1158
[4bf40f6]1159 if (nodep->type == FAT_FILE) {
[ddd1219]1160 /*
1161 * Our strategy for regular file reads is to read one block at
1162 * most and make use of the possibility to return less data than
1163 * requested. This keeps the code very simple.
1164 */
[0d974d8]1165 if (pos >= nodep->size) {
[7d861950]1166 /* reading beyond the EOF */
1167 bytes = 0;
[0da4e41]1168 (void) async_data_read_finalize(callid, NULL, 0);
[0d974d8]1169 } else {
[7a23d60]1170 bytes = min(len, BPS(bs) - pos % BPS(bs));
[0d974d8]1171 bytes = min(bytes, nodep->size - pos);
[7a23d60]1172 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
[1d8cdb1]1173 BLOCK_FLAGS_NONE);
[453f2e75]1174 if (rc != EOK) {
1175 fat_node_put(fn);
[ffa2c8ef]1176 async_answer_0(callid, rc);
1177 async_answer_0(rid, rc);
[453f2e75]1178 return;
1179 }
[7a23d60]1180 (void) async_data_read_finalize(callid,
1181 b->data + pos % BPS(bs), bytes);
[c91f2d1b]1182 rc = block_put(b);
[453f2e75]1183 if (rc != EOK) {
1184 fat_node_put(fn);
[ffa2c8ef]1185 async_answer_0(rid, rc);
[453f2e75]1186 return;
1187 }
[0d974d8]1188 }
[4bf40f6]1189 } else {
[ddd1219]1190 unsigned bnum;
[ed903174]1191 aoff64_t spos = pos;
[ddd1219]1192 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
1193 fat_dentry_t *d;
1194
[4bf40f6]1195 assert(nodep->type == FAT_DIRECTORY);
[7a23d60]1196 assert(nodep->size % BPS(bs) == 0);
1197 assert(BPS(bs) % sizeof(fat_dentry_t) == 0);
[ddd1219]1198
1199 /*
1200 * Our strategy for readdir() is to use the position pointer as
1201 * an index into the array of all dentries. On entry, it points
1202 * to the first unread dentry. If we skip any dentries, we bump
1203 * the position pointer accordingly.
1204 */
[7a23d60]1205 bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs);
1206 while (bnum < nodep->size / BPS(bs)) {
[ed903174]1207 aoff64_t o;
[ddd1219]1208
[684b655]1209 rc = fat_block_get(&b, bs, nodep, bnum,
1210 BLOCK_FLAGS_NONE);
[453f2e75]1211 if (rc != EOK)
1212 goto err;
[7a23d60]1213 for (o = pos % (BPS(bs) / sizeof(fat_dentry_t));
1214 o < BPS(bs) / sizeof(fat_dentry_t);
[ddd1219]1215 o++, pos++) {
1216 d = ((fat_dentry_t *)b->data) + o;
1217 switch (fat_classify_dentry(d)) {
1218 case FAT_DENTRY_SKIP:
[0fdd6bb]1219 case FAT_DENTRY_FREE:
[ddd1219]1220 continue;
1221 case FAT_DENTRY_LAST:
[c91f2d1b]1222 rc = block_put(b);
[453f2e75]1223 if (rc != EOK)
1224 goto err;
[ddd1219]1225 goto miss;
1226 default:
1227 case FAT_DENTRY_VALID:
[0fdd6bb]1228 fat_dentry_name_get(d, name);
[073f550]1229 rc = block_put(b);
[453f2e75]1230 if (rc != EOK)
1231 goto err;
[ddd1219]1232 goto hit;
1233 }
1234 }
[c91f2d1b]1235 rc = block_put(b);
[453f2e75]1236 if (rc != EOK)
1237 goto err;
[ddd1219]1238 bnum++;
1239 }
1240miss:
[453f2e75]1241 rc = fat_node_put(fn);
[ffa2c8ef]1242 async_answer_0(callid, rc != EOK ? rc : ENOENT);
1243 async_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
[4bf40f6]1244 return;
[453f2e75]1245
1246err:
1247 (void) fat_node_put(fn);
[ffa2c8ef]1248 async_answer_0(callid, rc);
1249 async_answer_0(rid, rc);
[453f2e75]1250 return;
1251
[ddd1219]1252hit:
[0da4e41]1253 (void) async_data_read_finalize(callid, name, str_size(name) + 1);
[ddd1219]1254 bytes = (pos - spos) + 1;
[4bf40f6]1255 }
1256
[453f2e75]1257 rc = fat_node_put(fn);
[ffa2c8ef]1258 async_answer_1(rid, rc, (sysarg_t)bytes);
[4bf40f6]1259}
1260
[c947dda]1261void fat_write(ipc_callid_t rid, ipc_call_t *request)
1262{
[991f645]1263 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1264 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1265 aoff64_t pos =
1266 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1267 fs_node_t *fn;
[b6035ba]1268 fat_node_t *nodep;
[7858bc5f]1269 fat_bs_t *bs;
[dfddfcd]1270 size_t bytes, size;
[7858bc5f]1271 block_t *b;
[ed903174]1272 aoff64_t boundary;
[1d8cdb1]1273 int flags = BLOCK_FLAGS_NONE;
[c91f2d1b]1274 int rc;
[8d32152]1275
[991f645]1276 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1277 if (rc != EOK) {
[ffa2c8ef]1278 async_answer_0(rid, rc);
[073f550]1279 return;
1280 }
[b6035ba]1281 if (!fn) {
[ffa2c8ef]1282 async_answer_0(rid, ENOENT);
[8d32152]1283 return;
1284 }
[b6035ba]1285 nodep = FAT_NODE(fn);
[8d32152]1286
1287 ipc_callid_t callid;
1288 size_t len;
[0da4e41]1289 if (!async_data_write_receive(&callid, &len)) {
[dfddfcd]1290 (void) fat_node_put(fn);
[ffa2c8ef]1291 async_answer_0(callid, EINVAL);
1292 async_answer_0(rid, EINVAL);
[8d32152]1293 return;
1294 }
1295
[991f645]1296 bs = block_bb_get(devmap_handle);
[913a821c]1297
[8d32152]1298 /*
1299 * In all scenarios, we will attempt to write out only one block worth
1300 * of data at maximum. There might be some more efficient approaches,
1301 * but this one greatly simplifies fat_write(). Note that we can afford
1302 * to do this because the client must be ready to handle the return
1303 * value signalizing a smaller number of bytes written.
1304 */
[7a23d60]1305 bytes = min(len, BPS(bs) - pos % BPS(bs));
1306 if (bytes == BPS(bs))
[1d8cdb1]1307 flags |= BLOCK_FLAGS_NOREAD;
[8d32152]1308
[7a23d60]1309 boundary = ROUND_UP(nodep->size, BPC(bs));
[b4b7187]1310 if (pos < boundary) {
[8d32152]1311 /*
1312 * This is the easier case - we are either overwriting already
1313 * existing contents or writing behind the EOF, but still within
1314 * the limits of the last cluster. The node size may grow to the
1315 * next block size boundary.
1316 */
[cca29e3c]1317 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
[dfddfcd]1318 if (rc != EOK) {
1319 (void) fat_node_put(fn);
[ffa2c8ef]1320 async_answer_0(callid, rc);
1321 async_answer_0(rid, rc);
[dfddfcd]1322 return;
1323 }
[7a23d60]1324 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
[dfddfcd]1325 if (rc != EOK) {
1326 (void) fat_node_put(fn);
[ffa2c8ef]1327 async_answer_0(callid, rc);
1328 async_answer_0(rid, rc);
[dfddfcd]1329 return;
1330 }
[7a23d60]1331 (void) async_data_write_finalize(callid,
1332 b->data + pos % BPS(bs), bytes);
[8d32152]1333 b->dirty = true; /* need to sync block */
[c91f2d1b]1334 rc = block_put(b);
[dfddfcd]1335 if (rc != EOK) {
1336 (void) fat_node_put(fn);
[ffa2c8ef]1337 async_answer_0(rid, rc);
[dfddfcd]1338 return;
1339 }
[8d32152]1340 if (pos + bytes > nodep->size) {
1341 nodep->size = pos + bytes;
1342 nodep->dirty = true; /* need to sync node */
1343 }
[dfddfcd]1344 size = nodep->size;
1345 rc = fat_node_put(fn);
[ffa2c8ef]1346 async_answer_2(rid, rc, bytes, nodep->size);
[8d32152]1347 return;
1348 } else {
1349 /*
1350 * This is the more difficult case. We must allocate new
1351 * clusters for the node and zero them out.
1352 */
1353 unsigned nclsts;
[8334a427]1354 fat_cluster_t mcl, lcl;
1355
[7a23d60]1356 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
[6f2dfd1]1357 /* create an independent chain of nclsts clusters in all FATs */
[991f645]1358 rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);
[dfddfcd]1359 if (rc != EOK) {
[6f2dfd1]1360 /* could not allocate a chain of nclsts clusters */
[dfddfcd]1361 (void) fat_node_put(fn);
[ffa2c8ef]1362 async_answer_0(callid, rc);
1363 async_answer_0(rid, rc);
[6f2dfd1]1364 return;
1365 }
1366 /* zero fill any gaps */
[cca29e3c]1367 rc = fat_fill_gap(bs, nodep, mcl, pos);
[dfddfcd]1368 if (rc != EOK) {
[991f645]1369 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1370 (void) fat_node_put(fn);
[ffa2c8ef]1371 async_answer_0(callid, rc);
1372 async_answer_0(rid, rc);
[dfddfcd]1373 return;
1374 }
[991f645]1375 rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL,
[7a23d60]1376 (pos / BPS(bs)) % SPC(bs), flags);
[dfddfcd]1377 if (rc != EOK) {
[991f645]1378 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1379 (void) fat_node_put(fn);
[ffa2c8ef]1380 async_answer_0(callid, rc);
1381 async_answer_0(rid, rc);
[dfddfcd]1382 return;
1383 }
[7a23d60]1384 (void) async_data_write_finalize(callid,
1385 b->data + pos % BPS(bs), bytes);
[b4b7187]1386 b->dirty = true; /* need to sync block */
[c91f2d1b]1387 rc = block_put(b);
[dfddfcd]1388 if (rc != EOK) {
[991f645]1389 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1390 (void) fat_node_put(fn);
[ffa2c8ef]1391 async_answer_0(rid, rc);
[dfddfcd]1392 return;
1393 }
[6f2dfd1]1394 /*
1395 * Append the cluster chain starting in mcl to the end of the
1396 * node's cluster chain.
1397 */
[377cce8]1398 rc = fat_append_clusters(bs, nodep, mcl, lcl);
[dfddfcd]1399 if (rc != EOK) {
[991f645]1400 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1401 (void) fat_node_put(fn);
[ffa2c8ef]1402 async_answer_0(rid, rc);
[dfddfcd]1403 return;
1404 }
1405 nodep->size = size = pos + bytes;
[b4b7187]1406 nodep->dirty = true; /* need to sync node */
[dfddfcd]1407 rc = fat_node_put(fn);
[ffa2c8ef]1408 async_answer_2(rid, rc, bytes, size);
[6f2dfd1]1409 return;
[8d32152]1410 }
[c947dda]1411}
1412
[6c71a1f]1413void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1414{
[991f645]1415 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1416 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1417 aoff64_t size =
1418 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1419 fs_node_t *fn;
[b6035ba]1420 fat_node_t *nodep;
[913a821c]1421 fat_bs_t *bs;
[8334a427]1422 int rc;
1423
[991f645]1424 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1425 if (rc != EOK) {
[ffa2c8ef]1426 async_answer_0(rid, rc);
[073f550]1427 return;
1428 }
[b6035ba]1429 if (!fn) {
[ffa2c8ef]1430 async_answer_0(rid, ENOENT);
[8334a427]1431 return;
1432 }
[b6035ba]1433 nodep = FAT_NODE(fn);
[8334a427]1434
[991f645]1435 bs = block_bb_get(devmap_handle);
[913a821c]1436
[8334a427]1437 if (nodep->size == size) {
1438 rc = EOK;
1439 } else if (nodep->size < size) {
1440 /*
[913a821c]1441 * The standard says we have the freedom to grow the node.
[8334a427]1442 * For now, we simply return an error.
1443 */
1444 rc = EINVAL;
[7a23d60]1445 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
[913a821c]1446 /*
1447 * The node will be shrunk, but no clusters will be deallocated.
1448 */
1449 nodep->size = size;
1450 nodep->dirty = true; /* need to sync node */
1451 rc = EOK;
[8334a427]1452 } else {
1453 /*
[913a821c]1454 * The node will be shrunk, clusters will be deallocated.
[8334a427]1455 */
[913a821c]1456 if (size == 0) {
[cca29e3c]1457 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1458 if (rc != EOK)
1459 goto out;
[913a821c]1460 } else {
1461 fat_cluster_t lastc;
[991f645]1462 rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
[7a23d60]1463 &lastc, NULL, (size - 1) / BPC(bs));
[e402382]1464 if (rc != EOK)
1465 goto out;
[cca29e3c]1466 rc = fat_chop_clusters(bs, nodep, lastc);
1467 if (rc != EOK)
1468 goto out;
[913a821c]1469 }
1470 nodep->size = size;
1471 nodep->dirty = true; /* need to sync node */
1472 rc = EOK;
[8334a427]1473 }
[e402382]1474out:
[b6035ba]1475 fat_node_put(fn);
[ffa2c8ef]1476 async_answer_0(rid, rc);
[8334a427]1477 return;
[6c71a1f]1478}
1479
[c20aa06]1480void fat_close(ipc_callid_t rid, ipc_call_t *request)
1481{
[ffa2c8ef]1482 async_answer_0(rid, EOK);
[c20aa06]1483}
1484
[50e5b25]1485void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1486{
[991f645]1487 devmap_handle_t devmap_handle = (devmap_handle_t)IPC_GET_ARG1(*request);
[50e5b25]1488 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
[073f550]1489 fs_node_t *fn;
[50e5b25]1490 int rc;
1491
[991f645]1492 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1493 if (rc != EOK) {
[ffa2c8ef]1494 async_answer_0(rid, rc);
[073f550]1495 return;
1496 }
[b6035ba]1497 if (!fn) {
[ffa2c8ef]1498 async_answer_0(rid, ENOENT);
[50e5b25]1499 return;
1500 }
1501
[b6035ba]1502 rc = fat_destroy_node(fn);
[ffa2c8ef]1503 async_answer_0(rid, rc);
[50e5b25]1504}
1505
[c20aa06]1506void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
1507{
1508 libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1509}
1510
[852b801]1511void fat_stat(ipc_callid_t rid, ipc_call_t *request)
[c20aa06]1512{
[75160a6]1513 libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[c20aa06]1514}
1515
1516void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1517{
[991f645]1518 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[69a60c4]1519 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1520
1521 fs_node_t *fn;
[991f645]1522 int rc = fat_node_get(&fn, devmap_handle, index);
[69a60c4]1523 if (rc != EOK) {
[ffa2c8ef]1524 async_answer_0(rid, rc);
[69a60c4]1525 return;
1526 }
1527 if (!fn) {
[ffa2c8ef]1528 async_answer_0(rid, ENOENT);
[69a60c4]1529 return;
1530 }
1531
1532 fat_node_t *nodep = FAT_NODE(fn);
1533
1534 nodep->dirty = true;
1535 rc = fat_node_sync(nodep);
1536
1537 fat_node_put(fn);
[ffa2c8ef]1538 async_answer_0(rid, rc);
[c20aa06]1539}
1540
[be815bc]1541/**
1542 * @}
[c20aa06]1543 */
Note: See TracBrowser for help on using the repository browser.