source: mainline/uspace/srv/fs/fat/fat_ops.c@ a39cfb8

Last change on this file since a39cfb8 was b62dc100, checked in by Jakub Jermar <jakub@…>, 14 years ago

Avoid having '\0' in FAT dentry names by avoiding str_cpy().

[be815bc]1/*
[a2aa1dec]2 * Copyright (c) 2008 Jakub Jermar
[be815bc]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
[033ef7d3]39#include "fat_dentry.h"
40#include "fat_fat.h"
[6364d3c]41#include "../../vfs/vfs.h"
[a2aa1dec]42#include <libfs.h>
[fc840d9]43#include <libblock.h>
[7a35204a]44#include <ipc/services.h>
45#include <ipc/devmap.h>
[ed903174]46#include <macros.h>
[be815bc]47#include <async.h>
48#include <errno.h>
[19f857a]49#include <str.h>
[776f2e6]50#include <byteorder.h>
[d9c8c81]51#include <adt/hash_table.h>
52#include <adt/list.h>
[e1e3b26]53#include <assert.h>
[1e4cada]54#include <fibril_synch.h>
[7a35204a]55#include <sys/mman.h>
[8d32152]56#include <align.h>
[e1e3b26]57
[b6035ba]58#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
59#define FS_NODE(node) ((node) ? (node)->bp : NULL)
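/*
 * Each FAT node is represented by a pair of structures: the generic libfs
 * fs_node_t and the FAT-specific fat_node_t. The two point at each other
 * via fs_node_t.data and fat_node_t.bp; the macros above convert between
 * them (the pair is wired up in fat_node_get_new()).
 */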
60
[7a23d60]61#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
62#define BPC(bs) (BPS((bs)) * SPC((bs)))
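/*
 * DPS: directory entries per sector, BPC: bytes per cluster. As an
 * illustration only (assuming the common 512-byte sector and the 32-byte
 * fat_dentry_t), DPS(bs) == 512 / 32 == 16 and, with 8 sectors per
 * cluster, BPC(bs) == 512 * 8 == 4096.
 */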
63
[6ebe721]64/** Mutex protecting the list of cached free FAT nodes. */
65static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
[add5835]66
67/** List of cached free FAT nodes. */
68static LIST_INITIALIZE(ffn_head);
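/*
 * Nodes on this list have a zero reference count but are kept in memory so
 * that they can be revived cheaply: fat_node_put() appends nodes here and
 * fat_node_get_new() recycles them when a fresh node structure is needed.
 */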
[6364d3c]69
[0fc1e5d]70/*
71 * Forward declarations of FAT libfs operations.
72 */
[991f645]73static int fat_root_get(fs_node_t **, devmap_handle_t);
[0fc1e5d]74static int fat_match(fs_node_t **, fs_node_t *, const char *);
[991f645]75static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);
[1313ee9]76static int fat_node_open(fs_node_t *);
[0fc1e5d]77static int fat_node_put(fs_node_t *);
[991f645]78static int fat_create_node(fs_node_t **, devmap_handle_t, int);
[0fc1e5d]79static int fat_destroy_node(fs_node_t *);
80static int fat_link(fs_node_t *, fs_node_t *, const char *);
81static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
82static int fat_has_children(bool *, fs_node_t *);
83static fs_index_t fat_index_get(fs_node_t *);
[ed903174]84static aoff64_t fat_size_get(fs_node_t *);
[0fc1e5d]85static unsigned fat_lnkcnt_get(fs_node_t *);
86static char fat_plb_get_char(unsigned);
87static bool fat_is_directory(fs_node_t *);
88static bool fat_is_file(fs_node_t *node);
[991f645]89static devmap_handle_t fat_device_get(fs_node_t *node);
[0fc1e5d]90
91/*
92 * Helper functions.
93 */
[e1e3b26]94static void fat_node_initialize(fat_node_t *node)
[a2aa1dec]95{
[6ebe721]96 fibril_mutex_initialize(&node->lock);
[b6035ba]97 node->bp = NULL;
[869e546]98 node->idx = NULL;
[e1e3b26]99 node->type = 0;
100 link_initialize(&node->ffn_link);
101 node->size = 0;
102 node->lnkcnt = 0;
103 node->refcnt = 0;
104 node->dirty = false;
[377cce8]105 node->lastc_cached_valid = false;
106 node->lastc_cached_value = FAT_CLST_LAST1;
[dba4a23]107 node->currc_cached_valid = false;
108 node->currc_cached_bn = 0;
109 node->currc_cached_value = FAT_CLST_LAST1;
[e1e3b26]110}
111
[4098e38]112static int fat_node_sync(fat_node_t *node)
[e1e3b26]113{
[7858bc5f]114 block_t *b;
115 fat_bs_t *bs;
[beb17734]116 fat_dentry_t *d;
[c91f2d1b]117 int rc;
[beb17734]118
119 assert(node->dirty);
120
[991f645]121 bs = block_bb_get(node->idx->devmap_handle);
[beb17734]122
123 /* Read the block that contains the dentry of interest. */
[991f645]124 rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
[6da81e0]125 NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
[7a23d60]126 BLOCK_FLAGS_NONE);
[4098e38]127 if (rc != EOK)
128 return rc;
[beb17734]129
[7a23d60]130 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));
[beb17734]131
132 d->firstc = host2uint16_t_le(node->firstc);
[a5da446]133 if (node->type == FAT_FILE) {
[beb17734]134 d->size = host2uint32_t_le(node->size);
[a5da446]135 } else if (node->type == FAT_DIRECTORY) {
136 d->attr = FAT_ATTR_SUBDIR;
137 }
138
139 /* TODO: update other fields? (e.g. time fields) */
[beb17734]140
141 b->dirty = true; /* need to sync block */
[c91f2d1b]142 rc = block_put(b);
[4098e38]143 return rc;
[e1e3b26]144}
145
[991f645]146static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle)
[430de97]147{
148 link_t *lnk;
149 fat_node_t *nodep;
150 int rc;
151
152 /*
153 * We are called from fat_unmounted() and assume that there are already
154 * no nodes belonging to this instance with non-zero refcount. Therefore
155 * it is sufficient to clean up only the FAT free node list.
156 */
157
158restart:
159 fibril_mutex_lock(&ffn_mutex);
160 for (lnk = ffn_head.next; lnk != &ffn_head; lnk = lnk->next) {
161 nodep = list_get_instance(lnk, fat_node_t, ffn_link);
162 if (!fibril_mutex_trylock(&nodep->lock)) {
163 fibril_mutex_unlock(&ffn_mutex);
164 goto restart;
165 }
166 if (!fibril_mutex_trylock(&nodep->idx->lock)) {
167 fibril_mutex_unlock(&nodep->lock);
168 fibril_mutex_unlock(&ffn_mutex);
169 goto restart;
170 }
[991f645]171 if (nodep->idx->devmap_handle != devmap_handle) {
[430de97]172 fibril_mutex_unlock(&nodep->idx->lock);
173 fibril_mutex_unlock(&nodep->lock);
174 continue;
175 }
176
177 list_remove(&nodep->ffn_link);
178 fibril_mutex_unlock(&ffn_mutex);
179
180 /*
181 * We can unlock the node and its index structure because we are
182 * the last player on this playground and VFS is preventing new
183 * players from entering.
184 */
185 fibril_mutex_unlock(&nodep->idx->lock);
186 fibril_mutex_unlock(&nodep->lock);
187
188 if (nodep->dirty) {
189 rc = fat_node_sync(nodep);
190 if (rc != EOK)
191 return rc;
192 }
193 nodep->idx->nodep = NULL;
194 free(nodep->bp);
195 free(nodep);
196
197 /* Need to restart because we changed the ffn_head list. */
198 goto restart;
199 }
200 fibril_mutex_unlock(&ffn_mutex);
201
202 return EOK;
203}
204
[17bf658]205static int fat_node_get_new(fat_node_t **nodepp)
[9a3d5f0]206{
[b6035ba]207 fs_node_t *fn;
[9a3d5f0]208 fat_node_t *nodep;
[4098e38]209 int rc;
[9a3d5f0]210
[6ebe721]211 fibril_mutex_lock(&ffn_mutex);
[9a3d5f0]212 if (!list_empty(&ffn_head)) {
213 /* Try to use a cached free node structure. */
214 fat_idx_t *idxp_tmp;
215 nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
[6ebe721]216 if (!fibril_mutex_trylock(&nodep->lock))
[9a3d5f0]217 goto skip_cache;
218 idxp_tmp = nodep->idx;
[6ebe721]219 if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
220 fibril_mutex_unlock(&nodep->lock);
[9a3d5f0]221 goto skip_cache;
222 }
223 list_remove(&nodep->ffn_link);
[6ebe721]224 fibril_mutex_unlock(&ffn_mutex);
[4098e38]225 if (nodep->dirty) {
226 rc = fat_node_sync(nodep);
[17bf658]227 if (rc != EOK) {
228 idxp_tmp->nodep = NULL;
229 fibril_mutex_unlock(&nodep->lock);
230 fibril_mutex_unlock(&idxp_tmp->lock);
231 free(nodep->bp);
232 free(nodep);
233 return rc;
234 }
[4098e38]235 }
[9a3d5f0]236 idxp_tmp->nodep = NULL;
[6ebe721]237 fibril_mutex_unlock(&nodep->lock);
238 fibril_mutex_unlock(&idxp_tmp->lock);
[b6035ba]239 fn = FS_NODE(nodep);
[9a3d5f0]240 } else {
241skip_cache:
242 /* Try to allocate a new node structure. */
[6ebe721]243 fibril_mutex_unlock(&ffn_mutex);
[b6035ba]244 fn = (fs_node_t *)malloc(sizeof(fs_node_t));
245 if (!fn)
[17bf658]246 return ENOMEM;
[9a3d5f0]247 nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
[b6035ba]248 if (!nodep) {
249 free(fn);
[17bf658]250 return ENOMEM;
[b6035ba]251 }
[9a3d5f0]252 }
253 fat_node_initialize(nodep);
[83937ccd]254 fs_node_initialize(fn);
[b6035ba]255 fn->data = nodep;
256 nodep->bp = fn;
[9a3d5f0]257
[17bf658]258 *nodepp = nodep;
259 return EOK;
[9a3d5f0]260}
261
[add5835]262/** Internal version of fat_node_get().
263 *
264 * @param idxp Locked index structure.
265 */
[0fc1e5d]266static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
[e1e3b26]267{
[7858bc5f]268 block_t *b;
269 fat_bs_t *bs;
[4573a79]270 fat_dentry_t *d;
[c06dbf9]271 fat_node_t *nodep = NULL;
[c91f2d1b]272 int rc;
[4573a79]273
[add5835]274 if (idxp->nodep) {
[4573a79]275 /*
276 * We are lucky.
277 * The node is already instantiated in memory.
278 */
[6ebe721]279 fibril_mutex_lock(&idxp->nodep->lock);
[e6bc3a5]280 if (!idxp->nodep->refcnt++) {
281 fibril_mutex_lock(&ffn_mutex);
[c06dbf9]282 list_remove(&idxp->nodep->ffn_link);
[e6bc3a5]283 fibril_mutex_unlock(&ffn_mutex);
284 }
[6ebe721]285 fibril_mutex_unlock(&idxp->nodep->lock);
[0fc1e5d]286 *nodepp = idxp->nodep;
287 return EOK;
[4573a79]288 }
289
290 /*
291 * We must instantiate the node from the file system.
292 */
293
[add5835]294 assert(idxp->pfc);
[4573a79]295
[17bf658]296 rc = fat_node_get_new(&nodep);
297 if (rc != EOK)
[0fc1e5d]298 return rc;
[4573a79]299
[991f645]300 bs = block_bb_get(idxp->devmap_handle);
[4573a79]301
[2c4bbcde]302 /* Read the block that contains the dentry of interest. */
[991f645]303 rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL,
[7a23d60]304 (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
[0fc1e5d]305 if (rc != EOK) {
306 (void) fat_node_put(FS_NODE(nodep));
307 return rc;
308 }
[4573a79]309
[7a23d60]310 d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
[2c4bbcde]311 if (d->attr & FAT_ATTR_SUBDIR) {
312 /*
313 * The only directory which does not have this bit set is the
314 * root directory itself. The root directory node is handled
315 * and initialized elsewhere.
316 */
317 nodep->type = FAT_DIRECTORY;
[2ab1023]318 /*
[e2115311]319 * Unfortunately, the 'size' field of the FAT dentry is not
320 * defined for the directory entry type. We must determine the
321 * size of the directory by walking the FAT.
[2ab1023]322 */
[e402382]323 uint16_t clusters;
[991f645]324 rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle,
[4f1c0b4]325 uint16_t_le2host(d->firstc));
[0fc1e5d]326 if (rc != EOK) {
[9fec913]327 (void) block_put(b);
[0fc1e5d]328 (void) fat_node_put(FS_NODE(nodep));
329 return rc;
330 }
[7a23d60]331 nodep->size = BPS(bs) * SPC(bs) * clusters;
[2c4bbcde]332 } else {
333 nodep->type = FAT_FILE;
[2ab1023]334 nodep->size = uint32_t_le2host(d->size);
[2c4bbcde]335 }
336 nodep->firstc = uint16_t_le2host(d->firstc);
337 nodep->lnkcnt = 1;
338 nodep->refcnt = 1;
339
[c91f2d1b]340 rc = block_put(b);
[0fc1e5d]341 if (rc != EOK) {
342 (void) fat_node_put(FS_NODE(nodep));
343 return rc;
344 }
[2c4bbcde]345
346 /* Link the idx structure with the node structure. */
[add5835]347 nodep->idx = idxp;
348 idxp->nodep = nodep;
[2c4bbcde]349
[0fc1e5d]350 *nodepp = nodep;
351 return EOK;
[a2aa1dec]352}
353
[50e5b25]354/*
355 * FAT libfs operations.
356 */
357
[991f645]358int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)
[073f550]359{
[991f645]360 return fat_node_get(rfn, devmap_handle, 0);
[073f550]361}
362
363int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
364{
365 fat_bs_t *bs;
366 fat_node_t *parentp = FAT_NODE(pfn);
367 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
368 unsigned i, j;
369 unsigned blocks;
370 fat_dentry_t *d;
[991f645]371 devmap_handle_t devmap_handle;
[073f550]372 block_t *b;
373 int rc;
374
375 fibril_mutex_lock(&parentp->idx->lock);
[991f645]376 devmap_handle = parentp->idx->devmap_handle;
[a93d79a]377 fibril_mutex_unlock(&parentp->idx->lock);
378
[991f645]379 bs = block_bb_get(devmap_handle);
[7a23d60]380 blocks = parentp->size / BPS(bs);
[073f550]381 for (i = 0; i < blocks; i++) {
382 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
[a93d79a]383 if (rc != EOK)
[073f550]384 return rc;
[7a23d60]385 for (j = 0; j < DPS(bs); j++) {
[073f550]386 d = ((fat_dentry_t *)b->data) + j;
387 switch (fat_classify_dentry(d)) {
388 case FAT_DENTRY_SKIP:
389 case FAT_DENTRY_FREE:
390 continue;
391 case FAT_DENTRY_LAST:
[8810c63]392 /* miss */
[073f550]393 rc = block_put(b);
394 *rfn = NULL;
[8810c63]395 return rc;
[073f550]396 default:
397 case FAT_DENTRY_VALID:
398 fat_dentry_name_get(d, name);
399 break;
400 }
401 if (fat_dentry_namecmp(name, component) == 0) {
402 /* hit */
403 fat_node_t *nodep;
[991f645]404 fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,
[a93d79a]405 parentp->firstc, i * DPS(bs) + j);
[073f550]406 if (!idx) {
407 /*
408 * Can happen if memory is low or if we
409 * run out of 32-bit indices.
410 */
411 rc = block_put(b);
[8810c63]412 return (rc == EOK) ? ENOMEM : rc;
[073f550]413 }
[0fc1e5d]414 rc = fat_node_get_core(&nodep, idx);
[073f550]415 fibril_mutex_unlock(&idx->lock);
[1647323]416 if (rc != EOK) {
417 (void) block_put(b);
418 return rc;
419 }
[073f550]420 *rfn = FS_NODE(nodep);
[1647323]421 rc = block_put(b);
422 if (rc != EOK)
423 (void) fat_node_put(*rfn);
424 return rc;
[073f550]425 }
426 }
427 rc = block_put(b);
[a93d79a]428 if (rc != EOK)
[8810c63]429 return rc;
[073f550]430 }
431
432 *rfn = NULL;
433 return EOK;
434}
435
[add5835]436/** Instantiate a FAT in-core node. */
[991f645]437int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)
[add5835]438{
[b6035ba]439 fat_node_t *nodep;
[add5835]440 fat_idx_t *idxp;
[0fc1e5d]441 int rc;
[add5835]442
[991f645]443 idxp = fat_idx_get_by_index(devmap_handle, index);
[073f550]444 if (!idxp) {
445 *rfn = NULL;
446 return EOK;
447 }
[add5835]448 /* idxp->lock held */
[0fc1e5d]449 rc = fat_node_get_core(&nodep, idxp);
[6ebe721]450 fibril_mutex_unlock(&idxp->lock);
[0fc1e5d]451 if (rc == EOK)
452 *rfn = FS_NODE(nodep);
453 return rc;
[add5835]454}
455
[1313ee9]456int fat_node_open(fs_node_t *fn)
457{
458 /*
459 * Opening a file is stateless, nothing
460 * to be done here.
461 */
462 return EOK;
463}
464
[073f550]465int fat_node_put(fs_node_t *fn)
[06901c6b]466{
[b6035ba]467 fat_node_t *nodep = FAT_NODE(fn);
[6571b78]468 bool destroy = false;
[34b3ce3]469
[6ebe721]470 fibril_mutex_lock(&nodep->lock);
[34b3ce3]471 if (!--nodep->refcnt) {
[6571b78]472 if (nodep->idx) {
[6ebe721]473 fibril_mutex_lock(&ffn_mutex);
[6571b78]474 list_append(&nodep->ffn_link, &ffn_head);
[6ebe721]475 fibril_mutex_unlock(&ffn_mutex);
[6571b78]476 } else {
477 /*
478 * The node does not have any index structure associated
479 * with itself. This can only mean that we are releasing
480 * the node after a failed attempt to allocate the index
481 * structure for it.
482 */
483 destroy = true;
484 }
[34b3ce3]485 }
[6ebe721]486 fibril_mutex_unlock(&nodep->lock);
[b6035ba]487 if (destroy) {
488 free(nodep->bp);
489 free(nodep);
490 }
[073f550]491 return EOK;
[06901c6b]492}
493
[991f645]494int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags)
[80e8482]495{
[6571b78]496 fat_idx_t *idxp;
497 fat_node_t *nodep;
[49df572]498 fat_bs_t *bs;
499 fat_cluster_t mcl, lcl;
500 int rc;
501
[991f645]502 bs = block_bb_get(devmap_handle);
[49df572]503 if (flags & L_DIRECTORY) {
504 /* allocate a cluster */
[991f645]505 rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl);
[073f550]506 if (rc != EOK)
507 return rc;
508 /* populate the new cluster with unused dentries */
[991f645]509 rc = fat_zero_cluster(bs, devmap_handle, mcl);
[073f550]510 if (rc != EOK) {
[991f645]511 (void) fat_free_clusters(bs, devmap_handle, mcl);
[073f550]512 return rc;
513 }
[49df572]514 }
[6571b78]515
[17bf658]516 rc = fat_node_get_new(&nodep);
517 if (rc != EOK) {
[991f645]518 if (flags & L_DIRECTORY)
 (void) fat_free_clusters(bs, devmap_handle, mcl);
[17bf658]519 return rc;
[49df572]520 }
[991f645]521 rc = fat_idx_get_new(&idxp, devmap_handle);
[9a15176]522 if (rc != EOK) {
[991f645]523 if (flags & L_DIRECTORY)
 (void) fat_free_clusters(bs, devmap_handle, mcl);
[073f550]524 (void) fat_node_put(FS_NODE(nodep));
[9a15176]525 return rc;
[6571b78]526 }
527 /* idxp->lock held */
528 if (flags & L_DIRECTORY) {
529 nodep->type = FAT_DIRECTORY;
[49df572]530 nodep->firstc = mcl;
[7a23d60]531 nodep->size = BPS(bs) * SPC(bs);
[6571b78]532 } else {
533 nodep->type = FAT_FILE;
[49df572]534 nodep->firstc = FAT_CLST_RES0;
535 nodep->size = 0;
[6571b78]536 }
537 nodep->lnkcnt = 0; /* not linked anywhere */
538 nodep->refcnt = 1;
[49df572]539 nodep->dirty = true;
[6571b78]540
541 nodep->idx = idxp;
542 idxp->nodep = nodep;
543
[6ebe721]544 fibril_mutex_unlock(&idxp->lock);
[073f550]545 *rfn = FS_NODE(nodep);
546 return EOK;
[80e8482]547}
548
[b6035ba]549int fat_destroy_node(fs_node_t *fn)
[80e8482]550{
[b6035ba]551 fat_node_t *nodep = FAT_NODE(fn);
[50e5b25]552 fat_bs_t *bs;
[073f550]553 bool has_children;
554 int rc;
[50e5b25]555
556 /*
557 * The node is not reachable from the file system. This means that the
558 * link count should be zero and that the index structure cannot be
559 * found in the position hash. Obviously, we need to lock neither the
560 * node nor its index structure.
561 */
562 assert(nodep->lnkcnt == 0);
563
564 /*
565 * The node may not have any children.
566 */
[073f550]567 rc = fat_has_children(&has_children, fn);
568 if (rc != EOK)
569 return rc;
570 assert(!has_children);
[50e5b25]571
[991f645]572 bs = block_bb_get(nodep->idx->devmap_handle);
[50e5b25]573 if (nodep->firstc != FAT_CLST_RES0) {
574 assert(nodep->size);
575 /* Free all clusters allocated to the node. */
[991f645]576 rc = fat_free_clusters(bs, nodep->idx->devmap_handle,
[cca29e3c]577 nodep->firstc);
[50e5b25]578 }
579
580 fat_idx_destroy(nodep->idx);
[b6035ba]581 free(nodep->bp);
[50e5b25]582 free(nodep);
[cca29e3c]583 return rc;
[80e8482]584}
585
[b6035ba]586int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
[80e8482]587{
[b6035ba]588 fat_node_t *parentp = FAT_NODE(pfn);
589 fat_node_t *childp = FAT_NODE(cfn);
[0fdd6bb]590 fat_dentry_t *d;
591 fat_bs_t *bs;
592 block_t *b;
[a405563]593 unsigned i, j;
[0fdd6bb]594 unsigned blocks;
[e32b65a]595 fat_cluster_t mcl, lcl;
596 int rc;
[0fdd6bb]597
[6ebe721]598 fibril_mutex_lock(&childp->lock);
[0fdd6bb]599 if (childp->lnkcnt == 1) {
600 /*
601 * On FAT, we don't support multiple hard links.
602 */
[6ebe721]603 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]604 return EMLINK;
605 }
606 assert(childp->lnkcnt == 0);
[6ebe721]607 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]608
609 if (!fat_dentry_name_verify(name)) {
610 /*
611 * Attempt to create unsupported name.
612 */
613 return ENOTSUP;
614 }
615
616 /*
617 * Get us an unused parent node's dentry or grow the parent and allocate
618 * a new one.
619 */
620
[6ebe721]621 fibril_mutex_lock(&parentp->idx->lock);
[991f645]622 bs = block_bb_get(parentp->idx->devmap_handle);
[0fdd6bb]623
[7a23d60]624 blocks = parentp->size / BPS(bs);
[0fdd6bb]625
626 for (i = 0; i < blocks; i++) {
[684b655]627 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
[4b4668e]628 if (rc != EOK) {
629 fibril_mutex_unlock(&parentp->idx->lock);
630 return rc;
631 }
[7a23d60]632 for (j = 0; j < DPS(bs); j++) {
[0fdd6bb]633 d = ((fat_dentry_t *)b->data) + j;
634 switch (fat_classify_dentry(d)) {
635 case FAT_DENTRY_SKIP:
636 case FAT_DENTRY_VALID:
637 /* skipping used and meta entries */
638 continue;
639 case FAT_DENTRY_FREE:
640 case FAT_DENTRY_LAST:
641 /* found an empty slot */
642 goto hit;
643 }
644 }
[c91f2d1b]645 rc = block_put(b);
[4b4668e]646 if (rc != EOK) {
647 fibril_mutex_unlock(&parentp->idx->lock);
648 return rc;
649 }
[0fdd6bb]650 }
[699743c]651 j = 0;
[0fdd6bb]652
653 /*
654 * We need to grow the parent in order to create a new unused dentry.
655 */
[b713492b]656 if (parentp->firstc == FAT_CLST_ROOT) {
[e32b65a]657 /* Can't grow the root directory. */
[6ebe721]658 fibril_mutex_unlock(&parentp->idx->lock);
[e32b65a]659 return ENOSPC;
660 }
[991f645]661 rc = fat_alloc_clusters(bs, parentp->idx->devmap_handle, 1, &mcl, &lcl);
[e32b65a]662 if (rc != EOK) {
[6ebe721]663 fibril_mutex_unlock(&parentp->idx->lock);
[e32b65a]664 return rc;
665 }
[991f645]666 rc = fat_zero_cluster(bs, parentp->idx->devmap_handle, mcl);
[4b4668e]667 if (rc != EOK) {
[991f645]668 (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
[4b4668e]669 fibril_mutex_unlock(&parentp->idx->lock);
670 return rc;
671 }
[377cce8]672 rc = fat_append_clusters(bs, parentp, mcl, lcl);
[4b4668e]673 if (rc != EOK) {
[991f645]674 (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
[4b4668e]675 fibril_mutex_unlock(&parentp->idx->lock);
676 return rc;
677 }
[7a23d60]678 parentp->size += BPS(bs) * SPC(bs);
[d44aabd]679 parentp->dirty = true; /* need to sync node */
[684b655]680 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
[4b4668e]681 if (rc != EOK) {
682 fibril_mutex_unlock(&parentp->idx->lock);
683 return rc;
684 }
[e32b65a]685 d = (fat_dentry_t *)b->data;
[0fdd6bb]686
687hit:
688 /*
689 * At this point we only establish the link between the parent and the
690 * child. The dentry, except for the name and the extension, will remain
[e32b65a]691 * uninitialized until the corresponding node is synced. Thus the valid
692 * dentry data is kept in the child node structure.
[0fdd6bb]693 */
694 memset(d, 0, sizeof(fat_dentry_t));
695 fat_dentry_name_set(d, name);
696 b->dirty = true; /* need to sync block */
[c91f2d1b]697 rc = block_put(b);
[6ebe721]698 fibril_mutex_unlock(&parentp->idx->lock);
[4b4668e]699 if (rc != EOK)
700 return rc;
[0fdd6bb]701
[6ebe721]702 fibril_mutex_lock(&childp->idx->lock);
[1baec4b]703
[24a2517]704 if (childp->type == FAT_DIRECTORY) {
[4b4668e]705 /*
[24a2517]706 * If possible, create the Sub-directory Identifier Entry and
707 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
708 * These entries are not mandatory according to Standard
709 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
710 * rather a sign of our good will.
[4b4668e]711 */
[24a2517]712 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
713 if (rc != EOK) {
714 /*
715 * Rather than returning an error, simply skip the
716 * creation of these two entries.
717 */
718 goto skip_dots;
719 }
[ed903174]720 d = (fat_dentry_t *) b->data;
721 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
722 (str_cmp((char *) d->name, FAT_NAME_DOT)) == 0) {
[24a2517]723 memset(d, 0, sizeof(fat_dentry_t));
[b62dc100]724 memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
725 memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
[24a2517]726 d->attr = FAT_ATTR_SUBDIR;
727 d->firstc = host2uint16_t_le(childp->firstc);
728 /* TODO: initialize also the date/time members. */
729 }
730 d++;
[ed903174]731 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
732 (str_cmp((char *) d->name, FAT_NAME_DOT_DOT) == 0)) {
[24a2517]733 memset(d, 0, sizeof(fat_dentry_t));
[b62dc100]734 memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
735 memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
[24a2517]736 d->attr = FAT_ATTR_SUBDIR;
737 d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
738 host2uint16_t_le(FAT_CLST_RES0) :
739 host2uint16_t_le(parentp->firstc);
740 /* TODO: initialize also the date/time members. */
741 }
742 b->dirty = true; /* need to sync block */
743 /*
744 * Ignore the return value as we would have fallen through on error
745 * anyway.
746 */
747 (void) block_put(b);
[1baec4b]748 }
[4b4668e]749skip_dots:
[1baec4b]750
[0fdd6bb]751 childp->idx->pfc = parentp->firstc;
[7a23d60]752 childp->idx->pdi = i * DPS(bs) + j;
[6ebe721]753 fibril_mutex_unlock(&childp->idx->lock);
[0fdd6bb]754
[6ebe721]755 fibril_mutex_lock(&childp->lock);
[0fdd6bb]756 childp->lnkcnt = 1;
757 childp->dirty = true; /* need to sync node */
[6ebe721]758 fibril_mutex_unlock(&childp->lock);
[0fdd6bb]759
760 /*
761 * Hash in the index structure into the position hash.
762 */
763 fat_idx_hashin(childp->idx);
764
765 return EOK;
[80e8482]766}
767
[cf95bc0]768int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
[80e8482]769{
[b6035ba]770 fat_node_t *parentp = FAT_NODE(pfn);
771 fat_node_t *childp = FAT_NODE(cfn);
[a31c1ccf]772 fat_bs_t *bs;
773 fat_dentry_t *d;
774 block_t *b;
[073f550]775 bool has_children;
[c91f2d1b]776 int rc;
[a31c1ccf]777
[770d281]778 if (!parentp)
779 return EBUSY;
[0be3e8b]780
[073f550]781 rc = fat_has_children(&has_children, cfn);
782 if (rc != EOK)
783 return rc;
784 if (has_children)
[0be3e8b]785 return ENOTEMPTY;
[770d281]786
[6ebe721]787 fibril_mutex_lock(&parentp->lock);
788 fibril_mutex_lock(&childp->lock);
[a31c1ccf]789 assert(childp->lnkcnt == 1);
[6ebe721]790 fibril_mutex_lock(&childp->idx->lock);
[991f645]791 bs = block_bb_get(childp->idx->devmap_handle);
[a31c1ccf]792
[991f645]793 rc = _fat_block_get(&b, bs, childp->idx->devmap_handle, childp->idx->pfc,
[6da81e0]794 NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
[a31c1ccf]795 BLOCK_FLAGS_NONE);
[46c0498]796 if (rc != EOK)
797 goto error;
[a31c1ccf]798 d = (fat_dentry_t *)b->data +
[7a23d60]799 (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
[a31c1ccf]800 /* mark the dentry as not-currently-used */
801 d->name[0] = FAT_DENTRY_ERASED;
802 b->dirty = true; /* need to sync block */
[c91f2d1b]803 rc = block_put(b);
[46c0498]804 if (rc != EOK)
805 goto error;
[a31c1ccf]806
807 /* remove the index structure from the position hash */
808 fat_idx_hashout(childp->idx);
809 /* clear position information */
810 childp->idx->pfc = FAT_CLST_RES0;
811 childp->idx->pdi = 0;
[6ebe721]812 fibril_mutex_unlock(&childp->idx->lock);
[a31c1ccf]813 childp->lnkcnt = 0;
[5ca5eaa7]814 childp->refcnt++; /* keep the node in memory until destroyed */
[a31c1ccf]815 childp->dirty = true;
[6ebe721]816 fibril_mutex_unlock(&childp->lock);
817 fibril_mutex_unlock(&parentp->lock);
[a31c1ccf]818
819 return EOK;
[46c0498]820
821error:
822 fibril_mutex_unlock(&parentp->lock);
823 fibril_mutex_unlock(&childp->lock);
824 fibril_mutex_unlock(&childp->idx->lock);
825 return rc;
[80e8482]826}
827
[073f550]828int fat_has_children(bool *has_children, fs_node_t *fn)
[32fb10ed]829{
[7858bc5f]830 fat_bs_t *bs;
[b6035ba]831 fat_node_t *nodep = FAT_NODE(fn);
[32fb10ed]832 unsigned blocks;
[7858bc5f]833 block_t *b;
[32fb10ed]834 unsigned i, j;
[c91f2d1b]835 int rc;
[32fb10ed]836
[073f550]837 if (nodep->type != FAT_DIRECTORY) {
838 *has_children = false;
839 return EOK;
840 }
[b0247bac]841
[6ebe721]842 fibril_mutex_lock(&nodep->idx->lock);
[991f645]843 bs = block_bb_get(nodep->idx->devmap_handle);
[32fb10ed]844
[7a23d60]845 blocks = nodep->size / BPS(bs);
[32fb10ed]846
847 for (i = 0; i < blocks; i++) {
848 fat_dentry_t *d;
849
[684b655]850 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
[073f550]851 if (rc != EOK) {
852 fibril_mutex_unlock(&nodep->idx->lock);
853 return rc;
854 }
[7a23d60]855 for (j = 0; j < DPS(bs); j++) {
[32fb10ed]856 d = ((fat_dentry_t *)b->data) + j;
857 switch (fat_classify_dentry(d)) {
858 case FAT_DENTRY_SKIP:
[0fdd6bb]859 case FAT_DENTRY_FREE:
[32fb10ed]860 continue;
861 case FAT_DENTRY_LAST:
[c91f2d1b]862 rc = block_put(b);
[6ebe721]863 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]864 *has_children = false;
[8810c63]865 return rc;
[32fb10ed]866 default:
867 case FAT_DENTRY_VALID:
[c91f2d1b]868 rc = block_put(b);
[6ebe721]869 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]870 *has_children = true;
[8810c63]871 return rc;
[32fb10ed]872 }
873 }
[c91f2d1b]874 rc = block_put(b);
[8810c63]875 if (rc != EOK) {
876 fibril_mutex_unlock(&nodep->idx->lock);
877 return rc;
878 }
[32fb10ed]879 }
880
[6ebe721]881 fibril_mutex_unlock(&nodep->idx->lock);
[073f550]882 *has_children = false;
883 return EOK;
884}
885
886
887fs_index_t fat_index_get(fs_node_t *fn)
888{
889 return FAT_NODE(fn)->idx->index;
890}
891
[ed903174]892aoff64_t fat_size_get(fs_node_t *fn)
[073f550]893{
894 return FAT_NODE(fn)->size;
[32fb10ed]895}
896
[073f550]897unsigned fat_lnkcnt_get(fs_node_t *fn)
[74ea3c6]898{
[073f550]899 return FAT_NODE(fn)->lnkcnt;
[74ea3c6]900}
901
[50e5b25]902char fat_plb_get_char(unsigned pos)
[74ea3c6]903{
904 return fat_reg.plb_ro[pos % PLB_SIZE];
905}
906
[b6035ba]907bool fat_is_directory(fs_node_t *fn)
[e1e3b26]908{
[b6035ba]909 return FAT_NODE(fn)->type == FAT_DIRECTORY;
[e1e3b26]910}
911
[b6035ba]912bool fat_is_file(fs_node_t *fn)
[e1e3b26]913{
[b6035ba]914 return FAT_NODE(fn)->type == FAT_FILE;
[e1e3b26]915}
916
[991f645]917devmap_handle_t fat_device_get(fs_node_t *node)
[1313ee9]918{
919 return 0;
920}
921
[a2aa1dec]922/** libfs operations */
923libfs_ops_t fat_libfs_ops = {
[073f550]924 .root_get = fat_root_get,
[a2aa1dec]925 .match = fat_match,
926 .node_get = fat_node_get,
[1313ee9]927 .node_open = fat_node_open,
[06901c6b]928 .node_put = fat_node_put,
[6571b78]929 .create = fat_create_node,
930 .destroy = fat_destroy_node,
[80e8482]931 .link = fat_link,
932 .unlink = fat_unlink,
[073f550]933 .has_children = fat_has_children,
[e1e3b26]934 .index_get = fat_index_get,
935 .size_get = fat_size_get,
936 .lnkcnt_get = fat_lnkcnt_get,
[1313ee9]937 .plb_get_char = fat_plb_get_char,
[e1e3b26]938 .is_directory = fat_is_directory,
[1313ee9]939 .is_file = fat_is_file,
940 .device_get = fat_device_get
[a2aa1dec]941};
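/*
 * libfs drives the operations above on behalf of VFS: the fat_lookup(),
 * fat_mount() and fat_open_node() entry points below simply delegate to
 * libfs_lookup(), libfs_mount() and libfs_open_node(), which in turn call
 * back into this table (e.g. .match, .node_get, .node_put) as they walk
 * path components.
 */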
942
[0013b9ce]943/*
944 * VFS operations.
945 */
946
[cde485d]947void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
948{
[991f645]949 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[1fbe064b]950 enum cache_mode cmode;
[7858bc5f]951 fat_bs_t *bs;
[472c09d]952
953 /* Accept the mount options */
954 char *opts;
[4cac2d69]955 int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);
[472c09d]956
957 if (rc != EOK) {
[ffa2c8ef]958 async_answer_0(rid, rc);
[594303b]959 return;
960 }
961
[1fbe064b]962 /* Check for option enabling write through. */
963 if (str_cmp(opts, "wtcache") == 0)
964 cmode = CACHE_MODE_WT;
965 else
966 cmode = CACHE_MODE_WB;
967
[64aed80]968 free(opts);
969
[7858bc5f]970 /* initialize libblock */
[991f645]971 rc = block_init(devmap_handle, BS_SIZE);
[7a35204a]972 if (rc != EOK) {
[ffa2c8ef]973 async_answer_0(rid, rc);
[6284978]974 return;
975 }
976
977 /* prepare the boot block */
[991f645]978 rc = block_bb_read(devmap_handle, BS_BLOCK);
[6284978]979 if (rc != EOK) {
[991f645]980 block_fini(devmap_handle);
[ffa2c8ef]981 async_answer_0(rid, rc);
[7a35204a]982 return;
983 }
984
[7858bc5f]985 /* get the buffer with the boot sector */
[991f645]986 bs = block_bb_get(devmap_handle);
[7858bc5f]987
[7a23d60]988 if (BPS(bs) != BS_SIZE) {
[991f645]989 block_fini(devmap_handle);
[ffa2c8ef]990 async_answer_0(rid, ENOTSUP);
[7a35204a]991 return;
992 }
993
[f1ba5d6]994 /* Initialize the block cache */
[991f645]995 rc = block_cache_init(devmap_handle, BPS(bs), 0 /* XXX */, cmode);
[f1ba5d6]996 if (rc != EOK) {
[991f645]997 block_fini(devmap_handle);
[ffa2c8ef]998 async_answer_0(rid, rc);
[f1ba5d6]999 return;
1000 }
1001
[2ffaab5]1002 /* Do some simple sanity checks on the file system. */
[991f645]1003 rc = fat_sanity_check(bs, devmap_handle);
[711e1f32]1004 if (rc != EOK) {
[991f645]1005 (void) block_cache_fini(devmap_handle);
1006 block_fini(devmap_handle);
[ffa2c8ef]1007 async_answer_0(rid, rc);
[711e1f32]1008 return;
1009 }
1010
[991f645]1011 rc = fat_idx_init_by_devmap_handle(devmap_handle);
[cde485d]1012 if (rc != EOK) {
[991f645]1013 (void) block_cache_fini(devmap_handle);
1014 block_fini(devmap_handle);
[ffa2c8ef]1015 async_answer_0(rid, rc);
[cde485d]1016 return;
1017 }
1018
[689f036]1019 /* Initialize the root node. */
[b6035ba]1020 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
1021 if (!rfn) {
[991f645]1022 (void) block_cache_fini(devmap_handle);
1023 block_fini(devmap_handle);
1024 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1025 async_answer_0(rid, ENOMEM);
[b6035ba]1026 return;
1027 }
[83937ccd]1028 fs_node_initialize(rfn);
[689f036]1029 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
1030 if (!rootp) {
[b6035ba]1031 free(rfn);
[991f645]1032 (void) block_cache_fini(devmap_handle);
1033 block_fini(devmap_handle);
1034 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1035 async_answer_0(rid, ENOMEM);
[689f036]1036 return;
1037 }
1038 fat_node_initialize(rootp);
1039
[991f645]1040 fat_idx_t *ridxp = fat_idx_get_by_pos(devmap_handle, FAT_CLST_ROOTPAR, 0);
[689f036]1041 if (!ridxp) {
[b6035ba]1042 free(rfn);
[689f036]1043 free(rootp);
[991f645]1044 (void) block_cache_fini(devmap_handle);
1045 block_fini(devmap_handle);
1046 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1047 async_answer_0(rid, ENOMEM);
[689f036]1048 return;
1049 }
1050 assert(ridxp->index == 0);
1051 /* ridxp->lock held */
1052
1053 rootp->type = FAT_DIRECTORY;
1054 rootp->firstc = FAT_CLST_ROOT;
1055 rootp->refcnt = 1;
[5ab597d]1056 rootp->lnkcnt = 0; /* FS root is not linked */
[7a23d60]1057 rootp->size = RDE(bs) * sizeof(fat_dentry_t);
[689f036]1058 rootp->idx = ridxp;
1059 ridxp->nodep = rootp;
[b6035ba]1060 rootp->bp = rfn;
1061 rfn->data = rootp;
[689f036]1062
[6ebe721]1063 fibril_mutex_unlock(&ridxp->lock);
[689f036]1064
[ffa2c8ef]1065 async_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
[cde485d]1066}
1067
1068void fat_mount(ipc_callid_t rid, ipc_call_t *request)
1069{
[16d17ca]1070 libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[cde485d]1071}
1072
[3c11713]1073void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
1074{
[991f645]1075 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[430de97]1076 fs_node_t *fn;
1077 fat_node_t *nodep;
1078 int rc;
1079
[991f645]1080 rc = fat_root_get(&fn, devmap_handle);
[430de97]1081 if (rc != EOK) {
[ffa2c8ef]1082 async_answer_0(rid, rc);
[430de97]1083 return;
1084 }
1085 nodep = FAT_NODE(fn);
1086
1087 /*
1088 * We expect exactly two references on the root node. One for the
1089 * fat_root_get() above and one created in fat_mounted().
1090 */
1091 if (nodep->refcnt != 2) {
1092 (void) fat_node_put(fn);
[ffa2c8ef]1093 async_answer_0(rid, EBUSY);
[430de97]1094 return;
1095 }
1096
1097 /*
1098 * Put the root node and force it to the FAT free node list.
1099 */
1100 (void) fat_node_put(fn);
1101 (void) fat_node_put(fn);
1102
1103 /*
1104 * Perform cleanup of the node structures, index structures and
1105 * associated data. Write back this file system's dirty blocks and
1106 * stop using libblock for this instance.
1107 */
[991f645]1108 (void) fat_node_fini_by_devmap_handle(devmap_handle);
1109 fat_idx_fini_by_devmap_handle(devmap_handle);
1110 (void) block_cache_fini(devmap_handle);
1111 block_fini(devmap_handle);
[430de97]1112
[ffa2c8ef]1113 async_answer_0(rid, EOK);
[3c11713]1114}
1115
1116void fat_unmount(ipc_callid_t rid, ipc_call_t *request)
1117{
1118 libfs_unmount(&fat_libfs_ops, rid, request);
1119}
1120
[be815bc]1121void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
1122{
[a2aa1dec]1123 libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[be815bc]1124}
1125
[4bf40f6]1126void fat_read(ipc_callid_t rid, ipc_call_t *request)
1127{
[991f645]1128 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1129 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1130 aoff64_t pos =
1131 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1132 fs_node_t *fn;
[b6035ba]1133 fat_node_t *nodep;
[7858bc5f]1134 fat_bs_t *bs;
[79d031b]1135 size_t bytes;
[7858bc5f]1136 block_t *b;
[c91f2d1b]1137 int rc;
[79d031b]1138
[991f645]1139 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1140 if (rc != EOK) {
[ffa2c8ef]1141 async_answer_0(rid, rc);
[073f550]1142 return;
1143 }
[b6035ba]1144 if (!fn) {
[ffa2c8ef]1145 async_answer_0(rid, ENOENT);
[4bf40f6]1146 return;
1147 }
[b6035ba]1148 nodep = FAT_NODE(fn);
[4bf40f6]1149
1150 ipc_callid_t callid;
1151 size_t len;
[0da4e41]1152 if (!async_data_read_receive(&callid, &len)) {
[b6035ba]1153 fat_node_put(fn);
[ffa2c8ef]1154 async_answer_0(callid, EINVAL);
1155 async_answer_0(rid, EINVAL);
[4bf40f6]1156 return;
1157 }
1158
[991f645]1159 bs = block_bb_get(devmap_handle);
[cb682eb]1160
[4bf40f6]1161 if (nodep->type == FAT_FILE) {
[ddd1219]1162 /*
1163 * Our strategy for regular file reads is to read one block at
1164 * most and make use of the possibility to return less data than
1165 * requested. This keeps the code very simple.
1166 */
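 /*
 * For illustration (assuming BPS(bs) == 512): a read of len == 1024 bytes
 * at pos == 700 returns at most 324 bytes, i.e. up to the end of the block
 * that contains pos (and never past the end of the file); the client is
 * expected to retry for the rest.
 */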
[0d974d8]1167 if (pos >= nodep->size) {
[7d861950]1168 /* reading beyond the EOF */
1169 bytes = 0;
[0da4e41]1170 (void) async_data_read_finalize(callid, NULL, 0);
[0d974d8]1171 } else {
[7a23d60]1172 bytes = min(len, BPS(bs) - pos % BPS(bs));
[0d974d8]1173 bytes = min(bytes, nodep->size - pos);
[7a23d60]1174 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
[1d8cdb1]1175 BLOCK_FLAGS_NONE);
[453f2e75]1176 if (rc != EOK) {
1177 fat_node_put(fn);
[ffa2c8ef]1178 async_answer_0(callid, rc);
1179 async_answer_0(rid, rc);
[453f2e75]1180 return;
1181 }
[7a23d60]1182 (void) async_data_read_finalize(callid,
1183 b->data + pos % BPS(bs), bytes);
[c91f2d1b]1184 rc = block_put(b);
[453f2e75]1185 if (rc != EOK) {
1186 fat_node_put(fn);
[ffa2c8ef]1187 async_answer_0(rid, rc);
[453f2e75]1188 return;
1189 }
[0d974d8]1190 }
[4bf40f6]1191 } else {
[ddd1219]1192 unsigned bnum;
[ed903174]1193 aoff64_t spos = pos;
[ddd1219]1194 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
1195 fat_dentry_t *d;
1196
[4bf40f6]1197 assert(nodep->type == FAT_DIRECTORY);
[7a23d60]1198 assert(nodep->size % BPS(bs) == 0);
1199 assert(BPS(bs) % sizeof(fat_dentry_t) == 0);
[ddd1219]1200
1201 /*
1202 * Our strategy for readdir() is to use the position pointer as
1203 * an index into the array of all dentries. On entry, it points
1204 * to the first unread dentry. If we skip any dentries, we bump
1205 * the position pointer accordingly.
1206 */
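 /*
 * For illustration (512-byte sectors, i.e. 16 dentries per block): a
 * position pointer of 20 maps to block bnum == 1 and to the dentry at
 * offset o == 4 within that block.
 */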
[7a23d60]1207 bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs);
1208 while (bnum < nodep->size / BPS(bs)) {
[ed903174]1209 aoff64_t o;
[ddd1219]1210
[684b655]1211 rc = fat_block_get(&b, bs, nodep, bnum,
1212 BLOCK_FLAGS_NONE);
[453f2e75]1213 if (rc != EOK)
1214 goto err;
[7a23d60]1215 for (o = pos % (BPS(bs) / sizeof(fat_dentry_t));
1216 o < BPS(bs) / sizeof(fat_dentry_t);
[ddd1219]1217 o++, pos++) {
1218 d = ((fat_dentry_t *)b->data) + o;
1219 switch (fat_classify_dentry(d)) {
1220 case FAT_DENTRY_SKIP:
[0fdd6bb]1221 case FAT_DENTRY_FREE:
[ddd1219]1222 continue;
1223 case FAT_DENTRY_LAST:
[c91f2d1b]1224 rc = block_put(b);
[453f2e75]1225 if (rc != EOK)
1226 goto err;
[ddd1219]1227 goto miss;
1228 default:
1229 case FAT_DENTRY_VALID:
[0fdd6bb]1230 fat_dentry_name_get(d, name);
[073f550]1231 rc = block_put(b);
[453f2e75]1232 if (rc != EOK)
1233 goto err;
[ddd1219]1234 goto hit;
1235 }
1236 }
[c91f2d1b]1237 rc = block_put(b);
[453f2e75]1238 if (rc != EOK)
1239 goto err;
[ddd1219]1240 bnum++;
1241 }
1242miss:
[453f2e75]1243 rc = fat_node_put(fn);
[ffa2c8ef]1244 async_answer_0(callid, rc != EOK ? rc : ENOENT);
1245 async_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
[4bf40f6]1246 return;
[453f2e75]1247
1248err:
1249 (void) fat_node_put(fn);
[ffa2c8ef]1250 async_answer_0(callid, rc);
1251 async_answer_0(rid, rc);
[453f2e75]1252 return;
1253
[ddd1219]1254hit:
[0da4e41]1255 (void) async_data_read_finalize(callid, name, str_size(name) + 1);
[ddd1219]1256 bytes = (pos - spos) + 1;
[4bf40f6]1257 }
1258
[453f2e75]1259 rc = fat_node_put(fn);
[ffa2c8ef]1260 async_answer_1(rid, rc, (sysarg_t)bytes);
[4bf40f6]1261}
1262
[c947dda]1263void fat_write(ipc_callid_t rid, ipc_call_t *request)
1264{
[991f645]1265 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1266 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1267 aoff64_t pos =
1268 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1269 fs_node_t *fn;
[b6035ba]1270 fat_node_t *nodep;
[7858bc5f]1271 fat_bs_t *bs;
[dfddfcd]1272 size_t bytes, size;
[7858bc5f]1273 block_t *b;
[ed903174]1274 aoff64_t boundary;
[1d8cdb1]1275 int flags = BLOCK_FLAGS_NONE;
[c91f2d1b]1276 int rc;
[8d32152]1277
[991f645]1278 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1279 if (rc != EOK) {
[ffa2c8ef]1280 async_answer_0(rid, rc);
[073f550]1281 return;
1282 }
[b6035ba]1283 if (!fn) {
[ffa2c8ef]1284 async_answer_0(rid, ENOENT);
[8d32152]1285 return;
1286 }
[b6035ba]1287 nodep = FAT_NODE(fn);
[8d32152]1288
1289 ipc_callid_t callid;
1290 size_t len;
[0da4e41]1291 if (!async_data_write_receive(&callid, &len)) {
[dfddfcd]1292 (void) fat_node_put(fn);
[ffa2c8ef]1293 async_answer_0(callid, EINVAL);
1294 async_answer_0(rid, EINVAL);
[8d32152]1295 return;
1296 }
1297
[991f645]1298 bs = block_bb_get(devmap_handle);
[913a821c]1299
[8d32152]1300 /*
1301 * In all scenarios, we will attempt to write out only one block worth
1302 * of data at maximum. There might be some more efficient approaches,
1303 * but this one greatly simplifies fat_write(). Note that we can afford
1304 * to do this because the client must be ready to handle the return
1305 * value signaling a smaller number of bytes written.
1306 */
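 /*
 * For illustration (assuming BPS(bs) == 512): a request to write len == 2000
 * bytes at pos == 100 stores only 412 bytes in this call, i.e. up to the end
 * of the block containing pos; only a full, block-aligned write of exactly
 * one block gets BLOCK_FLAGS_NOREAD, since none of the old block is kept.
 */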
[7a23d60]1307 bytes = min(len, BPS(bs) - pos % BPS(bs));
1308 if (bytes == BPS(bs))
[1d8cdb1]1309 flags |= BLOCK_FLAGS_NOREAD;
[8d32152]1310
[7a23d60]1311 boundary = ROUND_UP(nodep->size, BPC(bs));
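 /*
 * 'boundary' is the first byte offset past the space covered by the node's
 * currently allocated cluster chain. E.g. with BPC(bs) == 4096 and
 * nodep->size == 5000, boundary == 8192: a write below it fits into existing
 * clusters, a write at or beyond it needs new clusters (the else branch).
 */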
[b4b7187]1312 if (pos < boundary) {
[8d32152]1313 /*
1314 * This is the easier case - we are either overwriting already
1315 * existing contents or writing beyond the EOF, but still within
1316 * the limits of the last cluster. The node size may grow to the
1317 * next block size boundary.
1318 */
[cca29e3c]1319 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
[dfddfcd]1320 if (rc != EOK) {
1321 (void) fat_node_put(fn);
[ffa2c8ef]1322 async_answer_0(callid, rc);
1323 async_answer_0(rid, rc);
[dfddfcd]1324 return;
1325 }
[7a23d60]1326 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
[dfddfcd]1327 if (rc != EOK) {
1328 (void) fat_node_put(fn);
[ffa2c8ef]1329 async_answer_0(callid, rc);
1330 async_answer_0(rid, rc);
[dfddfcd]1331 return;
1332 }
[7a23d60]1333 (void) async_data_write_finalize(callid,
1334 b->data + pos % BPS(bs), bytes);
[8d32152]1335 b->dirty = true; /* need to sync block */
[c91f2d1b]1336 rc = block_put(b);
[dfddfcd]1337 if (rc != EOK) {
1338 (void) fat_node_put(fn);
[ffa2c8ef]1339 async_answer_0(rid, rc);
[dfddfcd]1340 return;
1341 }
[8d32152]1342 if (pos + bytes > nodep->size) {
1343 nodep->size = pos + bytes;
1344 nodep->dirty = true; /* need to sync node */
1345 }
[dfddfcd]1346 size = nodep->size;
1347 rc = fat_node_put(fn);
[ffa2c8ef]1348 async_answer_2(rid, rc, bytes, size);
[8d32152]1349 return;
1350 } else {
1351 /*
1352 * This is the more difficult case. We must allocate new
1353 * clusters for the node and zero them out.
1354 */
1355 unsigned nclsts;
[8334a427]1356 fat_cluster_t mcl, lcl;
1357
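 /*
 * Example (BPC(bs) == 4096, boundary == 8192): a write of bytes == 512 at
 * pos == 9000 needs (ROUND_UP(9512, 4096) - 8192) / 4096 == 1 new cluster.
 */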
[7a23d60]1358 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
[6f2dfd1]1359 /* create an independent chain of nclsts clusters in all FATs */
[991f645]1360 rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);
[dfddfcd]1361 if (rc != EOK) {
[6f2dfd1]1362 /* could not allocate a chain of nclsts clusters */
[dfddfcd]1363 (void) fat_node_put(fn);
[ffa2c8ef]1364 async_answer_0(callid, rc);
1365 async_answer_0(rid, rc);
[6f2dfd1]1366 return;
1367 }
1368 /* zero fill any gaps */
[cca29e3c]1369 rc = fat_fill_gap(bs, nodep, mcl, pos);
[dfddfcd]1370 if (rc != EOK) {
[991f645]1371 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1372 (void) fat_node_put(fn);
[ffa2c8ef]1373 async_answer_0(callid, rc);
1374 async_answer_0(rid, rc);
[dfddfcd]1375 return;
1376 }
[991f645]1377 rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL,
[7a23d60]1378 (pos / BPS(bs)) % SPC(bs), flags);
[dfddfcd]1379 if (rc != EOK) {
[991f645]1380 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1381 (void) fat_node_put(fn);
[ffa2c8ef]1382 async_answer_0(callid, rc);
1383 async_answer_0(rid, rc);
[dfddfcd]1384 return;
1385 }
[7a23d60]1386 (void) async_data_write_finalize(callid,
1387 b->data + pos % BPS(bs), bytes);
[b4b7187]1388 b->dirty = true; /* need to sync block */
[c91f2d1b]1389 rc = block_put(b);
[dfddfcd]1390 if (rc != EOK) {
[991f645]1391 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1392 (void) fat_node_put(fn);
[ffa2c8ef]1393 async_answer_0(rid, rc);
[dfddfcd]1394 return;
1395 }
[6f2dfd1]1396 /*
1397 * Append the cluster chain starting in mcl to the end of the
1398 * node's cluster chain.
1399 */
[377cce8]1400 rc = fat_append_clusters(bs, nodep, mcl, lcl);
[dfddfcd]1401 if (rc != EOK) {
[991f645]1402 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1403 (void) fat_node_put(fn);
[ffa2c8ef]1404 async_answer_0(rid, rc);
[dfddfcd]1405 return;
1406 }
1407 nodep->size = size = pos + bytes;
[b4b7187]1408 nodep->dirty = true; /* need to sync node */
[dfddfcd]1409 rc = fat_node_put(fn);
[ffa2c8ef]1410 async_answer_2(rid, rc, bytes, size);
[6f2dfd1]1411 return;
[8d32152]1412 }
[c947dda]1413}
1414
[6c71a1f]1415void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1416{
[991f645]1417 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1418 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1419 aoff64_t size =
1420 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1421 fs_node_t *fn;
[b6035ba]1422 fat_node_t *nodep;
[913a821c]1423 fat_bs_t *bs;
[8334a427]1424 int rc;
1425
[991f645]1426 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1427 if (rc != EOK) {
[ffa2c8ef]1428 async_answer_0(rid, rc);
[073f550]1429 return;
1430 }
[b6035ba]1431 if (!fn) {
[ffa2c8ef]1432 async_answer_0(rid, ENOENT);
[8334a427]1433 return;
1434 }
[b6035ba]1435 nodep = FAT_NODE(fn);
[8334a427]1436
[991f645]1437 bs = block_bb_get(devmap_handle);
[913a821c]1438
[8334a427]1439 if (nodep->size == size) {
1440 rc = EOK;
1441 } else if (nodep->size < size) {
1442 /*
[913a821c]1443 * The standard says we have the freedom to grow the node.
[8334a427]1444 * For now, we simply return an error.
1445 */
1446 rc = EINVAL;
[7a23d60]1447 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
[913a821c]1448 /*
1449 * The node will be shrunk, but no clusters will be deallocated.
1450 */
1451 nodep->size = size;
1452 nodep->dirty = true; /* need to sync node */
1453 rc = EOK;
[8334a427]1454 } else {
1455 /*
[913a821c]1456 * The node will be shrunk, clusters will be deallocated.
[8334a427]1457 */
[913a821c]1458 if (size == 0) {
[cca29e3c]1459 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1460 if (rc != EOK)
1461 goto out;
[913a821c]1462 } else {
1463 fat_cluster_t lastc;
[991f645]1464 rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
[7a23d60]1465 &lastc, NULL, (size - 1) / BPC(bs));
[e402382]1466 if (rc != EOK)
1467 goto out;
[cca29e3c]1468 rc = fat_chop_clusters(bs, nodep, lastc);
1469 if (rc != EOK)
1470 goto out;
[913a821c]1471 }
1472 nodep->size = size;
1473 nodep->dirty = true; /* need to sync node */
1474 rc = EOK;
[8334a427]1475 }
[e402382]1476out:
[b6035ba]1477 fat_node_put(fn);
[ffa2c8ef]1478 async_answer_0(rid, rc);
[8334a427]1479 return;
[6c71a1f]1480}
1481
[c20aa06]1482void fat_close(ipc_callid_t rid, ipc_call_t *request)
1483{
[ffa2c8ef]1484 async_answer_0(rid, EOK);
[c20aa06]1485}
1486
[50e5b25]1487void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1488{
[991f645]1489 devmap_handle_t devmap_handle = (devmap_handle_t)IPC_GET_ARG1(*request);
[50e5b25]1490 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
[073f550]1491 fs_node_t *fn;
[5ca5eaa7]1492 fat_node_t *nodep;
[50e5b25]1493 int rc;
1494
[991f645]1495 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1496 if (rc != EOK) {
[ffa2c8ef]1497 async_answer_0(rid, rc);
[073f550]1498 return;
1499 }
[b6035ba]1500 if (!fn) {
[ffa2c8ef]1501 async_answer_0(rid, ENOENT);
[50e5b25]1502 return;
1503 }
1504
[5ca5eaa7]1505 nodep = FAT_NODE(fn);
1506 /*
1507 * We should have exactly two references. One for the above
1508 * call to fat_node_get() and one from fat_unlink().
1509 */
1510 assert(nodep->refcnt == 2);
1511
[b6035ba]1512 rc = fat_destroy_node(fn);
[ffa2c8ef]1513 async_answer_0(rid, rc);
[50e5b25]1514}
1515
[c20aa06]1516void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
1517{
1518 libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1519}
1520
[852b801]1521void fat_stat(ipc_callid_t rid, ipc_call_t *request)
[c20aa06]1522{
[75160a6]1523 libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[c20aa06]1524}
1525
1526void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1527{
[991f645]1528 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[69a60c4]1529 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1530
1531 fs_node_t *fn;
[991f645]1532 int rc = fat_node_get(&fn, devmap_handle, index);
[69a60c4]1533 if (rc != EOK) {
[ffa2c8ef]1534 async_answer_0(rid, rc);
[69a60c4]1535 return;
1536 }
1537 if (!fn) {
[ffa2c8ef]1538 async_answer_0(rid, ENOENT);
[69a60c4]1539 return;
1540 }
1541
1542 fat_node_t *nodep = FAT_NODE(fn);
1543
1544 nodep->dirty = true;
1545 rc = fat_node_sync(nodep);
1546
1547 fat_node_put(fn);
[ffa2c8ef]1548 async_answer_0(rid, rc);
[c20aa06]1549}
1550
[be815bc]1551/**
1552 * @}
[c20aa06]1553 */