source: mainline/uspace/srv/fs/fat/fat_ops.c@ 563686b

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 563686b was 563686b, checked in by Oleg Romanenko <romanenko.oleg@…>, 14 years ago

Fix for fat_read. Add error checking for fat_directory_open and use the
new function fat_directory_seek instead of explicitly assigning the position.

  • Property mode set to 100644
File size: 37.2 KB
RevLine 
[be815bc]1/*
[a2aa1dec]2 * Copyright (c) 2008 Jakub Jermar
[be815bc]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
[97bc3ee]31 */
[be815bc]32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
[033ef7d3]39#include "fat_dentry.h"
40#include "fat_fat.h"
[8a40c49]41#include "fat_directory.h"
[6364d3c]42#include "../../vfs/vfs.h"
[a2aa1dec]43#include <libfs.h>
[fc840d9]44#include <libblock.h>
[7a35204a]45#include <ipc/services.h>
46#include <ipc/devmap.h>
[ed903174]47#include <macros.h>
[be815bc]48#include <async.h>
49#include <errno.h>
[19f857a]50#include <str.h>
[776f2e6]51#include <byteorder.h>
[d9c8c81]52#include <adt/hash_table.h>
53#include <adt/list.h>
[e1e3b26]54#include <assert.h>
[1e4cada]55#include <fibril_synch.h>
[7a35204a]56#include <sys/mman.h>
[8d32152]57#include <align.h>
[c7bbf029]58#include <malloc.h>
[65ccd23]59#include <str.h>
[e1e3b26]60
/* Convert between the generic libfs node and FAT's in-core node (NULL-safe). */
#define FAT_NODE(node)	((node) ? (fat_node_t *) (node)->data : NULL)
#define FS_NODE(node)	((node) ? (node)->bp : NULL)

/* Dentries per sector and bytes per cluster, derived from the boot sector. */
#define DPS(bs)		(BPS((bs)) / sizeof(fat_dentry_t))
#define BPC(bs)		(BPS((bs)) * SPC((bs)))

[6ebe721]67/** Mutex protecting the list of cached free FAT nodes. */
68static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
[add5835]69
70/** List of cached free FAT nodes. */
71static LIST_INITIALIZE(ffn_head);
[6364d3c]72
[0fc1e5d]73/*
74 * Forward declarations of FAT libfs operations.
75 */
[991f645]76static int fat_root_get(fs_node_t **, devmap_handle_t);
[0fc1e5d]77static int fat_match(fs_node_t **, fs_node_t *, const char *);
[991f645]78static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);
[1313ee9]79static int fat_node_open(fs_node_t *);
[0fc1e5d]80static int fat_node_put(fs_node_t *);
[991f645]81static int fat_create_node(fs_node_t **, devmap_handle_t, int);
[0fc1e5d]82static int fat_destroy_node(fs_node_t *);
83static int fat_link(fs_node_t *, fs_node_t *, const char *);
84static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
85static int fat_has_children(bool *, fs_node_t *);
86static fs_index_t fat_index_get(fs_node_t *);
[ed903174]87static aoff64_t fat_size_get(fs_node_t *);
[0fc1e5d]88static unsigned fat_lnkcnt_get(fs_node_t *);
89static char fat_plb_get_char(unsigned);
90static bool fat_is_directory(fs_node_t *);
91static bool fat_is_file(fs_node_t *node);
[991f645]92static devmap_handle_t fat_device_get(fs_node_t *node);
[0fc1e5d]93
94/*
95 * Helper functions.
96 */
[e1e3b26]97static void fat_node_initialize(fat_node_t *node)
[a2aa1dec]98{
[6ebe721]99 fibril_mutex_initialize(&node->lock);
[b6035ba]100 node->bp = NULL;
[869e546]101 node->idx = NULL;
[e1e3b26]102 node->type = 0;
103 link_initialize(&node->ffn_link);
104 node->size = 0;
105 node->lnkcnt = 0;
106 node->refcnt = 0;
107 node->dirty = false;
[377cce8]108 node->lastc_cached_valid = false;
[88a27f1]109 node->lastc_cached_value = FAT32_CLST_LAST1;
[dba4a23]110 node->currc_cached_valid = false;
111 node->currc_cached_bn = 0;
[88a27f1]112 node->currc_cached_value = FAT32_CLST_LAST1;
[e1e3b26]113}
114
/** Write a dirty node's metadata back to its on-disk directory entry.
 *
 * The dentry's location is taken from the node's index structure: pfc is
 * the first cluster of the parent directory and pdi the dentry index
 * within it.
 *
 * @param node	Node to synchronize; must be marked dirty.
 * @return	EOK on success or an error code from the block layer.
 */
static int fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	int rc;
	
	assert(node->dirty);

	bs = block_bb_get(node->idx->devmap_handle);
	
	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
	    NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
	    BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));

	/*
	 * NOTE(review): only the low 16 bits of firstc are stored here;
	 * fat_node_get_core() reads firstc_hi on FAT32, which this path
	 * never updates — verify FAT32 sync correctness.
	 */
	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}
	
	/* TODO: update other fields? (e.g time fields) */
	
	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	return rc;
}
148
/** Release all cached free nodes belonging to a file system instance.
 *
 * Walks the global free-node list and, for every node of the given
 * instance, syncs it if dirty and frees it. Uses trylock on the node and
 * index locks and restarts the scan whenever a lock cannot be taken or
 * the list is modified, to avoid lock-order deadlocks.
 *
 * @param devmap_handle	Instance whose cached nodes are to be freed.
 * @return		EOK on success or an error code from fat_node_sync().
 */
static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle)
{
	link_t *lnk;
	fat_node_t *nodep;
	int rc;

	/*
	 * We are called from fat_unmounted() and assume that there are already
	 * no nodes belonging to this instance with non-zero refcount. Therefore
	 * it is sufficient to clean up only the FAT free node list.
	 */

restart:
	fibril_mutex_lock(&ffn_mutex);
	for (lnk = ffn_head.next; lnk != &ffn_head; lnk = lnk->next) {
		nodep = list_get_instance(lnk, fat_node_t, ffn_link);
		/* Trylock to respect the node-before-list lock order. */
		if (!fibril_mutex_trylock(&nodep->lock)) {
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (!fibril_mutex_trylock(&nodep->idx->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		/* Skip nodes of other instances. */
		if (nodep->idx->devmap_handle != devmap_handle) {
			fibril_mutex_unlock(&nodep->idx->lock);
			fibril_mutex_unlock(&nodep->lock);
			continue;
		}

		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);

		/*
		 * We can unlock the node and its index structure because we are
		 * the last player on this playground and VFS is preventing new
		 * players from entering.
		 */
		fibril_mutex_unlock(&nodep->idx->lock);
		fibril_mutex_unlock(&nodep->lock);

		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			if (rc != EOK)
				return rc;
		}
		nodep->idx->nodep = NULL;
		free(nodep->bp);
		free(nodep);

		/* Need to restart because we changed the ffn_head list. */
		goto restart;
	}
	fibril_mutex_unlock(&ffn_mutex);

	return EOK;
}
207
/** Obtain an initialized node structure, reusing a cached free node if possible.
 *
 * First tries to recycle a node from the free-node list (syncing it to
 * disk first if dirty); if none is available or its locks cannot be
 * taken, allocates a fresh fs_node_t/fat_node_t pair.
 *
 * @param nodepp	Where to store the initialized node on success.
 * @return		EOK, ENOMEM, or an error from fat_node_sync().
 */
static int fat_node_get_new(fat_node_t **nodepp)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		/* Trylock: fall back to allocation rather than risk deadlock. */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			/* Flush the recycled node's metadata before reuse. */
			rc = fat_node_sync(nodep);
			if (rc != EOK) {
				idxp_tmp->nodep = NULL;
				fibril_mutex_unlock(&nodep->lock);
				fibril_mutex_unlock(&idxp_tmp->lock);
				free(nodep->bp);
				free(nodep);
				return rc;
			}
		}
		/* Detach the recycled node from its old index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return ENOMEM;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return ENOMEM;
		}
	}
	/* Reset both structures and wire them together. */
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;
	
	*nodepp = nodep;
	return EOK;
}
264
/** Internal version of fat_node_get().
 *
 * If the node is already in memory, bumps its reference count (removing
 * it from the free list if it was cached free); otherwise reads the
 * on-disk dentry identified by the index structure and instantiates a
 * new in-core node from it.
 *
 * @param nodepp	Where to store the resulting node on success.
 * @param idxp		Locked index structure.
 * @return		EOK or an error code from the block/FAT layers.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			/* First reference: take it off the free list. */
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */
	
	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->devmap_handle);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL,
	    (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
	/* FAT32 splits the first cluster number into low and high words. */
	if (FAT_IS_FAT32(bs)) {
		nodep->firstc = uint16_t_le2host(d->firstc_lo) |
		    (uint16_t_le2host(d->firstc_hi) << 16);
	}
	else
		nodep->firstc = uint16_t_le2host(d->firstc);

	if (d->attr & FAT_ATTR_SUBDIR) {
		/* 
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		/* TODO uint16_t clusters to uint32_t */
		uint16_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle, nodep->firstc);
		if (rc != EOK) {
			(void) block_put(b);
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = BPS(bs) * SPC(bs) * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}
364
[50e5b25]365/*
366 * FAT libfs operations.
367 */
368
[991f645]369int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)
[073f550]370{
[991f645]371 return fat_node_get(rfn, devmap_handle, 0);
[073f550]372}
373
374int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
375{
376 fat_node_t *parentp = FAT_NODE(pfn);
[cefd3ec]377 char name[FAT_LFN_NAME_SIZE];
[073f550]378 fat_dentry_t *d;
[991f645]379 devmap_handle_t devmap_handle;
[073f550]380 int rc;
381
382 fibril_mutex_lock(&parentp->idx->lock);
[991f645]383 devmap_handle = parentp->idx->devmap_handle;
[a93d79a]384 fibril_mutex_unlock(&parentp->idx->lock);
[cefd3ec]385
386 fat_directory_t di;
387 fat_directory_open(parentp, &di);
388
389 while (fat_directory_read(&di, name, &d) == EOK) {
390 if (fat_dentry_namecmp(name, component) == 0) {
391 /* hit */
392 fat_node_t *nodep;
[b85c19a]393 aoff64_t o = di.pos % (BPS(di.bs) / sizeof(fat_dentry_t));
[cefd3ec]394 fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,
395 parentp->firstc, di.bnum * DPS(di.bs) + o);
396 if (!idx) {
397 /*
398 * Can happen if memory is low or if we
399 * run out of 32-bit indices.
400 */
401 rc = fat_directory_close(&di);
402 return (rc == EOK) ? ENOMEM : rc;
[073f550]403 }
[cefd3ec]404 rc = fat_node_get_core(&nodep, idx);
405 fibril_mutex_unlock(&idx->lock);
406 if (rc != EOK) {
407 (void) fat_directory_close(&di);
[1647323]408 return rc;
[073f550]409 }
[cefd3ec]410 *rfn = FS_NODE(nodep);
411 rc = fat_directory_close(&di);
412 if (rc != EOK)
413 (void) fat_node_put(*rfn);
[8810c63]414 return rc;
[b85c19a]415 } else {
416 rc = fat_directory_next(&di);
417 if (rc != EOK)
418 break;
[cefd3ec]419 }
[073f550]420 }
[010b52d8]421 (void) fat_directory_close(&di);
[073f550]422 *rfn = NULL;
423 return EOK;
424}
425
[add5835]426/** Instantiate a FAT in-core node. */
[991f645]427int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)
[add5835]428{
[b6035ba]429 fat_node_t *nodep;
[add5835]430 fat_idx_t *idxp;
[0fc1e5d]431 int rc;
[add5835]432
[991f645]433 idxp = fat_idx_get_by_index(devmap_handle, index);
[073f550]434 if (!idxp) {
435 *rfn = NULL;
436 return EOK;
437 }
[add5835]438 /* idxp->lock held */
[0fc1e5d]439 rc = fat_node_get_core(&nodep, idxp);
[6ebe721]440 fibril_mutex_unlock(&idxp->lock);
[0fc1e5d]441 if (rc == EOK)
442 *rfn = FS_NODE(nodep);
443 return rc;
[add5835]444}
445
[1313ee9]446int fat_node_open(fs_node_t *fn)
447{
448 /*
449 * Opening a file is stateless, nothing
450 * to be done here.
451 */
452 return EOK;
453}
454
/** Drop a reference to a node.
 *
 * When the last reference is dropped, the node is either parked on the
 * free-node list (if it has an index structure) or freed outright.
 *
 * @param fn	Node to release.
 * @return	Always EOK.
 */
int fat_node_put(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	bool destroy = false;

	fibril_mutex_lock(&nodep->lock);
	if (!--nodep->refcnt) {
		if (nodep->idx) {
			/* Cache the unreferenced node for later reuse. */
			fibril_mutex_lock(&ffn_mutex);
			list_append(&nodep->ffn_link, &ffn_head);
			fibril_mutex_unlock(&ffn_mutex);
		} else {
			/*
			 * The node does not have any index structure associated
			 * with itself. This can only mean that we are releasing
			 * the node after a failed attempt to allocate the index
			 * structure for it.
			 */
			destroy = true;
		}
	}
	fibril_mutex_unlock(&nodep->lock);
	/* Free outside the node lock. */
	if (destroy) {
		free(nodep->bp);
		free(nodep);
	}
	return EOK;
}
483
[991f645]484int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags)
[80e8482]485{
[6571b78]486 fat_idx_t *idxp;
487 fat_node_t *nodep;
[49df572]488 fat_bs_t *bs;
489 fat_cluster_t mcl, lcl;
490 int rc;
491
[991f645]492 bs = block_bb_get(devmap_handle);
[49df572]493 if (flags & L_DIRECTORY) {
494 /* allocate a cluster */
[991f645]495 rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl);
[073f550]496 if (rc != EOK)
497 return rc;
498 /* populate the new cluster with unused dentries */
[991f645]499 rc = fat_zero_cluster(bs, devmap_handle, mcl);
[073f550]500 if (rc != EOK) {
[991f645]501 (void) fat_free_clusters(bs, devmap_handle, mcl);
[073f550]502 return rc;
503 }
[49df572]504 }
[6571b78]505
[17bf658]506 rc = fat_node_get_new(&nodep);
507 if (rc != EOK) {
[991f645]508 (void) fat_free_clusters(bs, devmap_handle, mcl);
[17bf658]509 return rc;
[49df572]510 }
[991f645]511 rc = fat_idx_get_new(&idxp, devmap_handle);
[9a15176]512 if (rc != EOK) {
[97bc3ee]513 (void) fat_free_clusters(bs, devmap_handle, mcl);
[073f550]514 (void) fat_node_put(FS_NODE(nodep));
[9a15176]515 return rc;
[6571b78]516 }
517 /* idxp->lock held */
518 if (flags & L_DIRECTORY) {
519 nodep->type = FAT_DIRECTORY;
[49df572]520 nodep->firstc = mcl;
[7a23d60]521 nodep->size = BPS(bs) * SPC(bs);
[6571b78]522 } else {
523 nodep->type = FAT_FILE;
[49df572]524 nodep->firstc = FAT_CLST_RES0;
525 nodep->size = 0;
[6571b78]526 }
527 nodep->lnkcnt = 0; /* not linked anywhere */
528 nodep->refcnt = 1;
[49df572]529 nodep->dirty = true;
[6571b78]530
531 nodep->idx = idxp;
532 idxp->nodep = nodep;
533
[6ebe721]534 fibril_mutex_unlock(&idxp->lock);
[073f550]535 *rfn = FS_NODE(nodep);
536 return EOK;
[80e8482]537}
538
/** Destroy an unlinked, childless node and free its clusters.
 *
 * @param fn	Node to destroy; must have lnkcnt == 0 and no children.
 * @return	EOK or an error code from fat_has_children()/fat_free_clusters().
 */
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	bool has_children;
	int rc;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the node
	 * nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	rc = fat_has_children(&has_children, fn);
	if (rc != EOK)
		return rc;
	assert(!has_children);

	bs = block_bb_get(nodep->idx->devmap_handle);
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->devmap_handle,
		    nodep->firstc);
	}

	/* rc is EOK here if the node owned no clusters. */
	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}
575
/** Link a child node into a parent directory under the given name.
 *
 * Finds a free dentry in the parent (growing the parent by one cluster
 * if necessary), writes the name into it, optionally creates the "." and
 * ".." entries for a new directory, and records the dentry position in
 * the child's index structure.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node to link; must have lnkcnt == 0.
 * @param name	Name for the new dentry.
 * @return	EOK, EMLINK, ENOTSUP, ENOSPC, or a block-layer error.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	unsigned i, j;
	unsigned blocks;
	fat_cluster_t mcl, lcl;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get us an unused parent node's dentry or grow the parent and allocate
	 * a new one.
	 */
	
	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->devmap_handle);

	blocks = parentp->size / BPS(bs);

	/* Scan the parent's blocks for a free dentry slot. */
	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_LFN:
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot; b stays referenced */
				goto hit;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
	}
	/* No free slot: the new dentry will be slot 0 of a fresh block i. */
	j = 0;
	
	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	if (!FAT_IS_FAT32(bs) && parentp->firstc == FAT_CLST_ROOT) {
		/* Can't grow the root directory. */
		fibril_mutex_unlock(&parentp->idx->lock);
		return ENOSPC;
	}
	rc = fat_alloc_clusters(bs, parentp->idx->devmap_handle, 1, &mcl, &lcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_zero_cluster(bs, parentp->idx->devmap_handle, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_append_clusters(bs, parentp, mcl, lcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	parentp->size += BPS(bs) * SPC(bs);
	parentp->dirty = true;		/* need to sync node */
	rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	d = (fat_dentry_t *)b->data;

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except of the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	fibril_mutex_unlock(&parentp->idx->lock);
	if (rc != EOK)
		return rc;

	fibril_mutex_lock(&childp->idx->lock);
	
	if (childp->type == FAT_DIRECTORY) {
		/*
		 * If possible, create the Sub-directory Identifier Entry and
		 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
		 * These entries are not mandatory according to Standard
		 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
		 * rather a sign of our good will.
		 */
		rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			/*
			 * Rather than returning an error, simply skip the
			 * creation of these two entries.
			 */
			goto skip_dots;
		}
		d = (fat_dentry_t *) b->data;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (bcmp(d->name, FAT_NAME_DOT, FAT_NAME_LEN)) == 0) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			d->firstc = host2uint16_t_le(childp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		d++;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (bcmp(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN) == 0)) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			/* ".." in a child of the root points at ROOTPAR. */
			d->firstc = (parentp->firstc == FAT_ROOT_CLST(bs)) ?
			    host2uint16_t_le(FAT_CLST_ROOTPAR) :
			    host2uint16_t_le(parentp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		b->dirty = true;		/* need to sync block */
		/*
		 * Ignore the return value as we would have fallen through on error
		 * anyway.
		 */
		(void) block_put(b);
	}
skip_dots:

	/* Record where the child's dentry lives within the parent. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * DPS(bs) + j;
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
758
/** Unlink a child node from its parent directory.
 *
 * Erases the child's dentry (and any preceding LFN entries, via
 * fat_directory_erase), removes the index structure from the position
 * hash, and clears the child's link count.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node to unlink; must have lnkcnt == 1.
 * @param nm	Name being removed (unused here; position comes from idx).
 * @return	EOK, EBUSY, ENOTEMPTY, or an error code.
 */
int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	bool has_children;
	int rc;

	if (!parentp)
		return EBUSY;
	
	/* A non-empty directory may not be unlinked. */
	rc = fat_has_children(&has_children, cfn);
	if (rc != EOK)
		return rc;
	if (has_children)
		return ENOTEMPTY;

	fibril_mutex_lock(&parentp->lock);
	fibril_mutex_lock(&childp->lock);
	assert(childp->lnkcnt == 1);
	fibril_mutex_lock(&childp->idx->lock);

	fat_directory_t di;
	rc = fat_directory_open(parentp,&di);
	if (rc != EOK)
		goto error;
	/* Position on the child's dentry and erase it. */
	rc = fat_directory_seek(&di, childp->idx->pdi);
	if (rc != EOK)
		goto error;
	rc = fat_directory_erase(&di);
	if (rc != EOK)
		goto error;
	rc = fat_directory_close(&di);
	if (rc != EOK)
		goto error;

	/* remove the index structure from the position hash */
	fat_idx_hashout(childp->idx);
	/* clear position information */
	childp->idx->pfc = FAT_CLST_RES0;
	childp->idx->pdi = 0;
	fibril_mutex_unlock(&childp->idx->lock);
	childp->lnkcnt = 0;
	childp->refcnt++;	/* keep the node in memory until destroyed */
	childp->dirty = true;
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);

	return EOK;

error:
	/*
	 * NOTE(review): this also runs when fat_directory_open() itself
	 * failed — verify closing an unopened directory context is safe.
	 */
	(void) fat_directory_close(&di);
	fibril_mutex_unlock(&childp->idx->lock);
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);
	return rc;
}
815
/** Determine whether a node has any children.
 *
 * Files never have children. For directories, the dentries are scanned
 * until a valid entry (has children), the last-entry marker (empty), or
 * the end of the directory is reached.
 *
 * @param has_children	Where to store the result.
 * @param fn		Node to inspect.
 * @return		EOK or a block-layer error.
 */
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}
	
	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->devmap_handle);

	blocks = nodep->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;
	
		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* No more entries: the directory is empty. */
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return rc;
			default:
			/*
			 * NOTE(review): FAT_DENTRY_LFN falls into this default
			 * and counts as a child — confirm that is intended.
			 */
			case FAT_DENTRY_VALID:
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;	
		}
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}
873
874
875fs_index_t fat_index_get(fs_node_t *fn)
876{
877 return FAT_NODE(fn)->idx->index;
878}
879
[ed903174]880aoff64_t fat_size_get(fs_node_t *fn)
[073f550]881{
882 return FAT_NODE(fn)->size;
[32fb10ed]883}
884
[073f550]885unsigned fat_lnkcnt_get(fs_node_t *fn)
[74ea3c6]886{
[073f550]887 return FAT_NODE(fn)->lnkcnt;
[74ea3c6]888}
889
[50e5b25]890char fat_plb_get_char(unsigned pos)
[74ea3c6]891{
892 return fat_reg.plb_ro[pos % PLB_SIZE];
893}
894
[b6035ba]895bool fat_is_directory(fs_node_t *fn)
[e1e3b26]896{
[b6035ba]897 return FAT_NODE(fn)->type == FAT_DIRECTORY;
[e1e3b26]898}
899
[b6035ba]900bool fat_is_file(fs_node_t *fn)
[e1e3b26]901{
[b6035ba]902 return FAT_NODE(fn)->type == FAT_FILE;
[e1e3b26]903}
904
[991f645]905devmap_handle_t fat_device_get(fs_node_t *node)
[1313ee9]906{
907 return 0;
908}
909
/** Dispatch table wiring the FAT implementations into the libfs framework. */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_open = fat_node_open,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file,
	.device_get = fat_device_get
};
930
[0013b9ce]931/*
932 * VFS operations.
933 */
934
[cde485d]935void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
936{
[991f645]937 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[1fbe064b]938 enum cache_mode cmode;
[7858bc5f]939 fat_bs_t *bs;
[97bc3ee]940
[472c09d]941 /* Accept the mount options */
942 char *opts;
[4cac2d69]943 int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);
[97bc3ee]944
[472c09d]945 if (rc != EOK) {
[ffa2c8ef]946 async_answer_0(rid, rc);
[594303b]947 return;
948 }
949
[1fbe064b]950 /* Check for option enabling write through. */
951 if (str_cmp(opts, "wtcache") == 0)
952 cmode = CACHE_MODE_WT;
953 else
954 cmode = CACHE_MODE_WB;
955
[64aed80]956 free(opts);
957
[7858bc5f]958 /* initialize libblock */
[991f645]959 rc = block_init(devmap_handle, BS_SIZE);
[7a35204a]960 if (rc != EOK) {
[ffa2c8ef]961 async_answer_0(rid, rc);
[6284978]962 return;
963 }
964
965 /* prepare the boot block */
[991f645]966 rc = block_bb_read(devmap_handle, BS_BLOCK);
[6284978]967 if (rc != EOK) {
[991f645]968 block_fini(devmap_handle);
[ffa2c8ef]969 async_answer_0(rid, rc);
[7a35204a]970 return;
971 }
972
[7858bc5f]973 /* get the buffer with the boot sector */
[991f645]974 bs = block_bb_get(devmap_handle);
[97bc3ee]975
[7a23d60]976 if (BPS(bs) != BS_SIZE) {
[991f645]977 block_fini(devmap_handle);
[ffa2c8ef]978 async_answer_0(rid, ENOTSUP);
[7a35204a]979 return;
980 }
981
[f1ba5d6]982 /* Initialize the block cache */
[991f645]983 rc = block_cache_init(devmap_handle, BPS(bs), 0 /* XXX */, cmode);
[f1ba5d6]984 if (rc != EOK) {
[991f645]985 block_fini(devmap_handle);
[ffa2c8ef]986 async_answer_0(rid, rc);
[f1ba5d6]987 return;
988 }
989
[2ffaab5]990 /* Do some simple sanity checks on the file system. */
[991f645]991 rc = fat_sanity_check(bs, devmap_handle);
[711e1f32]992 if (rc != EOK) {
[991f645]993 (void) block_cache_fini(devmap_handle);
994 block_fini(devmap_handle);
[ffa2c8ef]995 async_answer_0(rid, rc);
[711e1f32]996 return;
997 }
998
[991f645]999 rc = fat_idx_init_by_devmap_handle(devmap_handle);
[cde485d]1000 if (rc != EOK) {
[991f645]1001 (void) block_cache_fini(devmap_handle);
1002 block_fini(devmap_handle);
[ffa2c8ef]1003 async_answer_0(rid, rc);
[cde485d]1004 return;
1005 }
1006
[689f036]1007 /* Initialize the root node. */
[b6035ba]1008 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
1009 if (!rfn) {
[991f645]1010 (void) block_cache_fini(devmap_handle);
1011 block_fini(devmap_handle);
1012 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1013 async_answer_0(rid, ENOMEM);
[b6035ba]1014 return;
1015 }
[0182e5cc]1016
[83937ccd]1017 fs_node_initialize(rfn);
[689f036]1018 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
1019 if (!rootp) {
[b6035ba]1020 free(rfn);
[991f645]1021 (void) block_cache_fini(devmap_handle);
1022 block_fini(devmap_handle);
1023 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1024 async_answer_0(rid, ENOMEM);
[689f036]1025 return;
1026 }
1027 fat_node_initialize(rootp);
1028
[991f645]1029 fat_idx_t *ridxp = fat_idx_get_by_pos(devmap_handle, FAT_CLST_ROOTPAR, 0);
[689f036]1030 if (!ridxp) {
[b6035ba]1031 free(rfn);
[689f036]1032 free(rootp);
[991f645]1033 (void) block_cache_fini(devmap_handle);
1034 block_fini(devmap_handle);
1035 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1036 async_answer_0(rid, ENOMEM);
[689f036]1037 return;
1038 }
1039 assert(ridxp->index == 0);
1040 /* ridxp->lock held */
1041
1042 rootp->type = FAT_DIRECTORY;
[b5db2ae]1043 rootp->firstc = FAT_ROOT_CLST(bs);
[689f036]1044 rootp->refcnt = 1;
[5ab597d]1045 rootp->lnkcnt = 0; /* FS root is not linked */
[b5db2ae]1046
1047 if (FAT_IS_FAT32(bs)) {
1048 uint16_t clusters;
1049 rc = fat_clusters_get(&clusters, bs, devmap_handle, rootp->firstc);
1050 if (rc != EOK) {
1051 free(rfn);
1052 free(rootp);
1053 free(ridxp); /* TODO: Is it right way to free ridxp? */
1054 (void) block_cache_fini(devmap_handle);
1055 block_fini(devmap_handle);
1056 fat_idx_fini_by_devmap_handle(devmap_handle);
1057 async_answer_0(rid, ENOTSUP);
1058 return;
1059 }
1060 rootp->size = BPS(bs) * SPC(bs) * clusters;
1061 } else
1062 rootp->size = RDE(bs) * sizeof(fat_dentry_t);
1063
[689f036]1064 rootp->idx = ridxp;
1065 ridxp->nodep = rootp;
[b6035ba]1066 rootp->bp = rfn;
1067 rfn->data = rootp;
[97bc3ee]1068
[6ebe721]1069 fibril_mutex_unlock(&ridxp->lock);
[689f036]1070
[ffa2c8ef]1071 async_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
[cde485d]1072}
1073
/** Handle the VFS_OUT_MOUNT request by delegating to libfs. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1078
[3c11713]1079void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
1080{
[991f645]1081 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[430de97]1082 fs_node_t *fn;
1083 fat_node_t *nodep;
1084 int rc;
1085
[991f645]1086 rc = fat_root_get(&fn, devmap_handle);
[430de97]1087 if (rc != EOK) {
[ffa2c8ef]1088 async_answer_0(rid, rc);
[430de97]1089 return;
1090 }
1091 nodep = FAT_NODE(fn);
1092
1093 /*
1094 * We expect exactly two references on the root node. One for the
1095 * fat_root_get() above and one created in fat_mounted().
1096 */
1097 if (nodep->refcnt != 2) {
1098 (void) fat_node_put(fn);
[ffa2c8ef]1099 async_answer_0(rid, EBUSY);
[430de97]1100 return;
1101 }
[97bc3ee]1102
[430de97]1103 /*
1104 * Put the root node and force it to the FAT free node list.
1105 */
1106 (void) fat_node_put(fn);
1107 (void) fat_node_put(fn);
1108
1109 /*
1110 * Perform cleanup of the node structures, index structures and
1111 * associated data. Write back this file system's dirty blocks and
1112 * stop using libblock for this instance.
1113 */
[991f645]1114 (void) fat_node_fini_by_devmap_handle(devmap_handle);
1115 fat_idx_fini_by_devmap_handle(devmap_handle);
1116 (void) block_cache_fini(devmap_handle);
1117 block_fini(devmap_handle);
[430de97]1118
[ffa2c8ef]1119 async_answer_0(rid, EOK);
[3c11713]1120}
1121
/** Handle the VFS_OUT_UNMOUNT request by delegating to libfs. */
void fat_unmount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_unmount(&fat_libfs_ops, rid, request);
}
1126
[be815bc]1127void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
1128{
[a2aa1dec]1129 libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[be815bc]1130}
1131
[4bf40f6]1132void fat_read(ipc_callid_t rid, ipc_call_t *request)
1133{
[991f645]1134 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1135 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1136 aoff64_t pos =
1137 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1138 fs_node_t *fn;
[b6035ba]1139 fat_node_t *nodep;
[7858bc5f]1140 fat_bs_t *bs;
[79d031b]1141 size_t bytes;
[7858bc5f]1142 block_t *b;
[c91f2d1b]1143 int rc;
[79d031b]1144
[991f645]1145 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1146 if (rc != EOK) {
[ffa2c8ef]1147 async_answer_0(rid, rc);
[073f550]1148 return;
1149 }
[b6035ba]1150 if (!fn) {
[ffa2c8ef]1151 async_answer_0(rid, ENOENT);
[4bf40f6]1152 return;
1153 }
[b6035ba]1154 nodep = FAT_NODE(fn);
[4bf40f6]1155
1156 ipc_callid_t callid;
1157 size_t len;
[0da4e41]1158 if (!async_data_read_receive(&callid, &len)) {
[b6035ba]1159 fat_node_put(fn);
[ffa2c8ef]1160 async_answer_0(callid, EINVAL);
1161 async_answer_0(rid, EINVAL);
[4bf40f6]1162 return;
1163 }
1164
[991f645]1165 bs = block_bb_get(devmap_handle);
[cb682eb]1166
[4bf40f6]1167 if (nodep->type == FAT_FILE) {
[ddd1219]1168 /*
1169 * Our strategy for regular file reads is to read one block at
1170 * most and make use of the possibility to return less data than
1171 * requested. This keeps the code very simple.
1172 */
[0d974d8]1173 if (pos >= nodep->size) {
[7d861950]1174 /* reading beyond the EOF */
1175 bytes = 0;
[0da4e41]1176 (void) async_data_read_finalize(callid, NULL, 0);
[0d974d8]1177 } else {
[7a23d60]1178 bytes = min(len, BPS(bs) - pos % BPS(bs));
[0d974d8]1179 bytes = min(bytes, nodep->size - pos);
[7a23d60]1180 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
[1d8cdb1]1181 BLOCK_FLAGS_NONE);
[453f2e75]1182 if (rc != EOK) {
1183 fat_node_put(fn);
[ffa2c8ef]1184 async_answer_0(callid, rc);
1185 async_answer_0(rid, rc);
[453f2e75]1186 return;
1187 }
[7a23d60]1188 (void) async_data_read_finalize(callid,
1189 b->data + pos % BPS(bs), bytes);
[c91f2d1b]1190 rc = block_put(b);
[453f2e75]1191 if (rc != EOK) {
1192 fat_node_put(fn);
[ffa2c8ef]1193 async_answer_0(rid, rc);
[453f2e75]1194 return;
1195 }
[0d974d8]1196 }
[4bf40f6]1197 } else {
[ed903174]1198 aoff64_t spos = pos;
[65ccd23]1199 char name[FAT_LFN_NAME_SIZE];
[ddd1219]1200 fat_dentry_t *d;
1201
[4bf40f6]1202 assert(nodep->type == FAT_DIRECTORY);
[7a23d60]1203 assert(nodep->size % BPS(bs) == 0);
1204 assert(BPS(bs) % sizeof(fat_dentry_t) == 0);
[ddd1219]1205
[8a40c49]1206 fat_directory_t di;
[563686b]1207 rc = fat_directory_open(nodep, &di);
1208 if (rc != EOK) goto err;
1209 rc = fat_directory_seek(&di, pos);
1210 if (rc != EOK) {
1211 (void) fat_directory_close(&di);
1212 goto err;
1213 }
[ddd1219]1214
[8a40c49]1215 rc = fat_directory_read(&di, name, &d);
1216 if (rc == EOK) goto hit;
1217 if (rc == ENOENT) goto miss;
[453f2e75]1218
1219err:
1220 (void) fat_node_put(fn);
[ffa2c8ef]1221 async_answer_0(callid, rc);
1222 async_answer_0(rid, rc);
[453f2e75]1223 return;
1224
[8a40c49]1225miss:
1226 rc = fat_directory_close(&di);
1227 if (rc!=EOK)
1228 goto err;
1229 rc = fat_node_put(fn);
1230 async_answer_0(callid, rc != EOK ? rc : ENOENT);
1231 async_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
1232 return;
1233
[ddd1219]1234hit:
[8a40c49]1235 pos = di.pos;
1236 rc = fat_directory_close(&di);
1237 if (rc!=EOK)
1238 goto err;
[0da4e41]1239 (void) async_data_read_finalize(callid, name, str_size(name) + 1);
[b85c19a]1240 bytes = (pos - spos)+1;
[4bf40f6]1241 }
1242
[453f2e75]1243 rc = fat_node_put(fn);
[ffa2c8ef]1244 async_answer_1(rid, rc, (sysarg_t)bytes);
[4bf40f6]1245}
1246
[c947dda]1247void fat_write(ipc_callid_t rid, ipc_call_t *request)
1248{
[991f645]1249 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1250 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1251 aoff64_t pos =
1252 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1253 fs_node_t *fn;
[b6035ba]1254 fat_node_t *nodep;
[7858bc5f]1255 fat_bs_t *bs;
[dfddfcd]1256 size_t bytes, size;
[7858bc5f]1257 block_t *b;
[ed903174]1258 aoff64_t boundary;
[1d8cdb1]1259 int flags = BLOCK_FLAGS_NONE;
[c91f2d1b]1260 int rc;
[97bc3ee]1261
[991f645]1262 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1263 if (rc != EOK) {
[ffa2c8ef]1264 async_answer_0(rid, rc);
[073f550]1265 return;
1266 }
[b6035ba]1267 if (!fn) {
[ffa2c8ef]1268 async_answer_0(rid, ENOENT);
[8d32152]1269 return;
1270 }
[b6035ba]1271 nodep = FAT_NODE(fn);
[97bc3ee]1272
[8d32152]1273 ipc_callid_t callid;
1274 size_t len;
[0da4e41]1275 if (!async_data_write_receive(&callid, &len)) {
[dfddfcd]1276 (void) fat_node_put(fn);
[ffa2c8ef]1277 async_answer_0(callid, EINVAL);
1278 async_answer_0(rid, EINVAL);
[8d32152]1279 return;
1280 }
1281
[991f645]1282 bs = block_bb_get(devmap_handle);
[913a821c]1283
[8d32152]1284 /*
1285 * In all scenarios, we will attempt to write out only one block worth
1286 * of data at maximum. There might be some more efficient approaches,
1287 * but this one greatly simplifies fat_write(). Note that we can afford
1288 * to do this because the client must be ready to handle the return
[97bc3ee]1289 * value signalizing a smaller number of bytes written.
1290 */
[7a23d60]1291 bytes = min(len, BPS(bs) - pos % BPS(bs));
1292 if (bytes == BPS(bs))
[1d8cdb1]1293 flags |= BLOCK_FLAGS_NOREAD;
[97bc3ee]1294
[7a23d60]1295 boundary = ROUND_UP(nodep->size, BPC(bs));
[b4b7187]1296 if (pos < boundary) {
[8d32152]1297 /*
1298 * This is the easier case - we are either overwriting already
1299 * existing contents or writing behind the EOF, but still within
1300 * the limits of the last cluster. The node size may grow to the
1301 * next block size boundary.
1302 */
[cca29e3c]1303 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
[dfddfcd]1304 if (rc != EOK) {
1305 (void) fat_node_put(fn);
[ffa2c8ef]1306 async_answer_0(callid, rc);
1307 async_answer_0(rid, rc);
[dfddfcd]1308 return;
1309 }
[7a23d60]1310 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
[dfddfcd]1311 if (rc != EOK) {
1312 (void) fat_node_put(fn);
[ffa2c8ef]1313 async_answer_0(callid, rc);
1314 async_answer_0(rid, rc);
[dfddfcd]1315 return;
1316 }
[7a23d60]1317 (void) async_data_write_finalize(callid,
1318 b->data + pos % BPS(bs), bytes);
[8d32152]1319 b->dirty = true; /* need to sync block */
[c91f2d1b]1320 rc = block_put(b);
[dfddfcd]1321 if (rc != EOK) {
1322 (void) fat_node_put(fn);
[ffa2c8ef]1323 async_answer_0(rid, rc);
[dfddfcd]1324 return;
1325 }
[8d32152]1326 if (pos + bytes > nodep->size) {
1327 nodep->size = pos + bytes;
1328 nodep->dirty = true; /* need to sync node */
1329 }
[dfddfcd]1330 size = nodep->size;
1331 rc = fat_node_put(fn);
[ffa2c8ef]1332 async_answer_2(rid, rc, bytes, nodep->size);
[8d32152]1333 return;
1334 } else {
1335 /*
1336 * This is the more difficult case. We must allocate new
1337 * clusters for the node and zero them out.
1338 */
1339 unsigned nclsts;
[97bc3ee]1340 fat_cluster_t mcl, lcl;
1341
[7a23d60]1342 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
[6f2dfd1]1343 /* create an independent chain of nclsts clusters in all FATs */
[991f645]1344 rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);
[dfddfcd]1345 if (rc != EOK) {
[6f2dfd1]1346 /* could not allocate a chain of nclsts clusters */
[dfddfcd]1347 (void) fat_node_put(fn);
[ffa2c8ef]1348 async_answer_0(callid, rc);
1349 async_answer_0(rid, rc);
[6f2dfd1]1350 return;
1351 }
1352 /* zero fill any gaps */
[cca29e3c]1353 rc = fat_fill_gap(bs, nodep, mcl, pos);
[dfddfcd]1354 if (rc != EOK) {
[991f645]1355 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1356 (void) fat_node_put(fn);
[ffa2c8ef]1357 async_answer_0(callid, rc);
1358 async_answer_0(rid, rc);
[dfddfcd]1359 return;
1360 }
[991f645]1361 rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL,
[7a23d60]1362 (pos / BPS(bs)) % SPC(bs), flags);
[dfddfcd]1363 if (rc != EOK) {
[991f645]1364 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1365 (void) fat_node_put(fn);
[ffa2c8ef]1366 async_answer_0(callid, rc);
1367 async_answer_0(rid, rc);
[dfddfcd]1368 return;
1369 }
[7a23d60]1370 (void) async_data_write_finalize(callid,
1371 b->data + pos % BPS(bs), bytes);
[b4b7187]1372 b->dirty = true; /* need to sync block */
[c91f2d1b]1373 rc = block_put(b);
[dfddfcd]1374 if (rc != EOK) {
[991f645]1375 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1376 (void) fat_node_put(fn);
[ffa2c8ef]1377 async_answer_0(rid, rc);
[dfddfcd]1378 return;
1379 }
[6f2dfd1]1380 /*
1381 * Append the cluster chain starting in mcl to the end of the
1382 * node's cluster chain.
1383 */
[377cce8]1384 rc = fat_append_clusters(bs, nodep, mcl, lcl);
[dfddfcd]1385 if (rc != EOK) {
[991f645]1386 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1387 (void) fat_node_put(fn);
[ffa2c8ef]1388 async_answer_0(rid, rc);
[dfddfcd]1389 return;
1390 }
1391 nodep->size = size = pos + bytes;
[b4b7187]1392 nodep->dirty = true; /* need to sync node */
[dfddfcd]1393 rc = fat_node_put(fn);
[ffa2c8ef]1394 async_answer_2(rid, rc, bytes, size);
[6f2dfd1]1395 return;
[8d32152]1396 }
[c947dda]1397}
1398
[6c71a1f]1399void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1400{
[991f645]1401 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1402 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1403 aoff64_t size =
1404 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1405 fs_node_t *fn;
[b6035ba]1406 fat_node_t *nodep;
[913a821c]1407 fat_bs_t *bs;
[8334a427]1408 int rc;
1409
[991f645]1410 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1411 if (rc != EOK) {
[ffa2c8ef]1412 async_answer_0(rid, rc);
[073f550]1413 return;
1414 }
[b6035ba]1415 if (!fn) {
[ffa2c8ef]1416 async_answer_0(rid, ENOENT);
[8334a427]1417 return;
1418 }
[b6035ba]1419 nodep = FAT_NODE(fn);
[8334a427]1420
[991f645]1421 bs = block_bb_get(devmap_handle);
[913a821c]1422
[8334a427]1423 if (nodep->size == size) {
1424 rc = EOK;
1425 } else if (nodep->size < size) {
1426 /*
[913a821c]1427 * The standard says we have the freedom to grow the node.
[8334a427]1428 * For now, we simply return an error.
1429 */
1430 rc = EINVAL;
[7a23d60]1431 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
[913a821c]1432 /*
1433 * The node will be shrunk, but no clusters will be deallocated.
1434 */
1435 nodep->size = size;
1436 nodep->dirty = true; /* need to sync node */
[97bc3ee]1437 rc = EOK;
[8334a427]1438 } else {
1439 /*
[913a821c]1440 * The node will be shrunk, clusters will be deallocated.
[8334a427]1441 */
[913a821c]1442 if (size == 0) {
[cca29e3c]1443 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1444 if (rc != EOK)
1445 goto out;
[913a821c]1446 } else {
1447 fat_cluster_t lastc;
[991f645]1448 rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
[7a23d60]1449 &lastc, NULL, (size - 1) / BPC(bs));
[e402382]1450 if (rc != EOK)
1451 goto out;
[cca29e3c]1452 rc = fat_chop_clusters(bs, nodep, lastc);
1453 if (rc != EOK)
1454 goto out;
[913a821c]1455 }
1456 nodep->size = size;
1457 nodep->dirty = true; /* need to sync node */
[97bc3ee]1458 rc = EOK;
[8334a427]1459 }
[e402382]1460out:
[b6035ba]1461 fat_node_put(fn);
[ffa2c8ef]1462 async_answer_0(rid, rc);
[8334a427]1463 return;
[6c71a1f]1464}
1465
[c20aa06]1466void fat_close(ipc_callid_t rid, ipc_call_t *request)
1467{
[ffa2c8ef]1468 async_answer_0(rid, EOK);
[c20aa06]1469}
1470
[50e5b25]1471void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1472{
[991f645]1473 devmap_handle_t devmap_handle = (devmap_handle_t)IPC_GET_ARG1(*request);
[50e5b25]1474 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
[073f550]1475 fs_node_t *fn;
[5ca5eaa7]1476 fat_node_t *nodep;
[50e5b25]1477 int rc;
1478
[991f645]1479 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1480 if (rc != EOK) {
[ffa2c8ef]1481 async_answer_0(rid, rc);
[073f550]1482 return;
1483 }
[b6035ba]1484 if (!fn) {
[ffa2c8ef]1485 async_answer_0(rid, ENOENT);
[50e5b25]1486 return;
1487 }
1488
[5ca5eaa7]1489 nodep = FAT_NODE(fn);
1490 /*
1491 * We should have exactly two references. One for the above
1492 * call to fat_node_get() and one from fat_unlink().
1493 */
1494 assert(nodep->refcnt == 2);
1495
[b6035ba]1496 rc = fat_destroy_node(fn);
[ffa2c8ef]1497 async_answer_0(rid, rc);
[50e5b25]1498}
1499
[c20aa06]1500void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
1501{
1502 libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1503}
1504
[852b801]1505void fat_stat(ipc_callid_t rid, ipc_call_t *request)
[c20aa06]1506{
[75160a6]1507 libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[c20aa06]1508}
1509
1510void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1511{
[991f645]1512 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[69a60c4]1513 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
[97bc3ee]1514
[69a60c4]1515 fs_node_t *fn;
[991f645]1516 int rc = fat_node_get(&fn, devmap_handle, index);
[69a60c4]1517 if (rc != EOK) {
[ffa2c8ef]1518 async_answer_0(rid, rc);
[69a60c4]1519 return;
1520 }
1521 if (!fn) {
[ffa2c8ef]1522 async_answer_0(rid, ENOENT);
[69a60c4]1523 return;
1524 }
[97bc3ee]1525
[69a60c4]1526 fat_node_t *nodep = FAT_NODE(fn);
[97bc3ee]1527
[69a60c4]1528 nodep->dirty = true;
1529 rc = fat_node_sync(nodep);
[97bc3ee]1530
[69a60c4]1531 fat_node_put(fn);
[ffa2c8ef]1532 async_answer_0(rid, rc);
[c20aa06]1533}
1534
[be815bc]1535/**
1536 * @}
[c20aa06]1537 */
Note: See TracBrowser for help on using the repository browser.