source: mainline/uspace/srv/fs/fat/fat_ops.c@ e6910c8

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since e6910c8 was b72efe8, checked in by Jiri Svoboda <jiri@…>, 14 years ago

Separate list_t typedef from link_t (user-space part).

  • list_t represents lists
  • Use list_first(), list_last(), list_empty() where appropriate
  • Use list_foreach() where possible
  • assert_link_not_used()
  • usb_hid_report_path_free() shall not unlink the path, caller must do it
  • Property mode set to 100644
File size: 37.7 KB
RevLine 
[be815bc]1/*
[a2aa1dec]2 * Copyright (c) 2008 Jakub Jermar
[be815bc]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file fat_ops.c
35 * @brief Implementation of VFS operations for the FAT file system server.
36 */
37
38#include "fat.h"
[033ef7d3]39#include "fat_dentry.h"
40#include "fat_fat.h"
[6364d3c]41#include "../../vfs/vfs.h"
[a2aa1dec]42#include <libfs.h>
[fc840d9]43#include <libblock.h>
[7a35204a]44#include <ipc/services.h>
45#include <ipc/devmap.h>
[ed903174]46#include <macros.h>
[be815bc]47#include <async.h>
48#include <errno.h>
[19f857a]49#include <str.h>
[776f2e6]50#include <byteorder.h>
[d9c8c81]51#include <adt/hash_table.h>
52#include <adt/list.h>
[e1e3b26]53#include <assert.h>
[1e4cada]54#include <fibril_synch.h>
[7a35204a]55#include <sys/mman.h>
[8d32152]56#include <align.h>
[c7bbf029]57#include <malloc.h>
[e1e3b26]58
/** Resolve the FAT in-core node behind a generic VFS node (NULL-safe). */
#define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
/** Resolve the generic VFS node wrapping a FAT in-core node (NULL-safe). */
#define FS_NODE(node) ((node) ? (node)->bp : NULL)

/** Number of directory entries per sector (dentries per BPS-sized block). */
#define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))
/** Number of bytes per cluster (bytes per sector times sectors per cluster). */
#define BPC(bs) (BPS((bs)) * SPC((bs)))

/** Mutex protecting the list of cached free FAT nodes. */
static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_list);
[6364d3c]70
[0fc1e5d]71/*
72 * Forward declarations of FAT libfs operations.
73 */
[991f645]74static int fat_root_get(fs_node_t **, devmap_handle_t);
[0fc1e5d]75static int fat_match(fs_node_t **, fs_node_t *, const char *);
[991f645]76static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);
[1313ee9]77static int fat_node_open(fs_node_t *);
[0fc1e5d]78static int fat_node_put(fs_node_t *);
[991f645]79static int fat_create_node(fs_node_t **, devmap_handle_t, int);
[0fc1e5d]80static int fat_destroy_node(fs_node_t *);
81static int fat_link(fs_node_t *, fs_node_t *, const char *);
82static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
83static int fat_has_children(bool *, fs_node_t *);
84static fs_index_t fat_index_get(fs_node_t *);
[ed903174]85static aoff64_t fat_size_get(fs_node_t *);
[0fc1e5d]86static unsigned fat_lnkcnt_get(fs_node_t *);
87static char fat_plb_get_char(unsigned);
88static bool fat_is_directory(fs_node_t *);
89static bool fat_is_file(fs_node_t *node);
[991f645]90static devmap_handle_t fat_device_get(fs_node_t *node);
[0fc1e5d]91
92/*
93 * Helper functions.
94 */
[e1e3b26]95static void fat_node_initialize(fat_node_t *node)
[a2aa1dec]96{
[6ebe721]97 fibril_mutex_initialize(&node->lock);
[b6035ba]98 node->bp = NULL;
[869e546]99 node->idx = NULL;
[e1e3b26]100 node->type = 0;
101 link_initialize(&node->ffn_link);
102 node->size = 0;
103 node->lnkcnt = 0;
104 node->refcnt = 0;
105 node->dirty = false;
[377cce8]106 node->lastc_cached_valid = false;
107 node->lastc_cached_value = FAT_CLST_LAST1;
[dba4a23]108 node->currc_cached_valid = false;
109 node->currc_cached_bn = 0;
110 node->currc_cached_value = FAT_CLST_LAST1;
[e1e3b26]111}
112
[4098e38]113static int fat_node_sync(fat_node_t *node)
[e1e3b26]114{
[7858bc5f]115 block_t *b;
116 fat_bs_t *bs;
[beb17734]117 fat_dentry_t *d;
[c91f2d1b]118 int rc;
[beb17734]119
120 assert(node->dirty);
121
[991f645]122 bs = block_bb_get(node->idx->devmap_handle);
[beb17734]123
124 /* Read the block that contains the dentry of interest. */
[991f645]125 rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
[6da81e0]126 NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
[7a23d60]127 BLOCK_FLAGS_NONE);
[4098e38]128 if (rc != EOK)
129 return rc;
[beb17734]130
[7a23d60]131 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));
[beb17734]132
133 d->firstc = host2uint16_t_le(node->firstc);
[a5da446]134 if (node->type == FAT_FILE) {
[beb17734]135 d->size = host2uint32_t_le(node->size);
[a5da446]136 } else if (node->type == FAT_DIRECTORY) {
137 d->attr = FAT_ATTR_SUBDIR;
138 }
139
140 /* TODO: update other fields? (e.g time fields) */
[beb17734]141
142 b->dirty = true; /* need to sync block */
[c91f2d1b]143 rc = block_put(b);
[4098e38]144 return rc;
[e1e3b26]145}
146
/** Dispose of all cached free nodes belonging to one file system instance.
 *
 * Called during unmount. Walks the free node list, syncing dirty nodes and
 * freeing their memory. Lock order is node -> index; trylocks are used and
 * the walk is restarted from scratch on any contention or list mutation.
 *
 * @param devmap_handle	Device handle identifying the instance to clean up.
 *
 * @return		EOK on success or an error code from fat_node_sync().
 */
static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle)
{
	fat_node_t *nodep;
	int rc;

	/*
	 * We are called from fat_unmounted() and assume that there are already
	 * no nodes belonging to this instance with non-zero refcount. Therefore
	 * it is sufficient to clean up only the FAT free node list.
	 */

restart:
	fibril_mutex_lock(&ffn_mutex);
	list_foreach(ffn_list, lnk) {
		nodep = list_get_instance(lnk, fat_node_t, ffn_link);
		/* Trylock to respect lock ordering; restart on contention. */
		if (!fibril_mutex_trylock(&nodep->lock)) {
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (!fibril_mutex_trylock(&nodep->idx->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			fibril_mutex_unlock(&ffn_mutex);
			goto restart;
		}
		if (nodep->idx->devmap_handle != devmap_handle) {
			/* Node belongs to a different instance; leave it be. */
			fibril_mutex_unlock(&nodep->idx->lock);
			fibril_mutex_unlock(&nodep->lock);
			continue;
		}

		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);

		/*
		 * We can unlock the node and its index structure because we are
		 * the last player on this playground and VFS is preventing new
		 * players from entering.
		 */
		fibril_mutex_unlock(&nodep->idx->lock);
		fibril_mutex_unlock(&nodep->lock);

		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			if (rc != EOK)
				return rc;
		}
		nodep->idx->nodep = NULL;
		free(nodep->bp);
		free(nodep);

		/* Need to restart because we changed ffn_list. */
		goto restart;
	}
	fibril_mutex_unlock(&ffn_mutex);

	return EOK;
}
204
/** Obtain a fresh fat_node_t / fs_node_t pair.
 *
 * First tries to recycle a structure from the free node list (syncing it if
 * dirty and detaching it from its old index structure); falls back to heap
 * allocation. The returned node is initialized but not yet associated with
 * any index structure.
 *
 * @param nodepp	Where to store the pointer to the new node.
 *
 * @return		EOK on success, ENOMEM on allocation failure, or an
 *			error code propagated from fat_node_sync().
 */
static int fat_node_get_new(fat_node_t **nodepp)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_list)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(list_first(&ffn_list), fat_node_t,
		    ffn_link);
		/* Trylocks avoid deadlock; on contention fall back to malloc. */
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			/* Write back the recycled node before reusing it. */
			rc = fat_node_sync(nodep);
			if (rc != EOK) {
				idxp_tmp->nodep = NULL;
				fibril_mutex_unlock(&nodep->lock);
				fibril_mutex_unlock(&idxp_tmp->lock);
				free(nodep->bp);
				free(nodep);
				return rc;
			}
		}
		/* Detach the node from its old index structure. */
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return ENOMEM;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return ENOMEM;
		}
	}
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	/* Cross-link the generic and the FAT-specific node structures. */
	fn->data = nodep;
	nodep->bp = fn;

	*nodepp = nodep;
	return EOK;
}
262
/** Internal version of fat_node_get().
 *
 * If the node is already in memory, bumps its reference count (removing it
 * from the free node cache on the zero-to-one transition). Otherwise reads
 * the node's directory entry from disk and instantiates a new in-core node.
 *
 * @param nodepp	Where to store the pointer to the resulting node.
 * @param idxp		Locked index structure.
 *
 * @return		EOK on success or an error code.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			/* First reference: take it off the free node cache. */
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->devmap_handle);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL,
	    (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint16_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle,
		    uint16_t_le2host(d->firstc));
		if (rc != EOK) {
			(void) block_put(b);
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = BPS(bs) * SPC(bs) * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}
354
[50e5b25]355/*
356 * FAT libfs operations.
357 */
358
[991f645]359int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)
[073f550]360{
[991f645]361 return fat_node_get(rfn, devmap_handle, 0);
[073f550]362}
363
/** Look up a name component within a parent directory.
 *
 * Scans the parent's dentries block by block; stops early at the LAST
 * marker. On a hit, instantiates (or revives) the matching node.
 *
 * @param rfn		Where to store the matched node; NULL on a miss.
 * @param pfn		Parent directory node.
 * @param component	Name component to look for.
 *
 * @return		EOK on success (a miss is not an error) or an error
 *			code.
 */
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned blocks;
	fat_dentry_t *d;
	devmap_handle_t devmap_handle;
	block_t *b;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	devmap_handle = parentp->idx->devmap_handle;
	fibril_mutex_unlock(&parentp->idx->lock);

	bs = block_bb_get(devmap_handle);
	blocks = parentp->size / BPS(bs);
	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK)
			return rc;
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* miss */
				rc = block_put(b);
				*rfn = NULL;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				fat_node_t *nodep;
				fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,
				    parentp->firstc, i * DPS(bs) + j);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					rc = block_put(b);
					return (rc == EOK) ? ENOMEM : rc;
				}
				rc = fat_node_get_core(&nodep, idx);
				/* fat_idx_get_by_pos() returned idx locked. */
				fibril_mutex_unlock(&idx->lock);
				if (rc != EOK) {
					(void) block_put(b);
					return rc;
				}
				*rfn = FS_NODE(nodep);
				rc = block_put(b);
				if (rc != EOK)
					(void) fat_node_put(*rfn);
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK)
			return rc;
	}

	/* Whole directory scanned without a match. */
	*rfn = NULL;
	return EOK;
}
436
[add5835]437/** Instantiate a FAT in-core node. */
[991f645]438int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)
[add5835]439{
[b6035ba]440 fat_node_t *nodep;
[add5835]441 fat_idx_t *idxp;
[0fc1e5d]442 int rc;
[add5835]443
[991f645]444 idxp = fat_idx_get_by_index(devmap_handle, index);
[073f550]445 if (!idxp) {
446 *rfn = NULL;
447 return EOK;
448 }
[add5835]449 /* idxp->lock held */
[0fc1e5d]450 rc = fat_node_get_core(&nodep, idxp);
[6ebe721]451 fibril_mutex_unlock(&idxp->lock);
[0fc1e5d]452 if (rc == EOK)
453 *rfn = FS_NODE(nodep);
454 return rc;
[add5835]455}
456
[1313ee9]457int fat_node_open(fs_node_t *fn)
458{
459 /*
460 * Opening a file is stateless, nothing
461 * to be done here.
462 */
463 return EOK;
464}
465
[073f550]466int fat_node_put(fs_node_t *fn)
[06901c6b]467{
[b6035ba]468 fat_node_t *nodep = FAT_NODE(fn);
[6571b78]469 bool destroy = false;
[34b3ce3]470
[6ebe721]471 fibril_mutex_lock(&nodep->lock);
[34b3ce3]472 if (!--nodep->refcnt) {
[6571b78]473 if (nodep->idx) {
[6ebe721]474 fibril_mutex_lock(&ffn_mutex);
[b72efe8]475 list_append(&nodep->ffn_link, &ffn_list);
[6ebe721]476 fibril_mutex_unlock(&ffn_mutex);
[6571b78]477 } else {
478 /*
479 * The node does not have any index structure associated
480 * with itself. This can only mean that we are releasing
481 * the node after a failed attempt to allocate the index
482 * structure for it.
483 */
484 destroy = true;
485 }
[34b3ce3]486 }
[6ebe721]487 fibril_mutex_unlock(&nodep->lock);
[b6035ba]488 if (destroy) {
489 free(nodep->bp);
490 free(nodep);
491 }
[073f550]492 return EOK;
[06901c6b]493}
494
[991f645]495int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags)
[80e8482]496{
[6571b78]497 fat_idx_t *idxp;
498 fat_node_t *nodep;
[49df572]499 fat_bs_t *bs;
500 fat_cluster_t mcl, lcl;
501 int rc;
502
[991f645]503 bs = block_bb_get(devmap_handle);
[49df572]504 if (flags & L_DIRECTORY) {
505 /* allocate a cluster */
[991f645]506 rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl);
[073f550]507 if (rc != EOK)
508 return rc;
509 /* populate the new cluster with unused dentries */
[991f645]510 rc = fat_zero_cluster(bs, devmap_handle, mcl);
[073f550]511 if (rc != EOK) {
[991f645]512 (void) fat_free_clusters(bs, devmap_handle, mcl);
[073f550]513 return rc;
514 }
[49df572]515 }
[6571b78]516
[17bf658]517 rc = fat_node_get_new(&nodep);
518 if (rc != EOK) {
[991f645]519 (void) fat_free_clusters(bs, devmap_handle, mcl);
[17bf658]520 return rc;
[49df572]521 }
[991f645]522 rc = fat_idx_get_new(&idxp, devmap_handle);
[9a15176]523 if (rc != EOK) {
[991f645]524 (void) fat_free_clusters(bs, devmap_handle, mcl);
[073f550]525 (void) fat_node_put(FS_NODE(nodep));
[9a15176]526 return rc;
[6571b78]527 }
528 /* idxp->lock held */
529 if (flags & L_DIRECTORY) {
530 nodep->type = FAT_DIRECTORY;
[49df572]531 nodep->firstc = mcl;
[7a23d60]532 nodep->size = BPS(bs) * SPC(bs);
[6571b78]533 } else {
534 nodep->type = FAT_FILE;
[49df572]535 nodep->firstc = FAT_CLST_RES0;
536 nodep->size = 0;
[6571b78]537 }
538 nodep->lnkcnt = 0; /* not linked anywhere */
539 nodep->refcnt = 1;
[49df572]540 nodep->dirty = true;
[6571b78]541
542 nodep->idx = idxp;
543 idxp->nodep = nodep;
544
[6ebe721]545 fibril_mutex_unlock(&idxp->lock);
[073f550]546 *rfn = FS_NODE(nodep);
547 return EOK;
[80e8482]548}
549
[b6035ba]550int fat_destroy_node(fs_node_t *fn)
[80e8482]551{
[b6035ba]552 fat_node_t *nodep = FAT_NODE(fn);
[50e5b25]553 fat_bs_t *bs;
[073f550]554 bool has_children;
555 int rc;
[50e5b25]556
557 /*
558 * The node is not reachable from the file system. This means that the
559 * link count should be zero and that the index structure cannot be
560 * found in the position hash. Obviously, we don't need to lock the node
561 * nor its index structure.
562 */
563 assert(nodep->lnkcnt == 0);
564
565 /*
566 * The node may not have any children.
567 */
[073f550]568 rc = fat_has_children(&has_children, fn);
569 if (rc != EOK)
570 return rc;
571 assert(!has_children);
[50e5b25]572
[991f645]573 bs = block_bb_get(nodep->idx->devmap_handle);
[50e5b25]574 if (nodep->firstc != FAT_CLST_RES0) {
575 assert(nodep->size);
576 /* Free all clusters allocated to the node. */
[991f645]577 rc = fat_free_clusters(bs, nodep->idx->devmap_handle,
[cca29e3c]578 nodep->firstc);
[50e5b25]579 }
580
581 fat_idx_destroy(nodep->idx);
[b6035ba]582 free(nodep->bp);
[50e5b25]583 free(nodep);
[cca29e3c]584 return rc;
[80e8482]585}
586
/** Link a child node into a parent directory under the given name.
 *
 * Finds (or creates, by growing the parent) an unused dentry, writes the
 * name into it, and for directories opportunistically creates the "." and
 * ".." entries. The rest of the dentry stays uninitialized until the child
 * node is synced.
 *
 * @param pfn	Parent directory node.
 * @param cfn	Child node to link; its link count must be zero.
 * @param name	Name to link the child under.
 *
 * @return	EOK on success, EMLINK if the child is already linked,
 *		ENOTSUP for unrepresentable names, ENOSPC if the root
 *		directory is full, or another error code.
 */
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	unsigned i, j;
	unsigned blocks;
	fat_cluster_t mcl, lcl;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get us an unused parent node's dentry or grow the parent and allocate
	 * a new one.
	 */

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->devmap_handle);

	blocks = parentp->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot */
				goto hit;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
	}
	/* No free slot found; the new dentry will be first in a new block. */
	j = 0;

	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	if (parentp->firstc == FAT_CLST_ROOT) {
		/* Can't grow the root directory. */
		fibril_mutex_unlock(&parentp->idx->lock);
		return ENOSPC;
	}
	rc = fat_alloc_clusters(bs, parentp->idx->devmap_handle, 1, &mcl, &lcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_zero_cluster(bs, parentp->idx->devmap_handle, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_append_clusters(bs, parentp, mcl, lcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	parentp->size += BPS(bs) * SPC(bs);
	parentp->dirty = true;		/* need to sync node */
	/* i == blocks here, i.e. the first block of the newly added cluster. */
	rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	d = (fat_dentry_t *)b->data;

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except of the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;		/* need to sync block */
	rc = block_put(b);
	fibril_mutex_unlock(&parentp->idx->lock);
	if (rc != EOK)
		return rc;

	fibril_mutex_lock(&childp->idx->lock);

	if (childp->type == FAT_DIRECTORY) {
		/*
		 * If possible, create the Sub-directory Identifier Entry and
		 * the Sub-directory Parent Pointer Entry (i.e. "." and "..").
		 * These entries are not mandatory according to Standard
		 * ECMA-107 and HelenOS VFS does not use them anyway, so this is
		 * rather a sign of our good will.
		 */
		rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			/*
			 * Rather than returning an error, simply skip the
			 * creation of these two entries.
			 */
			goto skip_dots;
		}
		d = (fat_dentry_t *) b->data;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (str_cmp((char *) d->name, FAT_NAME_DOT)) == 0) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			d->firstc = host2uint16_t_le(childp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		d++;
		if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
		    (str_cmp((char *) d->name, FAT_NAME_DOT_DOT) == 0)) {
			memset(d, 0, sizeof(fat_dentry_t));
			memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
			memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
			d->attr = FAT_ATTR_SUBDIR;
			/* ".." in a root child points at the root via RES0. */
			d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
			    host2uint16_t_le(FAT_CLST_RES0) :
			    host2uint16_t_le(parentp->firstc);
			/* TODO: initialize also the date/time members. */
		}
		b->dirty = true;		/* need to sync block */
		/*
		 * Ignore the return value as we would have fallen through on error
		 * anyway.
		 */
		(void) block_put(b);
	}
skip_dots:

	/* Record the child's position within the parent. */
	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * DPS(bs) + j;
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}
768
[cf95bc0]769int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
[80e8482]770{
[b6035ba]771 fat_node_t *parentp = FAT_NODE(pfn);
772 fat_node_t *childp = FAT_NODE(cfn);
[a31c1ccf]773 fat_bs_t *bs;
774 fat_dentry_t *d;
775 block_t *b;
[073f550]776 bool has_children;
[c91f2d1b]777 int rc;
[a31c1ccf]778
[770d281]779 if (!parentp)
780 return EBUSY;
[0be3e8b]781
[073f550]782 rc = fat_has_children(&has_children, cfn);
783 if (rc != EOK)
784 return rc;
785 if (has_children)
[0be3e8b]786 return ENOTEMPTY;
[770d281]787
[6ebe721]788 fibril_mutex_lock(&parentp->lock);
789 fibril_mutex_lock(&childp->lock);
[a31c1ccf]790 assert(childp->lnkcnt == 1);
[6ebe721]791 fibril_mutex_lock(&childp->idx->lock);
[991f645]792 bs = block_bb_get(childp->idx->devmap_handle);
[a31c1ccf]793
[991f645]794 rc = _fat_block_get(&b, bs, childp->idx->devmap_handle, childp->idx->pfc,
[6da81e0]795 NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
[a31c1ccf]796 BLOCK_FLAGS_NONE);
[46c0498]797 if (rc != EOK)
798 goto error;
[a31c1ccf]799 d = (fat_dentry_t *)b->data +
[7a23d60]800 (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
[a31c1ccf]801 /* mark the dentry as not-currently-used */
802 d->name[0] = FAT_DENTRY_ERASED;
803 b->dirty = true; /* need to sync block */
[c91f2d1b]804 rc = block_put(b);
[46c0498]805 if (rc != EOK)
806 goto error;
[a31c1ccf]807
808 /* remove the index structure from the position hash */
809 fat_idx_hashout(childp->idx);
810 /* clear position information */
811 childp->idx->pfc = FAT_CLST_RES0;
812 childp->idx->pdi = 0;
[6ebe721]813 fibril_mutex_unlock(&childp->idx->lock);
[a31c1ccf]814 childp->lnkcnt = 0;
[5ca5eaa7]815 childp->refcnt++; /* keep the node in memory until destroyed */
[a31c1ccf]816 childp->dirty = true;
[6ebe721]817 fibril_mutex_unlock(&childp->lock);
818 fibril_mutex_unlock(&parentp->lock);
[a31c1ccf]819
820 return EOK;
[46c0498]821
822error:
823 fibril_mutex_unlock(&parentp->idx->lock);
824 fibril_mutex_unlock(&childp->lock);
825 fibril_mutex_unlock(&childp->idx->lock);
826 return rc;
[80e8482]827}
828
/** Determine whether a node has any children.
 *
 * Non-directories never have children. For directories the dentries are
 * scanned until the first valid entry (children exist) or the LAST marker
 * (no children) is found.
 *
 * @param has_children	Where to store the result.
 * @param fn		Node to examine.
 *
 * @return		EOK on success or an error code from the block layer.
 */
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}

	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->devmap_handle);

	blocks = nodep->size / BPS(bs);

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < DPS(bs); j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* Rest of the directory is unused. */
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return rc;
			default:
			case FAT_DENTRY_VALID:
				/* Found a live entry: the node has children. */
				rc = block_put(b);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return rc;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}
886
887
888fs_index_t fat_index_get(fs_node_t *fn)
889{
890 return FAT_NODE(fn)->idx->index;
891}
892
[ed903174]893aoff64_t fat_size_get(fs_node_t *fn)
[073f550]894{
895 return FAT_NODE(fn)->size;
[32fb10ed]896}
897
[073f550]898unsigned fat_lnkcnt_get(fs_node_t *fn)
[74ea3c6]899{
[073f550]900 return FAT_NODE(fn)->lnkcnt;
[74ea3c6]901}
902
[50e5b25]903char fat_plb_get_char(unsigned pos)
[74ea3c6]904{
905 return fat_reg.plb_ro[pos % PLB_SIZE];
906}
907
[b6035ba]908bool fat_is_directory(fs_node_t *fn)
[e1e3b26]909{
[b6035ba]910 return FAT_NODE(fn)->type == FAT_DIRECTORY;
[e1e3b26]911}
912
[b6035ba]913bool fat_is_file(fs_node_t *fn)
[e1e3b26]914{
[b6035ba]915 return FAT_NODE(fn)->type == FAT_FILE;
[e1e3b26]916}
917
[991f645]918devmap_handle_t fat_device_get(fs_node_t *node)
[1313ee9]919{
920 return 0;
921}
922
/** Dispatch table hooking the FAT server's node operations into libfs. */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_open = fat_node_open,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file,
	.device_get = fat_device_get
};
943
[0013b9ce]944/*
945 * VFS operations.
946 */
947
[cde485d]948void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
949{
[991f645]950 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[1fbe064b]951 enum cache_mode cmode;
[7858bc5f]952 fat_bs_t *bs;
[472c09d]953
954 /* Accept the mount options */
955 char *opts;
[4cac2d69]956 int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);
[472c09d]957
958 if (rc != EOK) {
[ffa2c8ef]959 async_answer_0(rid, rc);
[594303b]960 return;
961 }
962
[1fbe064b]963 /* Check for option enabling write through. */
964 if (str_cmp(opts, "wtcache") == 0)
965 cmode = CACHE_MODE_WT;
966 else
967 cmode = CACHE_MODE_WB;
968
[64aed80]969 free(opts);
970
[7858bc5f]971 /* initialize libblock */
[79ae36dd]972 rc = block_init(EXCHANGE_SERIALIZE, devmap_handle, BS_SIZE);
[7a35204a]973 if (rc != EOK) {
[ffa2c8ef]974 async_answer_0(rid, rc);
[6284978]975 return;
976 }
977
978 /* prepare the boot block */
[991f645]979 rc = block_bb_read(devmap_handle, BS_BLOCK);
[6284978]980 if (rc != EOK) {
[991f645]981 block_fini(devmap_handle);
[ffa2c8ef]982 async_answer_0(rid, rc);
[7a35204a]983 return;
984 }
985
[7858bc5f]986 /* get the buffer with the boot sector */
[991f645]987 bs = block_bb_get(devmap_handle);
[7858bc5f]988
[7a23d60]989 if (BPS(bs) != BS_SIZE) {
[991f645]990 block_fini(devmap_handle);
[ffa2c8ef]991 async_answer_0(rid, ENOTSUP);
[7a35204a]992 return;
993 }
994
[f1ba5d6]995 /* Initialize the block cache */
[991f645]996 rc = block_cache_init(devmap_handle, BPS(bs), 0 /* XXX */, cmode);
[f1ba5d6]997 if (rc != EOK) {
[991f645]998 block_fini(devmap_handle);
[ffa2c8ef]999 async_answer_0(rid, rc);
[f1ba5d6]1000 return;
1001 }
1002
[2ffaab5]1003 /* Do some simple sanity checks on the file system. */
[991f645]1004 rc = fat_sanity_check(bs, devmap_handle);
[711e1f32]1005 if (rc != EOK) {
[991f645]1006 (void) block_cache_fini(devmap_handle);
1007 block_fini(devmap_handle);
[ffa2c8ef]1008 async_answer_0(rid, rc);
[711e1f32]1009 return;
1010 }
1011
[991f645]1012 rc = fat_idx_init_by_devmap_handle(devmap_handle);
[cde485d]1013 if (rc != EOK) {
[991f645]1014 (void) block_cache_fini(devmap_handle);
1015 block_fini(devmap_handle);
[ffa2c8ef]1016 async_answer_0(rid, rc);
[cde485d]1017 return;
1018 }
1019
[689f036]1020 /* Initialize the root node. */
[b6035ba]1021 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
1022 if (!rfn) {
[991f645]1023 (void) block_cache_fini(devmap_handle);
1024 block_fini(devmap_handle);
1025 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1026 async_answer_0(rid, ENOMEM);
[b6035ba]1027 return;
1028 }
[83937ccd]1029 fs_node_initialize(rfn);
[689f036]1030 fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
1031 if (!rootp) {
[b6035ba]1032 free(rfn);
[991f645]1033 (void) block_cache_fini(devmap_handle);
1034 block_fini(devmap_handle);
1035 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1036 async_answer_0(rid, ENOMEM);
[689f036]1037 return;
1038 }
1039 fat_node_initialize(rootp);
1040
[991f645]1041 fat_idx_t *ridxp = fat_idx_get_by_pos(devmap_handle, FAT_CLST_ROOTPAR, 0);
[689f036]1042 if (!ridxp) {
[b6035ba]1043 free(rfn);
[689f036]1044 free(rootp);
[991f645]1045 (void) block_cache_fini(devmap_handle);
1046 block_fini(devmap_handle);
1047 fat_idx_fini_by_devmap_handle(devmap_handle);
[ffa2c8ef]1048 async_answer_0(rid, ENOMEM);
[689f036]1049 return;
1050 }
1051 assert(ridxp->index == 0);
1052 /* ridxp->lock held */
1053
1054 rootp->type = FAT_DIRECTORY;
1055 rootp->firstc = FAT_CLST_ROOT;
1056 rootp->refcnt = 1;
[5ab597d]1057 rootp->lnkcnt = 0; /* FS root is not linked */
[7a23d60]1058 rootp->size = RDE(bs) * sizeof(fat_dentry_t);
[689f036]1059 rootp->idx = ridxp;
1060 ridxp->nodep = rootp;
[b6035ba]1061 rootp->bp = rfn;
1062 rfn->data = rootp;
[689f036]1063
[6ebe721]1064 fibril_mutex_unlock(&ridxp->lock);
[689f036]1065
[ffa2c8ef]1066 async_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
[cde485d]1067}
1068
/** Handle the VFS_OUT_MOUNT request by delegating to libfs. */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1073
[3c11713]1074void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
1075{
[991f645]1076 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[430de97]1077 fs_node_t *fn;
1078 fat_node_t *nodep;
1079 int rc;
1080
[991f645]1081 rc = fat_root_get(&fn, devmap_handle);
[430de97]1082 if (rc != EOK) {
[ffa2c8ef]1083 async_answer_0(rid, rc);
[430de97]1084 return;
1085 }
1086 nodep = FAT_NODE(fn);
1087
1088 /*
1089 * We expect exactly two references on the root node. One for the
1090 * fat_root_get() above and one created in fat_mounted().
1091 */
1092 if (nodep->refcnt != 2) {
1093 (void) fat_node_put(fn);
[ffa2c8ef]1094 async_answer_0(rid, EBUSY);
[430de97]1095 return;
1096 }
1097
1098 /*
1099 * Put the root node and force it to the FAT free node list.
1100 */
1101 (void) fat_node_put(fn);
1102 (void) fat_node_put(fn);
1103
1104 /*
1105 * Perform cleanup of the node structures, index structures and
1106 * associated data. Write back this file system's dirty blocks and
1107 * stop using libblock for this instance.
1108 */
[991f645]1109 (void) fat_node_fini_by_devmap_handle(devmap_handle);
1110 fat_idx_fini_by_devmap_handle(devmap_handle);
1111 (void) block_cache_fini(devmap_handle);
1112 block_fini(devmap_handle);
[430de97]1113
[ffa2c8ef]1114 async_answer_0(rid, EOK);
[3c11713]1115}
1116
/** Handle the VFS_OUT_UNMOUNT request by delegating to libfs. */
void fat_unmount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_unmount(&fat_libfs_ops, rid, request);
}
1121
[be815bc]1122void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
1123{
[a2aa1dec]1124 libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[be815bc]1125}
1126
[4bf40f6]1127void fat_read(ipc_callid_t rid, ipc_call_t *request)
1128{
[991f645]1129 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1130 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1131 aoff64_t pos =
1132 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1133 fs_node_t *fn;
[b6035ba]1134 fat_node_t *nodep;
[7858bc5f]1135 fat_bs_t *bs;
[79d031b]1136 size_t bytes;
[7858bc5f]1137 block_t *b;
[c91f2d1b]1138 int rc;
[79d031b]1139
[991f645]1140 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1141 if (rc != EOK) {
[ffa2c8ef]1142 async_answer_0(rid, rc);
[073f550]1143 return;
1144 }
[b6035ba]1145 if (!fn) {
[ffa2c8ef]1146 async_answer_0(rid, ENOENT);
[4bf40f6]1147 return;
1148 }
[b6035ba]1149 nodep = FAT_NODE(fn);
[4bf40f6]1150
1151 ipc_callid_t callid;
1152 size_t len;
[0da4e41]1153 if (!async_data_read_receive(&callid, &len)) {
[b6035ba]1154 fat_node_put(fn);
[ffa2c8ef]1155 async_answer_0(callid, EINVAL);
1156 async_answer_0(rid, EINVAL);
[4bf40f6]1157 return;
1158 }
1159
[991f645]1160 bs = block_bb_get(devmap_handle);
[cb682eb]1161
[4bf40f6]1162 if (nodep->type == FAT_FILE) {
[ddd1219]1163 /*
1164 * Our strategy for regular file reads is to read one block at
1165 * most and make use of the possibility to return less data than
1166 * requested. This keeps the code very simple.
1167 */
[0d974d8]1168 if (pos >= nodep->size) {
[7d861950]1169 /* reading beyond the EOF */
1170 bytes = 0;
[0da4e41]1171 (void) async_data_read_finalize(callid, NULL, 0);
[0d974d8]1172 } else {
[7a23d60]1173 bytes = min(len, BPS(bs) - pos % BPS(bs));
[0d974d8]1174 bytes = min(bytes, nodep->size - pos);
[7a23d60]1175 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
[1d8cdb1]1176 BLOCK_FLAGS_NONE);
[453f2e75]1177 if (rc != EOK) {
1178 fat_node_put(fn);
[ffa2c8ef]1179 async_answer_0(callid, rc);
1180 async_answer_0(rid, rc);
[453f2e75]1181 return;
1182 }
[7a23d60]1183 (void) async_data_read_finalize(callid,
1184 b->data + pos % BPS(bs), bytes);
[c91f2d1b]1185 rc = block_put(b);
[453f2e75]1186 if (rc != EOK) {
1187 fat_node_put(fn);
[ffa2c8ef]1188 async_answer_0(rid, rc);
[453f2e75]1189 return;
1190 }
[0d974d8]1191 }
[4bf40f6]1192 } else {
[ddd1219]1193 unsigned bnum;
[ed903174]1194 aoff64_t spos = pos;
[ddd1219]1195 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
1196 fat_dentry_t *d;
1197
[4bf40f6]1198 assert(nodep->type == FAT_DIRECTORY);
[7a23d60]1199 assert(nodep->size % BPS(bs) == 0);
1200 assert(BPS(bs) % sizeof(fat_dentry_t) == 0);
[ddd1219]1201
1202 /*
1203 * Our strategy for readdir() is to use the position pointer as
1204 * an index into the array of all dentries. On entry, it points
1205 * to the first unread dentry. If we skip any dentries, we bump
1206 * the position pointer accordingly.
1207 */
[7a23d60]1208 bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs);
1209 while (bnum < nodep->size / BPS(bs)) {
[ed903174]1210 aoff64_t o;
[ddd1219]1211
[684b655]1212 rc = fat_block_get(&b, bs, nodep, bnum,
1213 BLOCK_FLAGS_NONE);
[453f2e75]1214 if (rc != EOK)
1215 goto err;
[7a23d60]1216 for (o = pos % (BPS(bs) / sizeof(fat_dentry_t));
1217 o < BPS(bs) / sizeof(fat_dentry_t);
[ddd1219]1218 o++, pos++) {
1219 d = ((fat_dentry_t *)b->data) + o;
1220 switch (fat_classify_dentry(d)) {
1221 case FAT_DENTRY_SKIP:
[0fdd6bb]1222 case FAT_DENTRY_FREE:
[ddd1219]1223 continue;
1224 case FAT_DENTRY_LAST:
[c91f2d1b]1225 rc = block_put(b);
[453f2e75]1226 if (rc != EOK)
1227 goto err;
[ddd1219]1228 goto miss;
1229 default:
1230 case FAT_DENTRY_VALID:
[0fdd6bb]1231 fat_dentry_name_get(d, name);
[073f550]1232 rc = block_put(b);
[453f2e75]1233 if (rc != EOK)
1234 goto err;
[ddd1219]1235 goto hit;
1236 }
1237 }
[c91f2d1b]1238 rc = block_put(b);
[453f2e75]1239 if (rc != EOK)
1240 goto err;
[ddd1219]1241 bnum++;
1242 }
1243miss:
[453f2e75]1244 rc = fat_node_put(fn);
[ffa2c8ef]1245 async_answer_0(callid, rc != EOK ? rc : ENOENT);
1246 async_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
[4bf40f6]1247 return;
[453f2e75]1248
1249err:
1250 (void) fat_node_put(fn);
[ffa2c8ef]1251 async_answer_0(callid, rc);
1252 async_answer_0(rid, rc);
[453f2e75]1253 return;
1254
[ddd1219]1255hit:
[0da4e41]1256 (void) async_data_read_finalize(callid, name, str_size(name) + 1);
[ddd1219]1257 bytes = (pos - spos) + 1;
[4bf40f6]1258 }
1259
[453f2e75]1260 rc = fat_node_put(fn);
[ffa2c8ef]1261 async_answer_1(rid, rc, (sysarg_t)bytes);
[4bf40f6]1262}
1263
[c947dda]1264void fat_write(ipc_callid_t rid, ipc_call_t *request)
1265{
[991f645]1266 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1267 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1268 aoff64_t pos =
1269 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1270 fs_node_t *fn;
[b6035ba]1271 fat_node_t *nodep;
[7858bc5f]1272 fat_bs_t *bs;
[dfddfcd]1273 size_t bytes, size;
[7858bc5f]1274 block_t *b;
[ed903174]1275 aoff64_t boundary;
[1d8cdb1]1276 int flags = BLOCK_FLAGS_NONE;
[c91f2d1b]1277 int rc;
[8d32152]1278
[991f645]1279 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1280 if (rc != EOK) {
[ffa2c8ef]1281 async_answer_0(rid, rc);
[073f550]1282 return;
1283 }
[b6035ba]1284 if (!fn) {
[ffa2c8ef]1285 async_answer_0(rid, ENOENT);
[8d32152]1286 return;
1287 }
[b6035ba]1288 nodep = FAT_NODE(fn);
[8d32152]1289
1290 ipc_callid_t callid;
1291 size_t len;
[0da4e41]1292 if (!async_data_write_receive(&callid, &len)) {
[dfddfcd]1293 (void) fat_node_put(fn);
[ffa2c8ef]1294 async_answer_0(callid, EINVAL);
1295 async_answer_0(rid, EINVAL);
[8d32152]1296 return;
1297 }
1298
[991f645]1299 bs = block_bb_get(devmap_handle);
[913a821c]1300
[8d32152]1301 /*
1302 * In all scenarios, we will attempt to write out only one block worth
1303 * of data at maximum. There might be some more efficient approaches,
1304 * but this one greatly simplifies fat_write(). Note that we can afford
1305 * to do this because the client must be ready to handle the return
1306 * value signalizing a smaller number of bytes written.
1307 */
[7a23d60]1308 bytes = min(len, BPS(bs) - pos % BPS(bs));
1309 if (bytes == BPS(bs))
[1d8cdb1]1310 flags |= BLOCK_FLAGS_NOREAD;
[8d32152]1311
[7a23d60]1312 boundary = ROUND_UP(nodep->size, BPC(bs));
[b4b7187]1313 if (pos < boundary) {
[8d32152]1314 /*
1315 * This is the easier case - we are either overwriting already
1316 * existing contents or writing behind the EOF, but still within
1317 * the limits of the last cluster. The node size may grow to the
1318 * next block size boundary.
1319 */
[cca29e3c]1320 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
[dfddfcd]1321 if (rc != EOK) {
1322 (void) fat_node_put(fn);
[ffa2c8ef]1323 async_answer_0(callid, rc);
1324 async_answer_0(rid, rc);
[dfddfcd]1325 return;
1326 }
[7a23d60]1327 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
[dfddfcd]1328 if (rc != EOK) {
1329 (void) fat_node_put(fn);
[ffa2c8ef]1330 async_answer_0(callid, rc);
1331 async_answer_0(rid, rc);
[dfddfcd]1332 return;
1333 }
[7a23d60]1334 (void) async_data_write_finalize(callid,
1335 b->data + pos % BPS(bs), bytes);
[8d32152]1336 b->dirty = true; /* need to sync block */
[c91f2d1b]1337 rc = block_put(b);
[dfddfcd]1338 if (rc != EOK) {
1339 (void) fat_node_put(fn);
[ffa2c8ef]1340 async_answer_0(rid, rc);
[dfddfcd]1341 return;
1342 }
[8d32152]1343 if (pos + bytes > nodep->size) {
1344 nodep->size = pos + bytes;
1345 nodep->dirty = true; /* need to sync node */
1346 }
[dfddfcd]1347 size = nodep->size;
1348 rc = fat_node_put(fn);
[ffa2c8ef]1349 async_answer_2(rid, rc, bytes, nodep->size);
[8d32152]1350 return;
1351 } else {
1352 /*
1353 * This is the more difficult case. We must allocate new
1354 * clusters for the node and zero them out.
1355 */
1356 unsigned nclsts;
[8334a427]1357 fat_cluster_t mcl, lcl;
1358
[7a23d60]1359 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
[6f2dfd1]1360 /* create an independent chain of nclsts clusters in all FATs */
[991f645]1361 rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);
[dfddfcd]1362 if (rc != EOK) {
[6f2dfd1]1363 /* could not allocate a chain of nclsts clusters */
[dfddfcd]1364 (void) fat_node_put(fn);
[ffa2c8ef]1365 async_answer_0(callid, rc);
1366 async_answer_0(rid, rc);
[6f2dfd1]1367 return;
1368 }
1369 /* zero fill any gaps */
[cca29e3c]1370 rc = fat_fill_gap(bs, nodep, mcl, pos);
[dfddfcd]1371 if (rc != EOK) {
[991f645]1372 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1373 (void) fat_node_put(fn);
[ffa2c8ef]1374 async_answer_0(callid, rc);
1375 async_answer_0(rid, rc);
[dfddfcd]1376 return;
1377 }
[991f645]1378 rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL,
[7a23d60]1379 (pos / BPS(bs)) % SPC(bs), flags);
[dfddfcd]1380 if (rc != EOK) {
[991f645]1381 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1382 (void) fat_node_put(fn);
[ffa2c8ef]1383 async_answer_0(callid, rc);
1384 async_answer_0(rid, rc);
[dfddfcd]1385 return;
1386 }
[7a23d60]1387 (void) async_data_write_finalize(callid,
1388 b->data + pos % BPS(bs), bytes);
[b4b7187]1389 b->dirty = true; /* need to sync block */
[c91f2d1b]1390 rc = block_put(b);
[dfddfcd]1391 if (rc != EOK) {
[991f645]1392 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1393 (void) fat_node_put(fn);
[ffa2c8ef]1394 async_answer_0(rid, rc);
[dfddfcd]1395 return;
1396 }
[6f2dfd1]1397 /*
1398 * Append the cluster chain starting in mcl to the end of the
1399 * node's cluster chain.
1400 */
[377cce8]1401 rc = fat_append_clusters(bs, nodep, mcl, lcl);
[dfddfcd]1402 if (rc != EOK) {
[991f645]1403 (void) fat_free_clusters(bs, devmap_handle, mcl);
[dfddfcd]1404 (void) fat_node_put(fn);
[ffa2c8ef]1405 async_answer_0(rid, rc);
[dfddfcd]1406 return;
1407 }
1408 nodep->size = size = pos + bytes;
[b4b7187]1409 nodep->dirty = true; /* need to sync node */
[dfddfcd]1410 rc = fat_node_put(fn);
[ffa2c8ef]1411 async_answer_2(rid, rc, bytes, size);
[6f2dfd1]1412 return;
[8d32152]1413 }
[c947dda]1414}
1415
[6c71a1f]1416void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
1417{
[991f645]1418 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[ed903174]1419 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1420 aoff64_t size =
1421 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
[073f550]1422 fs_node_t *fn;
[b6035ba]1423 fat_node_t *nodep;
[913a821c]1424 fat_bs_t *bs;
[8334a427]1425 int rc;
1426
[991f645]1427 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1428 if (rc != EOK) {
[ffa2c8ef]1429 async_answer_0(rid, rc);
[073f550]1430 return;
1431 }
[b6035ba]1432 if (!fn) {
[ffa2c8ef]1433 async_answer_0(rid, ENOENT);
[8334a427]1434 return;
1435 }
[b6035ba]1436 nodep = FAT_NODE(fn);
[8334a427]1437
[991f645]1438 bs = block_bb_get(devmap_handle);
[913a821c]1439
[8334a427]1440 if (nodep->size == size) {
1441 rc = EOK;
1442 } else if (nodep->size < size) {
1443 /*
[913a821c]1444 * The standard says we have the freedom to grow the node.
[8334a427]1445 * For now, we simply return an error.
1446 */
1447 rc = EINVAL;
[7a23d60]1448 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
[913a821c]1449 /*
1450 * The node will be shrunk, but no clusters will be deallocated.
1451 */
1452 nodep->size = size;
1453 nodep->dirty = true; /* need to sync node */
1454 rc = EOK;
[8334a427]1455 } else {
1456 /*
[913a821c]1457 * The node will be shrunk, clusters will be deallocated.
[8334a427]1458 */
[913a821c]1459 if (size == 0) {
[cca29e3c]1460 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
1461 if (rc != EOK)
1462 goto out;
[913a821c]1463 } else {
1464 fat_cluster_t lastc;
[991f645]1465 rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
[7a23d60]1466 &lastc, NULL, (size - 1) / BPC(bs));
[e402382]1467 if (rc != EOK)
1468 goto out;
[cca29e3c]1469 rc = fat_chop_clusters(bs, nodep, lastc);
1470 if (rc != EOK)
1471 goto out;
[913a821c]1472 }
1473 nodep->size = size;
1474 nodep->dirty = true; /* need to sync node */
1475 rc = EOK;
[8334a427]1476 }
[e402382]1477out:
[b6035ba]1478 fat_node_put(fn);
[ffa2c8ef]1479 async_answer_0(rid, rc);
[8334a427]1480 return;
[6c71a1f]1481}
1482
[c20aa06]1483void fat_close(ipc_callid_t rid, ipc_call_t *request)
1484{
[ffa2c8ef]1485 async_answer_0(rid, EOK);
[c20aa06]1486}
1487
[50e5b25]1488void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
1489{
[991f645]1490 devmap_handle_t devmap_handle = (devmap_handle_t)IPC_GET_ARG1(*request);
[50e5b25]1491 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
[073f550]1492 fs_node_t *fn;
[5ca5eaa7]1493 fat_node_t *nodep;
[50e5b25]1494 int rc;
1495
[991f645]1496 rc = fat_node_get(&fn, devmap_handle, index);
[073f550]1497 if (rc != EOK) {
[ffa2c8ef]1498 async_answer_0(rid, rc);
[073f550]1499 return;
1500 }
[b6035ba]1501 if (!fn) {
[ffa2c8ef]1502 async_answer_0(rid, ENOENT);
[50e5b25]1503 return;
1504 }
1505
[5ca5eaa7]1506 nodep = FAT_NODE(fn);
1507 /*
1508 * We should have exactly two references. One for the above
1509 * call to fat_node_get() and one from fat_unlink().
1510 */
1511 assert(nodep->refcnt == 2);
1512
[b6035ba]1513 rc = fat_destroy_node(fn);
[ffa2c8ef]1514 async_answer_0(rid, rc);
[50e5b25]1515}
1516
[c20aa06]1517void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
1518{
1519 libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
1520}
1521
[852b801]1522void fat_stat(ipc_callid_t rid, ipc_call_t *request)
[c20aa06]1523{
[75160a6]1524 libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
[c20aa06]1525}
1526
1527void fat_sync(ipc_callid_t rid, ipc_call_t *request)
1528{
[991f645]1529 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
[69a60c4]1530 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
1531
1532 fs_node_t *fn;
[991f645]1533 int rc = fat_node_get(&fn, devmap_handle, index);
[69a60c4]1534 if (rc != EOK) {
[ffa2c8ef]1535 async_answer_0(rid, rc);
[69a60c4]1536 return;
1537 }
1538 if (!fn) {
[ffa2c8ef]1539 async_answer_0(rid, ENOENT);
[69a60c4]1540 return;
1541 }
1542
1543 fat_node_t *nodep = FAT_NODE(fn);
1544
1545 nodep->dirty = true;
1546 rc = fat_node_sync(nodep);
1547
1548 fat_node_put(fn);
[ffa2c8ef]1549 async_answer_0(rid, rc);
[c20aa06]1550}
1551
[be815bc]1552/**
1553 * @}
[c20aa06]1554 */
Note: See TracBrowser for help on using the repository browser.