source: mainline/uspace/srv/fs/fat/fat_ops.c@ 0fc1e5d

Last change on this file since 0fc1e5d was 0fc1e5d, checked in by Jakub Jermar <jakub@…>, 16 years ago

Make fat_node_get_core() return an error code.

/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file fat_ops.c
 * @brief Implementation of VFS operations for the FAT file system server.
 */

#include "fat.h"
#include "fat_dentry.h"
#include "fat_fat.h"
#include "../../vfs/vfs.h"
#include <libfs.h>
#include <libblock.h>
#include <ipc/ipc.h>
#include <ipc/services.h>
#include <ipc/devmap.h>
#include <async.h>
#include <errno.h>
#include <string.h>
#include <byteorder.h>
#include <adt/hash_table.h>
#include <adt/list.h>
#include <assert.h>
#include <fibril_sync.h>
#include <sys/mman.h>
#include <align.h>

#define FAT_NODE(node)	((node) ? (fat_node_t *) (node)->data : NULL)
#define FS_NODE(node)	((node) ? (node)->bp : NULL)

/** Mutex protecting the list of cached free FAT nodes. */
static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);

/*
 * Forward declarations of FAT libfs operations.
 */
static int fat_root_get(fs_node_t **, dev_handle_t);
static int fat_match(fs_node_t **, fs_node_t *, const char *);
static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
static int fat_node_put(fs_node_t *);
static int fat_create_node(fs_node_t **, dev_handle_t, int);
static int fat_destroy_node(fs_node_t *);
static int fat_link(fs_node_t *, fs_node_t *, const char *);
static int fat_unlink(fs_node_t *, fs_node_t *, const char *);
static int fat_has_children(bool *, fs_node_t *);
static fs_index_t fat_index_get(fs_node_t *);
static size_t fat_size_get(fs_node_t *);
static unsigned fat_lnkcnt_get(fs_node_t *);
static char fat_plb_get_char(unsigned);
static bool fat_is_directory(fs_node_t *);
static bool fat_is_file(fs_node_t *node);

/*
 * Helper functions.
 */
static void fat_node_initialize(fat_node_t *node)
{
	fibril_mutex_initialize(&node->lock);
	node->bp = NULL;
	node->idx = NULL;
	node->type = 0;
	link_initialize(&node->ffn_link);
	node->size = 0;
	node->lnkcnt = 0;
	node->refcnt = 0;
	node->dirty = false;
}

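/** Write a dirty in-core node back to its on-disk directory entry.
 *
 * @param node Node to be synchronized.
 *
 * @return EOK on success or an error code.
 */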
static int fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;
	unsigned dps;
	int rc;

	assert(node->dirty);

	bs = block_bb_get(node->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
	    (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
	if (rc != EOK)
		return rc;

	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}

	/* TODO: update other fields? (e.g. time fields) */

	b->dirty = true;	/* need to sync block */
	rc = block_put(b);
	return rc;
}

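/** Obtain a fresh in-core FAT node.
 *
 * Reuse a cached free node if one is available, syncing it first if it is
 * dirty; otherwise allocate a new fs_node_t / fat_node_t pair.
 *
 * @param nodepp Place to store pointer to the initialized node.
 *
 * @return EOK on success or an error code.
 */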
static int fat_node_get_new(fat_node_t **nodepp)
{
	fs_node_t *fn;
	fat_node_t *nodep;
	int rc;

	fibril_mutex_lock(&ffn_mutex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		if (!fibril_mutex_trylock(&nodep->lock))
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (!fibril_mutex_trylock(&idxp_tmp->lock)) {
			fibril_mutex_unlock(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		fibril_mutex_unlock(&ffn_mutex);
		if (nodep->dirty) {
			rc = fat_node_sync(nodep);
			if (rc != EOK) {
				idxp_tmp->nodep = NULL;
				fibril_mutex_unlock(&nodep->lock);
				fibril_mutex_unlock(&idxp_tmp->lock);
				free(nodep->bp);
				free(nodep);
				return rc;
			}
		}
		idxp_tmp->nodep = NULL;
		fibril_mutex_unlock(&nodep->lock);
		fibril_mutex_unlock(&idxp_tmp->lock);
		fn = FS_NODE(nodep);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		fibril_mutex_unlock(&ffn_mutex);
		fn = (fs_node_t *)malloc(sizeof(fs_node_t));
		if (!fn)
			return ENOMEM;
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep) {
			free(fn);
			return ENOMEM;
		}
	}
	fat_node_initialize(nodep);
	fs_node_initialize(fn);
	fn->data = nodep;
	nodep->bp = fn;

	*nodepp = nodep;
	return EOK;
}

/** Internal version of fat_node_get().
 *
 * @param idxp Locked index structure.
 */
static int fat_node_get_core(fat_node_t **nodepp, fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	unsigned bps;
	unsigned spc;
	unsigned dps;
	int rc;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		fibril_mutex_lock(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++) {
			fibril_mutex_lock(&ffn_mutex);
			list_remove(&idxp->nodep->ffn_link);
			fibril_mutex_unlock(&ffn_mutex);
		}
		fibril_mutex_unlock(&idxp->nodep->lock);
		*nodepp = idxp->nodep;
		return EOK;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	rc = fat_node_get_new(&nodep);
	if (rc != EOK)
		return rc;

	bs = block_bb_get(idxp->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
	    (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		uint16_t clusters;
		rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
		    uint16_t_le2host(d->firstc));
		if (rc != EOK) {
			(void) fat_node_put(FS_NODE(nodep));
			return rc;
		}
		nodep->size = bps * spc * clusters;
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	rc = block_put(b);
	if (rc != EOK) {
		(void) fat_node_put(FS_NODE(nodep));
		return rc;
	}

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	*nodepp = nodep;
	return EOK;
}

/*
 * FAT libfs operations.
 */

int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
{
	return fat_node_get(rfn, dev_handle, 0);
}

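/** Look up a directory entry by name in a parent directory.
 *
 * @param rfn Place to store the matching child node, or NULL if the
 * component does not exist.
 * @param pfn Parent directory node.
 * @param component Name of the component to look up.
 *
 * @return EOK on success (including the not-found case) or an error code.
 */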
int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	fat_dentry_t *d;
	block_t *b;
	int rc;

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);
	blocks = parentp->size / bps;
	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				fibril_mutex_unlock(&parentp->idx->lock);
				*rfn = NULL;
				return EOK;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				fat_node_t *nodep;
				/*
				 * Assume tree hierarchy for locking. We
				 * already have the parent and now we are going
				 * to lock the child. Never lock in the opposite
				 * order.
				 */
				fat_idx_t *idx = fat_idx_get_by_pos(
				    parentp->idx->dev_handle, parentp->firstc,
				    i * dps + j);
				fibril_mutex_unlock(&parentp->idx->lock);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					rc = block_put(b);
					/* expect EOK as b was not dirty */
					assert(rc == EOK);
					return ENOMEM;
				}
				rc = fat_node_get_core(&nodep, idx);
				assert(rc == EOK);
				fibril_mutex_unlock(&idx->lock);
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				*rfn = FS_NODE(nodep);
				return EOK;
			}
		}
		rc = block_put(b);
		assert(rc == EOK);	/* expect EOK as b was not dirty */
	}

	fibril_mutex_unlock(&parentp->idx->lock);
	*rfn = NULL;
	return EOK;
}

/** Instantiate a FAT in-core node. */
int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
{
	fat_node_t *nodep;
	fat_idx_t *idxp;
	int rc;

	idxp = fat_idx_get_by_index(dev_handle, index);
	if (!idxp) {
		*rfn = NULL;
		return EOK;
	}
	/* idxp->lock held */
	rc = fat_node_get_core(&nodep, idxp);
	fibril_mutex_unlock(&idxp->lock);
	if (rc == EOK)
		*rfn = FS_NODE(nodep);
	return rc;
}

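/** Drop a reference to an in-core FAT node.
 *
 * When the reference count drops to zero, the node is either cached on the
 * free list or, if it has no index structure, deallocated.
 */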
int fat_node_put(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	bool destroy = false;

	fibril_mutex_lock(&nodep->lock);
	if (!--nodep->refcnt) {
		if (nodep->idx) {
			fibril_mutex_lock(&ffn_mutex);
			list_append(&nodep->ffn_link, &ffn_head);
			fibril_mutex_unlock(&ffn_mutex);
		} else {
			/*
			 * The node does not have any index structure associated
			 * with itself. This can only mean that we are releasing
			 * the node after a failed attempt to allocate the index
			 * structure for it.
			 */
			destroy = true;
		}
	}
	fibril_mutex_unlock(&nodep->lock);
	if (destroy) {
		free(nodep->bp);
		free(nodep);
	}
	return EOK;
}

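/** Create a new file or directory node.
 *
 * For directories, one cluster is allocated and zeroed out in advance.
 *
 * @param rfn Place to store the new node.
 * @param dev_handle Device handle of the file system.
 * @param flags L_DIRECTORY to create a directory, otherwise a regular file.
 *
 * @return EOK on success or an error code.
 */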
int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
{
	fat_idx_t *idxp;
	fat_node_t *nodep;
	fat_bs_t *bs;
	fat_cluster_t mcl, lcl;
	uint16_t bps;
	int rc;

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	if (flags & L_DIRECTORY) {
		/* allocate a cluster */
		rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
		if (rc != EOK)
			return rc;
		/* populate the new cluster with unused dentries */
		rc = fat_zero_cluster(bs, dev_handle, mcl);
		if (rc != EOK) {
			(void) fat_free_clusters(bs, dev_handle, mcl);
			return rc;
		}
	}

	rc = fat_node_get_new(&nodep);
	if (rc != EOK) {
		/* Only free a cluster if one was actually allocated above. */
		if (flags & L_DIRECTORY)
			(void) fat_free_clusters(bs, dev_handle, mcl);
		return rc;
	}
	idxp = fat_idx_get_new(dev_handle);
	if (!idxp) {
		if (flags & L_DIRECTORY)
			(void) fat_free_clusters(bs, dev_handle, mcl);
		(void) fat_node_put(FS_NODE(nodep));
		return ENOMEM;	/* FIXME: determine the true error code */
	}
	/* idxp->lock held */
	if (flags & L_DIRECTORY) {
		nodep->type = FAT_DIRECTORY;
		nodep->firstc = mcl;
		nodep->size = bps * bs->spc;
	} else {
		nodep->type = FAT_FILE;
		nodep->firstc = FAT_CLST_RES0;
		nodep->size = 0;
	}
	nodep->lnkcnt = 0;	/* not linked anywhere */
	nodep->refcnt = 1;
	nodep->dirty = true;

	nodep->idx = idxp;
	idxp->nodep = nodep;

	fibril_mutex_unlock(&idxp->lock);
	*rfn = FS_NODE(nodep);
	return EOK;
}

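/** Destroy a node that is no longer linked in the file system.
 *
 * All clusters allocated to the node are freed and its index structure and
 * in-core structures are released.
 */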
int fat_destroy_node(fs_node_t *fn)
{
	fat_node_t *nodep = FAT_NODE(fn);
	fat_bs_t *bs;
	bool has_children;
	int rc;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we need to lock neither the
	 * node nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	rc = fat_has_children(&has_children, fn);
	if (rc != EOK)
		return rc;
	assert(!has_children);

	bs = block_bb_get(nodep->idx->dev_handle);
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		rc = fat_free_clusters(bs, nodep->idx->dev_handle,
		    nodep->firstc);
	}

	fat_idx_destroy(nodep->idx);
	free(nodep->bp);
	free(nodep);
	return rc;
}

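/** Link a child node into a parent directory under the given name.
 *
 * An unused dentry is found in the parent (growing the parent by one
 * cluster if necessary) and the name is written into it. For directories,
 * the "." and ".." entries are created as well if possible.
 */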
int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	unsigned i, j;
	uint16_t bps;
	unsigned dps;
	unsigned blocks;
	fat_cluster_t mcl, lcl;
	int rc;

	fibril_mutex_lock(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		fibril_mutex_unlock(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	fibril_mutex_unlock(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get an unused dentry from the parent node or grow the parent and
	 * allocate a new one.
	 */

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	blocks = parentp->size / bps;

	for (i = 0; i < blocks; i++) {
		rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot */
				goto hit;
			}
		}
		rc = block_put(b);
		if (rc != EOK) {
			fibril_mutex_unlock(&parentp->idx->lock);
			return rc;
		}
	}
	j = 0;

	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	if (parentp->firstc == FAT_CLST_ROOT) {
		/* Can't grow the root directory. */
		fibril_mutex_unlock(&parentp->idx->lock);
		return ENOSPC;
	}
	rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	rc = fat_append_clusters(bs, parentp, mcl);
	if (rc != EOK) {
		(void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	parentp->size += bps * bs->spc;
	parentp->dirty = true;	/* need to sync node */
	rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		fibril_mutex_unlock(&parentp->idx->lock);
		return rc;
	}
	d = (fat_dentry_t *)b->data;

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except for the name and the extension, will remain
	 * uninitialized until the corresponding node is synced. Thus the valid
	 * dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;	/* need to sync block */
	rc = block_put(b);
	fibril_mutex_unlock(&parentp->idx->lock);
	if (rc != EOK)
		return rc;

	fibril_mutex_lock(&childp->idx->lock);

	/*
	 * If possible, create the Sub-directory Identifier Entry and the
	 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
	 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
	 * not use them anyway, so this is rather a sign of our good will.
	 */
	rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE);
	if (rc != EOK) {
		/*
		 * Rather than returning an error, simply skip the creation of
		 * these two entries.
		 */
		goto skip_dots;
	}
	d = (fat_dentry_t *)b->data;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    str_cmp(d->name, FAT_NAME_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		str_cpy(d->name, 8, FAT_NAME_DOT);
		str_cpy(d->ext, 3, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		d->firstc = host2uint16_t_le(childp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	d++;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		str_cpy(d->name, 8, FAT_NAME_DOT_DOT);
		str_cpy(d->ext, 3, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
		    host2uint16_t_le(FAT_CLST_RES0) :
		    host2uint16_t_le(parentp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	b->dirty = true;	/* need to sync block */
	/*
	 * Ignore the return value as we would have fallen through on error
	 * anyway.
	 */
	(void) block_put(b);
skip_dots:

	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * dps + j;
	fibril_mutex_unlock(&childp->idx->lock);

	fibril_mutex_lock(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;	/* need to sync node */
	fibril_mutex_unlock(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}

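/** Remove the child's directory entry from the parent directory.
 *
 * The child must not have any children of its own. Its dentry is marked as
 * erased and its index structure is removed from the position hash.
 */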
int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm)
{
	fat_node_t *parentp = FAT_NODE(pfn);
	fat_node_t *childp = FAT_NODE(cfn);
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;
	block_t *b;
	bool has_children;
	int rc;

	if (!parentp)
		return EBUSY;

	rc = fat_has_children(&has_children, cfn);
	if (rc != EOK)
		return rc;
	if (has_children)
		return ENOTEMPTY;

	fibril_mutex_lock(&parentp->lock);
	fibril_mutex_lock(&childp->lock);
	assert(childp->lnkcnt == 1);
	fibril_mutex_lock(&childp->idx->lock);
	bs = block_bb_get(childp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);

	rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
	    (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
	    BLOCK_FLAGS_NONE);
	if (rc != EOK)
		goto error;
	d = (fat_dentry_t *)b->data +
	    (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
	/* mark the dentry as not-currently-used */
	d->name[0] = FAT_DENTRY_ERASED;
	b->dirty = true;	/* need to sync block */
	rc = block_put(b);
	if (rc != EOK)
		goto error;

	/* remove the index structure from the position hash */
	fat_idx_hashout(childp->idx);
	/* clear position information */
	childp->idx->pfc = FAT_CLST_RES0;
	childp->idx->pdi = 0;
	fibril_mutex_unlock(&childp->idx->lock);
	childp->lnkcnt = 0;
	childp->dirty = true;
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&parentp->lock);

	return EOK;

error:
	fibril_mutex_unlock(&parentp->lock);
	fibril_mutex_unlock(&childp->lock);
	fibril_mutex_unlock(&childp->idx->lock);
	return rc;
}

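/** Determine whether a directory contains any valid directory entries.
 *
 * For non-directory nodes, *has_children is always set to false.
 */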
int fat_has_children(bool *has_children, fs_node_t *fn)
{
	fat_bs_t *bs;
	fat_node_t *nodep = FAT_NODE(fn);
	unsigned bps;
	unsigned dps;
	unsigned blocks;
	block_t *b;
	unsigned i, j;
	int rc;

	if (nodep->type != FAT_DIRECTORY) {
		*has_children = false;
		return EOK;
	}

	fibril_mutex_lock(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	blocks = nodep->size / bps;

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
		if (rc != EOK) {
			fibril_mutex_unlock(&nodep->idx->lock);
			return rc;
		}
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = false;
				return EOK;
			default:
			case FAT_DENTRY_VALID:
				rc = block_put(b);
				/* expect EOK as b was not dirty */
				assert(rc == EOK);
				fibril_mutex_unlock(&nodep->idx->lock);
				*has_children = true;
				return EOK;
			}
		}
		rc = block_put(b);
		assert(rc == EOK);	/* expect EOK as b was not dirty */
	}

	fibril_mutex_unlock(&nodep->idx->lock);
	*has_children = false;
	return EOK;
}


fs_index_t fat_index_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->idx->index;
}

size_t fat_size_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->size;
}

unsigned fat_lnkcnt_get(fs_node_t *fn)
{
	return FAT_NODE(fn)->lnkcnt;
}

char fat_plb_get_char(unsigned pos)
{
	return fat_reg.plb_ro[pos % PLB_SIZE];
}

bool fat_is_directory(fs_node_t *fn)
{
	return FAT_NODE(fn)->type == FAT_DIRECTORY;
}

bool fat_is_file(fs_node_t *fn)
{
	return FAT_NODE(fn)->type == FAT_FILE;
}

/** libfs operations */
libfs_ops_t fat_libfs_ops = {
	.root_get = fat_root_get,
	.match = fat_match,
	.node_get = fat_node_get,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.has_children = fat_has_children,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file
};

/*
 * VFS operations.
 */

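/** Handle the VFS_MOUNTED request.
 *
 * Accepts the mount options, initializes libblock and the block cache for
 * the device, sets up the per-device index structures and instantiates the
 * root node.
 */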
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
	enum cache_mode cmode;
	fat_bs_t *bs;
	uint16_t bps;
	uint16_t rde;
	int rc;

	/* accept the mount options */
	ipc_callid_t callid;
	size_t size;
	if (!ipc_data_write_receive(&callid, &size)) {
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}
	char *opts = malloc(size + 1);
	if (!opts) {
		ipc_answer_0(callid, ENOMEM);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	ipcarg_t retval = ipc_data_write_finalize(callid, opts, size);
	if (retval != EOK) {
		ipc_answer_0(rid, retval);
		free(opts);
		return;
	}
	opts[size] = '\0';

	/* Check for option enabling write through. */
	if (str_cmp(opts, "wtcache") == 0)
		cmode = CACHE_MODE_WT;
	else
		cmode = CACHE_MODE_WB;

	/* initialize libblock */
	rc = block_init(dev_handle, BS_SIZE);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}

	/* prepare the boot block */
	rc = block_bb_read(dev_handle, BS_BLOCK);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	/* get the buffer with the boot sector */
	bs = block_bb_get(dev_handle);

	/* Read the number of root directory entries. */
	bps = uint16_t_le2host(bs->bps);
	rde = uint16_t_le2host(bs->root_ent_max);

	if (bps != BS_SIZE) {
		block_fini(dev_handle);
		ipc_answer_0(rid, ENOTSUP);
		return;
	}

	/* Initialize the block cache */
	rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	rc = fat_idx_init_by_dev_handle(dev_handle);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	/* Initialize the root node. */
	fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
	if (!rfn) {
		block_fini(dev_handle);
		fat_idx_fini_by_dev_handle(dev_handle);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	fs_node_initialize(rfn);
	fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
	if (!rootp) {
		free(rfn);
		block_fini(dev_handle);
		fat_idx_fini_by_dev_handle(dev_handle);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	fat_node_initialize(rootp);

	fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
	if (!ridxp) {
		free(rfn);
		free(rootp);
		block_fini(dev_handle);
		fat_idx_fini_by_dev_handle(dev_handle);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	assert(ridxp->index == 0);
	/* ridxp->lock held */

	rootp->type = FAT_DIRECTORY;
	rootp->firstc = FAT_CLST_ROOT;
	rootp->refcnt = 1;
	rootp->lnkcnt = 0;	/* FS root is not linked */
	rootp->size = rde * sizeof(fat_dentry_t);
	rootp->idx = ridxp;
	ridxp->nodep = rootp;
	rootp->bp = rfn;
	rfn->data = rootp;

	fibril_mutex_unlock(&ridxp->lock);

	ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}

void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

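/** Handle the VFS_READ request.
 *
 * For regular files, at most one block worth of data is returned per call.
 * For directories, the position is treated as an index into the array of
 * dentries and the name of the next valid entry is returned.
 */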
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	uint16_t bps;
	size_t bytes;
	block_t *b;
	int rc;

	rc = fat_node_get(&fn, dev_handle, index);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) ipc_data_read_finalize(callid, NULL, 0);
		} else {
			bytes = min(len, bps - pos % bps);
			bytes = min(bytes, nodep->size - pos);
			rc = fat_block_get(&b, bs, nodep, pos / bps,
			    BLOCK_FLAGS_NONE);
			assert(rc == EOK);
			(void) ipc_data_read_finalize(callid, b->data + pos % bps,
			    bytes);
			rc = block_put(b);
			assert(rc == EOK);
		}
	} else {
		unsigned bnum;
		off_t spos = pos;
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % bps == 0);
		assert(bps % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / bps;
		while (bnum < nodep->size / bps) {
			off_t o;

			rc = fat_block_get(&b, bs, nodep, bnum,
			    BLOCK_FLAGS_NONE);
			assert(rc == EOK);
			for (o = pos % (bps / sizeof(fat_dentry_t));
			    o < bps / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
				case FAT_DENTRY_FREE:
					continue;
				case FAT_DENTRY_LAST:
					rc = block_put(b);
					assert(rc == EOK);
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					fat_dentry_name_get(d, name);
					rc = block_put(b);
					assert(rc == EOK);
					goto hit;
				}
			}
			rc = block_put(b);
			assert(rc == EOK);
			bnum++;
		}
miss:
		fat_node_put(fn);
		ipc_answer_0(callid, ENOENT);
		ipc_answer_1(rid, ENOENT, 0);
		return;
hit:
		(void) ipc_data_read_finalize(callid, name, str_size(name) + 1);
		bytes = (pos - spos) + 1;
	}

	fat_node_put(fn);
	ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}

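/** Handle the VFS_WRITE request.
 *
 * At most one block worth of data is written per call. If the write
 * reaches past the last allocated cluster, a new cluster chain is
 * allocated, zero-filled and appended to the node first.
 */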
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	uint16_t bps;
	unsigned spc;
	unsigned bpc;		/* bytes per cluster */
	off_t boundary;
	int flags = BLOCK_FLAGS_NONE;
	int rc;

	rc = fat_node_get(&fn, dev_handle, index);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_write_receive(&callid, &len)) {
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	/*
	 * In all scenarios, we will attempt to write out only one block worth
	 * of data at maximum. There might be some more efficient approaches,
	 * but this one greatly simplifies fat_write(). Note that we can afford
	 * to do this because the client must be ready to handle the return
	 * value signaling a smaller number of bytes written.
	 */
	bytes = min(len, bps - pos % bps);
	if (bytes == bps)
		flags |= BLOCK_FLAGS_NOREAD;

	boundary = ROUND_UP(nodep->size, bpc);
	if (pos < boundary) {
		/*
		 * This is the easier case - we are either overwriting already
		 * existing contents or writing behind the EOF, but still within
		 * the limits of the last cluster. The node size may grow to the
		 * next block size boundary.
		 */
		rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
		assert(rc == EOK);
		rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
		assert(rc == EOK);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;	/* need to sync block */
		rc = block_put(b);
		assert(rc == EOK);
		if (pos + bytes > nodep->size) {
			nodep->size = pos + bytes;
			nodep->dirty = true;	/* need to sync node */
		}
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(fn);
		return;
	} else {
		/*
		 * This is the more difficult case. We must allocate new
		 * clusters for the node and zero them out.
		 */
		int status;
		unsigned nclsts;
		fat_cluster_t mcl, lcl;

		nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
		/* create an independent chain of nclsts clusters in all FATs */
		status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
		if (status != EOK) {
			/* could not allocate a chain of nclsts clusters */
			fat_node_put(fn);
			ipc_answer_0(callid, status);
			ipc_answer_0(rid, status);
			return;
		}
		/* zero fill any gaps */
		rc = fat_fill_gap(bs, nodep, mcl, pos);
		assert(rc == EOK);
		rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
		    flags);
		assert(rc == EOK);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;	/* need to sync block */
		rc = block_put(b);
		assert(rc == EOK);
		/*
		 * Append the cluster chain starting in mcl to the end of the
		 * node's cluster chain.
		 */
		rc = fat_append_clusters(bs, nodep, mcl);
		assert(rc == EOK);
		nodep->size = pos + bytes;
		nodep->dirty = true;	/* need to sync node */
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(fn);
		return;
	}
}

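/** Handle the VFS_TRUNCATE request.
 *
 * Only shrinking is supported; an attempt to grow the node this way
 * returns EINVAL. Clusters beyond the new size are deallocated.
 */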
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	size_t size = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn;
	fat_node_t *nodep;
	fat_bs_t *bs;
	uint16_t bps;
	uint8_t spc;
	unsigned bpc;	/* bytes per cluster */
	int rc;

	rc = fat_node_get(&fn, dev_handle, index);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	if (nodep->size == size) {
		rc = EOK;
	} else if (nodep->size < size) {
		/*
		 * The standard says we have the freedom to grow the node.
		 * For now, we simply return an error.
		 */
		rc = EINVAL;
	} else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
		/*
		 * The node will be shrunk, but no clusters will be deallocated.
		 */
		nodep->size = size;
		nodep->dirty = true;	/* need to sync node */
		rc = EOK;
	} else {
		/*
		 * The node will be shrunk, clusters will be deallocated.
		 */
		if (size == 0) {
			rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
			if (rc != EOK)
				goto out;
		} else {
			fat_cluster_t lastc;
			rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
			    &lastc, NULL, (size - 1) / bpc);
			if (rc != EOK)
				goto out;
			rc = fat_chop_clusters(bs, nodep, lastc);
			if (rc != EOK)
				goto out;
		}
		nodep->size = size;
		nodep->dirty = true;	/* need to sync node */
		rc = EOK;
	}
out:
	fat_node_put(fn);
	ipc_answer_0(rid, rc);
	return;
}

void fat_close(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_answer_0(rid, EOK);
}

void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	fs_node_t *fn;
	int rc;

	rc = fat_node_get(&fn, dev_handle, index);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}
	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	rc = fat_destroy_node(fn);
	ipc_answer_0(rid, rc);
}

void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

void fat_sync(ipc_callid_t rid, ipc_call_t *request)
{
	/* Dummy implementation */
	ipc_answer_0(rid, EOK);
}

/**
 * @}
 */