source: mainline/uspace/srv/vfs/vfs_ops.c@76a67ce

Last change on this file since 76a67ce was 76a67ce, checked in by Maurizio Lombardi <m.lombardi85@…>, 14 years ago

vfs_get_mtab:

  • Remove mtab entry when unmounting a filesystem
  • Add vfs_get_mtab() implementation
/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file vfs_ops.c
 * @brief Operations that VFS offers to its clients.
 */

#include "vfs.h"
#include <macros.h>
#include <stdint.h>
#include <async.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <str.h>
#include <bool.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <unistd.h>
#include <ctype.h>
#include <fcntl.h>
#include <assert.h>
#include <vfs/canonify.h>
#include <vfs/vfs_mtab.h>

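/*
 * List of mounted file systems as reported by vfs_get_mtab(), guarded by
 * mtab_list_lock. Entries are appended in vfs_mount() and removed again in
 * vfs_unmount(); mtab_size tracks the number of entries.
 */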
FIBRIL_MUTEX_INITIALIZE(mtab_list_lock);
LIST_INITIALIZE(mtab_list);
static size_t mtab_size = 0;

/* Forward declarations of static functions. */
static int vfs_truncate_internal(fs_handle_t, service_id_t, fs_index_t,
    aoff64_t);

/**
 * This rwlock prevents the race between a triplet-to-VFS-node resolution and a
 * concurrent VFS operation which modifies the file system namespace.
 */
FIBRIL_RWLOCK_INITIALIZE(namespace_rwlock);

vfs_pair_t rootfs = {
    .fs_handle = 0,
    .service_id = 0
};

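/** Mount a file system instance on a mount point.
 *
 * Answers @a rid itself on all paths, both success and failure.
 *
 * @param rid        Call ID of the client request to be answered.
 * @param service_id Service ID of the device backing the mounted instance.
 * @param fs_handle  Handle of the file system being mounted.
 * @param mp         Path of the mount point.
 * @param opts       Mount options passed on to the file system server.
 */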
static void vfs_mount_internal(ipc_callid_t rid, service_id_t service_id,
    fs_handle_t fs_handle, char *mp, char *opts)
{
    vfs_lookup_res_t mp_res;
    vfs_lookup_res_t mr_res;
    vfs_node_t *mp_node = NULL;
    vfs_node_t *mr_node;
    fs_index_t rindex;
    aoff64_t rsize;
    unsigned rlnkcnt;
    async_exch_t *exch;
    sysarg_t rc;
    aid_t msg;
    ipc_call_t answer;

    /* Resolve the path to the mountpoint. */
    fibril_rwlock_write_lock(&namespace_rwlock);
    if (rootfs.fs_handle) {
        /* We already have the root FS. */
        if (str_cmp(mp, "/") == 0) {
            /* Trying to mount root FS over root FS */
            fibril_rwlock_write_unlock(&namespace_rwlock);
            async_answer_0(rid, EBUSY);
            return;
        }

        rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
        if (rc != EOK) {
            /* The lookup failed for some reason. */
            fibril_rwlock_write_unlock(&namespace_rwlock);
            async_answer_0(rid, rc);
            return;
        }

        mp_node = vfs_node_get(&mp_res);
        if (!mp_node) {
            fibril_rwlock_write_unlock(&namespace_rwlock);
            async_answer_0(rid, ENOMEM);
            return;
        }

        /*
         * Now we hold a reference to mp_node.
         * It will be dropped upon the corresponding VFS_IN_UNMOUNT.
         * This prevents the mount point from being deleted.
         */
    } else {
        /* We still don't have the root file system mounted. */
        if (str_cmp(mp, "/") == 0) {
            /*
             * For this simple, but important case,
             * we are almost done.
             */

            /* Tell the mountee that it is being mounted. */
            exch = vfs_exchange_grab(fs_handle);
            msg = async_send_1(exch, VFS_OUT_MOUNTED,
                (sysarg_t) service_id, &answer);
            /* Send the mount options */
            rc = async_data_write_start(exch, (void *) opts,
                str_size(opts));
            vfs_exchange_release(exch);

            if (rc != EOK) {
                async_wait_for(msg, NULL);
                fibril_rwlock_write_unlock(&namespace_rwlock);
                async_answer_0(rid, rc);
                return;
            }
            async_wait_for(msg, &rc);

            if (rc != EOK) {
                fibril_rwlock_write_unlock(&namespace_rwlock);
                async_answer_0(rid, rc);
                return;
            }

            rindex = (fs_index_t) IPC_GET_ARG1(answer);
            rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
                IPC_GET_ARG3(answer));
            rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

            mr_res.triplet.fs_handle = fs_handle;
            mr_res.triplet.service_id = service_id;
            mr_res.triplet.index = rindex;
            mr_res.size = rsize;
            mr_res.lnkcnt = rlnkcnt;
            mr_res.type = VFS_NODE_DIRECTORY;

            rootfs.fs_handle = fs_handle;
            rootfs.service_id = service_id;

            /* Add reference to the mounted root. */
            mr_node = vfs_node_get(&mr_res);
            assert(mr_node);

            fibril_rwlock_write_unlock(&namespace_rwlock);
            async_answer_0(rid, rc);
            return;
        } else {
            /*
             * We can't resolve this without the root filesystem
             * being mounted first.
             */
            fibril_rwlock_write_unlock(&namespace_rwlock);
            async_answer_0(rid, ENOENT);
            return;
        }
    }

    /*
     * At this point, we have all necessary pieces: file system handle
     * and service ID, and we know the mount point VFS node.
     */

    async_exch_t *mountee_exch = vfs_exchange_grab(fs_handle);
    assert(mountee_exch);

    exch = vfs_exchange_grab(mp_res.triplet.fs_handle);
    msg = async_send_4(exch, VFS_OUT_MOUNT,
        (sysarg_t) mp_res.triplet.service_id,
        (sysarg_t) mp_res.triplet.index,
        (sysarg_t) fs_handle,
        (sysarg_t) service_id, &answer);

    /* Send connection */
    rc = async_exchange_clone(exch, mountee_exch);
    vfs_exchange_release(mountee_exch);

    if (rc != EOK) {
        vfs_exchange_release(exch);
        async_wait_for(msg, NULL);

        /* Mount failed, drop reference to mp_node. */
        if (mp_node)
            vfs_node_put(mp_node);

        async_answer_0(rid, rc);
        fibril_rwlock_write_unlock(&namespace_rwlock);
        return;
    }

    /* send the mount options */
    rc = async_data_write_start(exch, (void *) opts, str_size(opts));
    if (rc != EOK) {
        vfs_exchange_release(exch);
        async_wait_for(msg, NULL);

        /* Mount failed, drop reference to mp_node. */
        if (mp_node)
            vfs_node_put(mp_node);

        fibril_rwlock_write_unlock(&namespace_rwlock);
        async_answer_0(rid, rc);
        return;
    }

    /*
     * Wait for the answer before releasing the exchange to avoid deadlock
     * in case the answer depends on further calls to the same file system.
     * Think of a case when mounting a FS on a file_bd backed by a file on
     * the same FS.
     */
    async_wait_for(msg, &rc);
    vfs_exchange_release(exch);

    if (rc == EOK) {
        rindex = (fs_index_t) IPC_GET_ARG1(answer);
        rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
            IPC_GET_ARG3(answer));
        rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

        mr_res.triplet.fs_handle = fs_handle;
        mr_res.triplet.service_id = service_id;
        mr_res.triplet.index = rindex;
        mr_res.size = rsize;
        mr_res.lnkcnt = rlnkcnt;
        mr_res.type = VFS_NODE_DIRECTORY;

        /* Add reference to the mounted root. */
        mr_node = vfs_node_get(&mr_res);
        assert(mr_node);
    } else {
        /* Mount failed, drop reference to mp_node. */
        if (mp_node)
            vfs_node_put(mp_node);
    }

    async_answer_0(rid, rc);
    fibril_rwlock_write_unlock(&namespace_rwlock);
}

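/** Handle a client mount request.
 *
 * The request carries the device service ID (ARG1), the mount flags (ARG2)
 * and the file system instance number (ARG3). The mount point path, the mount
 * options and the file system name then arrive as three IPC data writes,
 * followed by a VFS_IN_PING used to report whether the named file system is
 * known. On success, the new mount is recorded in the mtab list.
 *
 * @param rid     Call ID of the request to be answered.
 * @param request The mount request call.
 */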
void vfs_mount(ipc_callid_t rid, ipc_call_t *request)
{
    service_id_t service_id;

    /*
     * We expect the library to do the device-name to device-handle
     * translation for us, thus the device handle will arrive as ARG1
     * in the request.
     */
    service_id = (service_id_t) IPC_GET_ARG1(*request);

    /*
     * Mount flags are passed as ARG2.
     */
    unsigned int flags = (unsigned int) IPC_GET_ARG2(*request);

    /*
     * Instance number is passed as ARG3.
     */
    unsigned int instance = IPC_GET_ARG3(*request);

    /* We want the client to send us the mount point. */
    char *mp;
    int rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
        0, NULL);
    if (rc != EOK) {
        async_answer_0(rid, rc);
        return;
    }

    /* Now we expect to receive the mount options. */
    char *opts;
    rc = async_data_write_accept((void **) &opts, true, 0, MAX_MNTOPTS_LEN,
        0, NULL);
    if (rc != EOK) {
        free(mp);
        async_answer_0(rid, rc);
        return;
    }

    /*
     * Now, we expect the client to send us data with the name of the file
     * system.
     */
    char *fs_name;
    rc = async_data_write_accept((void **) &fs_name, true, 0,
        FS_NAME_MAXLEN, 0, NULL);
    if (rc != EOK) {
        free(mp);
        free(opts);
        async_answer_0(rid, rc);
        return;
    }

    /*
     * Wait for VFS_IN_PING so that we can return an error if we don't know
     * fs_name.
     */
    ipc_call_t data;
    ipc_callid_t callid = async_get_call(&data);
    if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
        async_answer_0(callid, ENOTSUP);
        async_answer_0(rid, ENOTSUP);
        free(mp);
        free(opts);
        free(fs_name);
        return;
    }

    /*
     * Check if we know a file system with the same name as is in fs_name.
     * This will also give us its file system handle.
     */
    fibril_mutex_lock(&fs_list_lock);
    fs_handle_t fs_handle;
recheck:
    fs_handle = fs_name_to_handle(instance, fs_name, false);
    if (!fs_handle) {
        if (flags & IPC_FLAG_BLOCKING) {
            fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
            goto recheck;
        }

        fibril_mutex_unlock(&fs_list_lock);
        async_answer_0(callid, ENOENT);
        async_answer_0(rid, ENOENT);
        free(mp);
        free(fs_name);
        free(opts);
        return;
    }
    fibril_mutex_unlock(&fs_list_lock);

    /* Do the mount */
    vfs_mount_internal(rid, service_id, fs_handle, mp, opts);

    /* Add the filesystem info to the list of mounted filesystems */
    mtab_list_ent_t *mtab_list_ent = malloc(sizeof(mtab_list_ent_t));
    if (!mtab_list_ent) {
        async_answer_0(callid, ENOMEM);
        async_answer_0(rid, ENOMEM);
        free(mp);
        free(fs_name);
        free(opts);
        return;
    }

    mtab_ent_t *mtab_ent = &mtab_list_ent->mtab_ent;

    mtab_ent->fs_handle = fs_handle;
    str_cpy(mtab_ent->mp, MAX_PATH_LEN, mp);
    str_cpy(mtab_ent->fs_name, FS_NAME_MAXLEN, fs_name);
    str_cpy(mtab_ent->opts, MAX_MNTOPTS_LEN, opts);
    mtab_ent->flags = flags;
    mtab_ent->instance = instance;

    link_initialize(&mtab_list_ent->link);

    fibril_mutex_lock(&mtab_list_lock);
    list_append(&mtab_list_ent->link, &mtab_list);
    mtab_size++;
    fibril_mutex_unlock(&mtab_list_lock);

    /* The mtab entry holds copies, so the received strings can go. */
    free(mp);
    free(fs_name);
    free(opts);

    /* Acknowledge that we know fs_name. */
    async_answer_0(callid, EOK);
}

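/** Handle a client unmount request.
 *
 * The mount point path is received as an IPC data write. The mounted file
 * system is notified via VFS_OUT_UNMOUNTED (when unmounting the root) or the
 * underlying file system is asked via VFS_OUT_UNMOUNT, and the matching mtab
 * entry is removed.
 *
 * @param rid     Call ID of the request to be answered.
 * @param request The unmount request call.
 */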
void vfs_unmount(ipc_callid_t rid, ipc_call_t *request)
{
    int rc;
    char *mp;
    vfs_lookup_res_t mp_res;
    vfs_lookup_res_t mr_res;
    vfs_node_t *mr_node;
    async_exch_t *exch;

    /*
     * Receive the mount point path.
     */
    rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
        0, NULL);
    if (rc != EOK) {
        async_answer_0(rid, rc);
        return;
    }

    /*
     * Taking the namespace lock will do two things for us. First, it will
     * prevent races with other lookup operations. Second, it will stop new
     * references to already existing VFS nodes and creation of new VFS
     * nodes. This is because new references are added as a result of some
     * lookup operation or at least of some operation which is protected by
     * the namespace lock.
     */
    fibril_rwlock_write_lock(&namespace_rwlock);

    /*
     * Lookup the mounted root and instantiate it.
     */
    rc = vfs_lookup_internal(mp, L_ROOT, &mr_res, NULL);
    if (rc != EOK) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        free(mp);
        async_answer_0(rid, rc);
        return;
    }
    mr_node = vfs_node_get(&mr_res);
    if (!mr_node) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        free(mp);
        async_answer_0(rid, ENOMEM);
        return;
    }

    /*
     * Count the total number of references for the mounted file system. We
     * are expecting at least two. One which we got above and one which we
     * got when the file system was mounted. If we find more, it means that
     * the file system cannot be gracefully unmounted at the moment because
     * someone is working with it.
     */
    if (vfs_nodes_refcount_sum_get(mr_node->fs_handle,
        mr_node->service_id) != 2) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        vfs_node_put(mr_node);
        free(mp);
        async_answer_0(rid, EBUSY);
        return;
    }

    if (str_cmp(mp, "/") == 0) {

        /*
         * Unmounting the root file system.
         *
         * In this case, there is no mount point node and we send
         * VFS_OUT_UNMOUNTED directly to the mounted file system.
         */

        exch = vfs_exchange_grab(mr_node->fs_handle);
        rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED,
            mr_node->service_id);
        vfs_exchange_release(exch);

        if (rc != EOK) {
            fibril_rwlock_write_unlock(&namespace_rwlock);
            free(mp);
            vfs_node_put(mr_node);
            async_answer_0(rid, rc);
            return;
        }

        rootfs.fs_handle = 0;
        rootfs.service_id = 0;
    } else {

        /*
         * Unmounting a non-root file system.
         *
         * We have a regular mount point node representing the parent
         * file system, so we delegate the operation to it.
         */

        rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
        if (rc != EOK) {
            fibril_rwlock_write_unlock(&namespace_rwlock);
            free(mp);
            vfs_node_put(mr_node);
            async_answer_0(rid, rc);
            return;
        }

        vfs_node_t *mp_node = vfs_node_get(&mp_res);
        if (!mp_node) {
            fibril_rwlock_write_unlock(&namespace_rwlock);
            free(mp);
            vfs_node_put(mr_node);
            async_answer_0(rid, ENOMEM);
            return;
        }

        exch = vfs_exchange_grab(mp_node->fs_handle);
        rc = async_req_2_0(exch, VFS_OUT_UNMOUNT,
            mp_node->service_id, mp_node->index);
        vfs_exchange_release(exch);

        if (rc != EOK) {
            fibril_rwlock_write_unlock(&namespace_rwlock);
            free(mp);
            vfs_node_put(mp_node);
            vfs_node_put(mr_node);
            async_answer_0(rid, rc);
            return;
        }

        /* Drop the reference we got above. */
        vfs_node_put(mp_node);
        /* Drop the reference from when the file system was mounted. */
        vfs_node_put(mp_node);
    }

    /*
     * All went well, the mounted file system was successfully unmounted.
     * The only thing left is to forget the unmounted root VFS node.
     */
    vfs_node_forget(mr_node);
    fibril_rwlock_write_unlock(&namespace_rwlock);

    fibril_mutex_lock(&mtab_list_lock);

    int found = 0;

    list_foreach(mtab_list, cur) {
        mtab_list_ent_t *ent = list_get_instance(cur, mtab_list_ent_t,
            link);

        mtab_ent_t *mtab_ent = &ent->mtab_ent;

        if (str_cmp(mtab_ent->mp, mp) == 0) {
            list_remove(&ent->link);
            mtab_size--;
            free(ent);
            found = 1;
            break;
        }
    }
    assert(found);

    free(mp);

    fibril_mutex_unlock(&mtab_list_lock);
    async_answer_0(rid, EOK);
}

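/** Handle a VFS_IN_OPEN request.
 *
 * The lookup flags, open flags and mode arrive as ARG1 to ARG3 and the path
 * arrives as an IPC data write. On success, the client receives the newly
 * allocated file descriptor.
 *
 * @param rid     Call ID of the request to be answered.
 * @param request The VFS_IN_OPEN request call.
 */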
void vfs_open(ipc_callid_t rid, ipc_call_t *request)
{
    /*
     * The POSIX interface is open(path, oflag, mode).
     * We can receive oflags and mode along with the VFS_IN_OPEN call;
     * the path will need to arrive in another call.
     *
     * We also receive one private, non-POSIX set of flags called lflag
     * used to pass information to vfs_lookup_internal().
     */
    int lflag = IPC_GET_ARG1(*request);
    int oflag = IPC_GET_ARG2(*request);
    int mode = IPC_GET_ARG3(*request);

    /* Ignore mode for now. */
    (void) mode;

    /*
     * Make sure that we are called with exactly one of L_FILE and
     * L_DIRECTORY. Make sure that the user does not pass L_OPEN,
     * L_ROOT or L_MP.
     */
    if (((lflag & (L_FILE | L_DIRECTORY)) == 0) ||
        ((lflag & (L_FILE | L_DIRECTORY)) == (L_FILE | L_DIRECTORY)) ||
        (lflag & (L_OPEN | L_ROOT | L_MP))) {
        async_answer_0(rid, EINVAL);
        return;
    }

    if (oflag & O_CREAT)
        lflag |= L_CREATE;
    if (oflag & O_EXCL)
        lflag |= L_EXCLUSIVE;

    char *path;
    int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
    if (rc != EOK) {
        async_answer_0(rid, rc);
        return;
    }

    /*
     * Avoid the race condition in which the file can be deleted before we
     * find/create-and-lock the VFS node corresponding to the looked-up
     * triplet.
     */
    if (lflag & L_CREATE)
        fibril_rwlock_write_lock(&namespace_rwlock);
    else
        fibril_rwlock_read_lock(&namespace_rwlock);

    /* The path is now populated and we can call vfs_lookup_internal(). */
    vfs_lookup_res_t lr;
    rc = vfs_lookup_internal(path, lflag | L_OPEN, &lr, NULL);
    if (rc != EOK) {
        if (lflag & L_CREATE)
            fibril_rwlock_write_unlock(&namespace_rwlock);
        else
            fibril_rwlock_read_unlock(&namespace_rwlock);
        async_answer_0(rid, rc);
        free(path);
        return;
    }

    /* Path is no longer needed. */
    free(path);

    vfs_node_t *node = vfs_node_get(&lr);
    if (lflag & L_CREATE)
        fibril_rwlock_write_unlock(&namespace_rwlock);
    else
        fibril_rwlock_read_unlock(&namespace_rwlock);

    /* Truncate the file if requested and if necessary. */
    if (oflag & O_TRUNC) {
        fibril_rwlock_write_lock(&node->contents_rwlock);
        if (node->size) {
            rc = vfs_truncate_internal(node->fs_handle,
                node->service_id, node->index, 0);
            if (rc) {
                fibril_rwlock_write_unlock(&node->contents_rwlock);
                vfs_node_put(node);
                async_answer_0(rid, rc);
                return;
            }
            node->size = 0;
        }
        fibril_rwlock_write_unlock(&node->contents_rwlock);
    }

    /*
     * Get ourselves a file descriptor and the corresponding vfs_file_t
     * structure.
     */
    int fd = vfs_fd_alloc((oflag & O_DESC) != 0);
    if (fd < 0) {
        vfs_node_put(node);
        async_answer_0(rid, fd);
        return;
    }
    vfs_file_t *file = vfs_file_get(fd);
    assert(file);
    file->node = node;
    if (oflag & O_APPEND)
        file->append = true;

    /*
     * The following increase in reference count is for the fact that the
     * file is being opened and that a file structure is pointing to it.
     * It is necessary so that the file will not disappear when
     * vfs_node_put() is called. The reference will be dropped by the
     * respective VFS_IN_CLOSE.
     */
    vfs_node_addref(node);
    vfs_node_put(node);
    vfs_file_put(file);

    /* Success! Return the new file descriptor to the client. */
    async_answer_1(rid, EOK, fd);
}

void vfs_sync(ipc_callid_t rid, ipc_call_t *request)
{
    int fd = IPC_GET_ARG1(*request);

    /* Lookup the file structure corresponding to the file descriptor. */
    vfs_file_t *file = vfs_file_get(fd);
    if (!file) {
        async_answer_0(rid, ENOENT);
        return;
    }

    /*
     * Lock the open file structure so that no other thread can manipulate
     * the same open file at a time.
     */
    fibril_mutex_lock(&file->lock);
    async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

    /* Make a VFS_OUT_SYNC request at the destination FS server. */
    aid_t msg;
    ipc_call_t answer;
    msg = async_send_2(fs_exch, VFS_OUT_SYNC, file->node->service_id,
        file->node->index, &answer);

    vfs_exchange_release(fs_exch);

    /* Wait for reply from the FS server. */
    sysarg_t rc;
    async_wait_for(msg, &rc);

    fibril_mutex_unlock(&file->lock);

    vfs_file_put(file);
    async_answer_0(rid, rc);
}

void vfs_close(ipc_callid_t rid, ipc_call_t *request)
{
    int fd = IPC_GET_ARG1(*request);
    int ret = vfs_fd_free(fd);
    async_answer_0(rid, ret);
}

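/** Common implementation of the read and write requests.
 *
 * Forwards the client's data transfer to the file system server that backs
 * the open file, updating the cached file size and the file position on
 * success.
 *
 * @param rid     Call ID of the request to be answered.
 * @param request The read or write request call.
 * @param read    True for a read, false for a write.
 */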
static void vfs_rdwr(ipc_callid_t rid, ipc_call_t *request, bool read)
{
    /*
     * The following code strongly depends on the fact that the files data
     * structure can be only accessed by a single fibril and all file
     * operations are serialized (i.e. the reads and writes cannot
     * interleave and a file cannot be closed while it is being read).
     *
     * Additional synchronization needs to be added once the table of
     * open files supports parallel access!
     */

    int fd = IPC_GET_ARG1(*request);

    /* Lookup the file structure corresponding to the file descriptor. */
    vfs_file_t *file = vfs_file_get(fd);
    if (!file) {
        async_answer_0(rid, ENOENT);
        return;
    }

    /*
     * Lock the open file structure so that no other thread can manipulate
     * the same open file at a time.
     */
    fibril_mutex_lock(&file->lock);

    vfs_info_t *fs_info = fs_handle_to_info(file->node->fs_handle);
    assert(fs_info);

    /*
     * Lock the file's node so that no other client can read/write to it at
     * the same time unless the FS supports concurrent reads/writes and its
     * write implementation does not modify the file size.
     */
    if ((read) ||
        ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
        fibril_rwlock_read_lock(&file->node->contents_rwlock);
    else
        fibril_rwlock_write_lock(&file->node->contents_rwlock);

    if (file->node->type == VFS_NODE_DIRECTORY) {
        /*
         * Make sure that no one is modifying the namespace
         * while we are in readdir().
         */
        assert(read);
        fibril_rwlock_read_lock(&namespace_rwlock);
    }

    async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

    /*
     * Make a VFS_READ/VFS_WRITE request at the destination FS server
     * and forward the IPC_M_DATA_READ/IPC_M_DATA_WRITE request to the
     * destination FS server. The call will be routed as if sent by
     * ourselves. Note that call arguments are immutable in this case so we
     * don't have to bother.
     */
    sysarg_t rc;
    ipc_call_t answer;
    if (read) {
        rc = async_data_read_forward_4_1(fs_exch, VFS_OUT_READ,
            file->node->service_id, file->node->index,
            LOWER32(file->pos), UPPER32(file->pos), &answer);
    } else {
        if (file->append)
            file->pos = file->node->size;

        rc = async_data_write_forward_4_1(fs_exch, VFS_OUT_WRITE,
            file->node->service_id, file->node->index,
            LOWER32(file->pos), UPPER32(file->pos), &answer);
    }

    vfs_exchange_release(fs_exch);

    size_t bytes = IPC_GET_ARG1(answer);

    if (file->node->type == VFS_NODE_DIRECTORY)
        fibril_rwlock_read_unlock(&namespace_rwlock);

    /* Unlock the VFS node. */
    if ((read) ||
        ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
        fibril_rwlock_read_unlock(&file->node->contents_rwlock);
    else {
        /* Update the cached version of node's size. */
        if (rc == EOK)
            file->node->size = MERGE_LOUP32(IPC_GET_ARG2(answer),
                IPC_GET_ARG3(answer));
        fibril_rwlock_write_unlock(&file->node->contents_rwlock);
    }

    /* Update the position pointer and unlock the open file. */
    if (rc == EOK)
        file->pos += bytes;
    fibril_mutex_unlock(&file->lock);
    vfs_file_put(file);

    /*
     * FS server's reply is the final result of the whole operation we
     * return to the client.
     */
    async_answer_1(rid, rc, bytes);
}

void vfs_read(ipc_callid_t rid, ipc_call_t *request)
{
    vfs_rdwr(rid, request, true);
}

void vfs_write(ipc_callid_t rid, ipc_call_t *request)
{
    vfs_rdwr(rid, request, false);
}

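/** Handle a client seek request.
 *
 * The new position is computed from the 64-bit offset (ARG2, ARG3) and the
 * whence value (ARG4) with overflow checks, and is returned to the client.
 *
 * @param rid     Call ID of the request to be answered.
 * @param request The seek request call.
 */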
void vfs_seek(ipc_callid_t rid, ipc_call_t *request)
{
    int fd = (int) IPC_GET_ARG1(*request);
    off64_t off = (off64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
        IPC_GET_ARG3(*request));
    int whence = (int) IPC_GET_ARG4(*request);

    /* Lookup the file structure corresponding to the file descriptor. */
    vfs_file_t *file = vfs_file_get(fd);
    if (!file) {
        async_answer_0(rid, ENOENT);
        return;
    }

    fibril_mutex_lock(&file->lock);

    off64_t newoff;
    switch (whence) {
    case SEEK_SET:
        if (off >= 0) {
            file->pos = (aoff64_t) off;
            fibril_mutex_unlock(&file->lock);
            vfs_file_put(file);
            async_answer_1(rid, EOK, off);
            return;
        }
        break;
    case SEEK_CUR:
        if ((off >= 0) && (file->pos + off < file->pos)) {
            fibril_mutex_unlock(&file->lock);
            vfs_file_put(file);
            async_answer_0(rid, EOVERFLOW);
            return;
        }

        if ((off < 0) && (file->pos < (aoff64_t) -off)) {
            fibril_mutex_unlock(&file->lock);
            vfs_file_put(file);
            async_answer_0(rid, EOVERFLOW);
            return;
        }

        file->pos += off;
        newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;

        fibril_mutex_unlock(&file->lock);
        vfs_file_put(file);
        async_answer_2(rid, EOK, LOWER32(newoff),
            UPPER32(newoff));
        return;
    case SEEK_END:
        fibril_rwlock_read_lock(&file->node->contents_rwlock);
        aoff64_t size = file->node->size;

        if ((off >= 0) && (size + off < size)) {
            fibril_rwlock_read_unlock(&file->node->contents_rwlock);
            fibril_mutex_unlock(&file->lock);
            vfs_file_put(file);
            async_answer_0(rid, EOVERFLOW);
            return;
        }

        if ((off < 0) && (size < (aoff64_t) -off)) {
            fibril_rwlock_read_unlock(&file->node->contents_rwlock);
            fibril_mutex_unlock(&file->lock);
            vfs_file_put(file);
            async_answer_0(rid, EOVERFLOW);
            return;
        }

        file->pos = size + off;
        newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;

        fibril_rwlock_read_unlock(&file->node->contents_rwlock);
        fibril_mutex_unlock(&file->lock);
        vfs_file_put(file);
        async_answer_2(rid, EOK, LOWER32(newoff), UPPER32(newoff));
        return;
    }

    fibril_mutex_unlock(&file->lock);
    vfs_file_put(file);
    async_answer_0(rid, EINVAL);
}

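/** Ask a file system server to truncate a file to the given size.
 *
 * @param fs_handle  Handle of the file system server.
 * @param service_id Service ID of the file system instance.
 * @param index      Index of the file within the file system.
 * @param size       New size of the file.
 *
 * @return EOK on success or an error code returned by the server.
 */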
int vfs_truncate_internal(fs_handle_t fs_handle, service_id_t service_id,
    fs_index_t index, aoff64_t size)
{
    async_exch_t *exch = vfs_exchange_grab(fs_handle);
    sysarg_t rc = async_req_4_0(exch, VFS_OUT_TRUNCATE,
        (sysarg_t) service_id, (sysarg_t) index, LOWER32(size),
        UPPER32(size));
    vfs_exchange_release(exch);

    return (int) rc;
}

void vfs_truncate(ipc_callid_t rid, ipc_call_t *request)
{
    int fd = IPC_GET_ARG1(*request);
    aoff64_t size = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
        IPC_GET_ARG3(*request));
    int rc;

    vfs_file_t *file = vfs_file_get(fd);
    if (!file) {
        async_answer_0(rid, ENOENT);
        return;
    }
    fibril_mutex_lock(&file->lock);

    fibril_rwlock_write_lock(&file->node->contents_rwlock);
    rc = vfs_truncate_internal(file->node->fs_handle,
        file->node->service_id, file->node->index, size);
    if (rc == EOK)
        file->node->size = size;
    fibril_rwlock_write_unlock(&file->node->contents_rwlock);

    fibril_mutex_unlock(&file->lock);
    vfs_file_put(file);
    async_answer_0(rid, (sysarg_t) rc);
}

void vfs_fstat(ipc_callid_t rid, ipc_call_t *request)
{
    int fd = IPC_GET_ARG1(*request);
    sysarg_t rc;

    vfs_file_t *file = vfs_file_get(fd);
    if (!file) {
        async_answer_0(rid, ENOENT);
        return;
    }

    ipc_callid_t callid;
    if (!async_data_read_receive(&callid, NULL)) {
        vfs_file_put(file);
        async_answer_0(callid, EINVAL);
        async_answer_0(rid, EINVAL);
        return;
    }

    fibril_mutex_lock(&file->lock);

    async_exch_t *exch = vfs_exchange_grab(file->node->fs_handle);

    aid_t msg;
    msg = async_send_3(exch, VFS_OUT_STAT, file->node->service_id,
        file->node->index, true, NULL);
    async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

    vfs_exchange_release(exch);

    async_wait_for(msg, &rc);

    fibril_mutex_unlock(&file->lock);
    vfs_file_put(file);
    async_answer_0(rid, rc);
}

void vfs_stat(ipc_callid_t rid, ipc_call_t *request)
{
    char *path;
    int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
    if (rc != EOK) {
        async_answer_0(rid, rc);
        return;
    }

    ipc_callid_t callid;
    if (!async_data_read_receive(&callid, NULL)) {
        free(path);
        async_answer_0(callid, EINVAL);
        async_answer_0(rid, EINVAL);
        return;
    }

    vfs_lookup_res_t lr;
    fibril_rwlock_read_lock(&namespace_rwlock);
    rc = vfs_lookup_internal(path, L_NONE, &lr, NULL);
    free(path);
    if (rc != EOK) {
        fibril_rwlock_read_unlock(&namespace_rwlock);
        async_answer_0(callid, rc);
        async_answer_0(rid, rc);
        return;
    }
    vfs_node_t *node = vfs_node_get(&lr);
    if (!node) {
        fibril_rwlock_read_unlock(&namespace_rwlock);
        async_answer_0(callid, ENOMEM);
        async_answer_0(rid, ENOMEM);
        return;
    }

    fibril_rwlock_read_unlock(&namespace_rwlock);

    async_exch_t *exch = vfs_exchange_grab(node->fs_handle);

    aid_t msg;
    msg = async_send_3(exch, VFS_OUT_STAT, node->service_id,
        node->index, false, NULL);
    async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

    vfs_exchange_release(exch);

    sysarg_t rv;
    async_wait_for(msg, &rv);

    async_answer_0(rid, rv);

    vfs_node_put(node);
}

void vfs_mkdir(ipc_callid_t rid, ipc_call_t *request)
{
    int mode = IPC_GET_ARG1(*request);

    char *path;
    int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
    if (rc != EOK) {
        async_answer_0(rid, rc);
        return;
    }

    /* Ignore mode for now. */
    (void) mode;

    fibril_rwlock_write_lock(&namespace_rwlock);
    int lflag = L_DIRECTORY | L_CREATE | L_EXCLUSIVE;
    rc = vfs_lookup_internal(path, lflag, NULL, NULL);
    fibril_rwlock_write_unlock(&namespace_rwlock);
    free(path);
    async_answer_0(rid, rc);
}

void vfs_unlink(ipc_callid_t rid, ipc_call_t *request)
{
    int lflag = IPC_GET_ARG1(*request);

    char *path;
    int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
    if (rc != EOK) {
        async_answer_0(rid, rc);
        return;
    }

    fibril_rwlock_write_lock(&namespace_rwlock);
    lflag &= L_DIRECTORY; /* sanitize lflag */
    vfs_lookup_res_t lr;
    rc = vfs_lookup_internal(path, lflag | L_UNLINK, &lr, NULL);
    free(path);
    if (rc != EOK) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        async_answer_0(rid, rc);
        return;
    }

    /*
     * The name has already been unlinked by vfs_lookup_internal().
     * We have to get and put the VFS node to ensure that it is
     * VFS_OUT_DESTROY'ed after the last reference to it is dropped.
     */
    vfs_node_t *node = vfs_node_get(&lr);
    fibril_mutex_lock(&nodes_mutex);
    node->lnkcnt--;
    fibril_mutex_unlock(&nodes_mutex);
    fibril_rwlock_write_unlock(&namespace_rwlock);
    vfs_node_put(node);
    async_answer_0(rid, EOK);
}

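/** Handle a client rename request.
 *
 * The old and the new path arrive as two IPC data writes. The rename is
 * carried out as a sequence of lookups: any existing target is unlinked, the
 * node is linked under the new name and finally the old name is unlinked.
 * Renaming across file system instances is refused with EXDEV.
 *
 * @param rid     Call ID of the request to be answered.
 * @param request The rename request call.
 */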
void vfs_rename(ipc_callid_t rid, ipc_call_t *request)
{
    /* Retrieve the old path. */
    char *old;
    int rc = async_data_write_accept((void **) &old, true, 0, 0, 0, NULL);
    if (rc != EOK) {
        async_answer_0(rid, rc);
        return;
    }

    /* Retrieve the new path. */
    char *new;
    rc = async_data_write_accept((void **) &new, true, 0, 0, 0, NULL);
    if (rc != EOK) {
        free(old);
        async_answer_0(rid, rc);
        return;
    }

    size_t olen;
    size_t nlen;
    char *oldc = canonify(old, &olen);
    char *newc = canonify(new, &nlen);

    if ((!oldc) || (!newc)) {
        async_answer_0(rid, EINVAL);
        free(old);
        free(new);
        return;
    }

    oldc[olen] = '\0';
    newc[nlen] = '\0';

    if ((!str_lcmp(newc, oldc, str_length(oldc))) &&
        ((newc[str_length(oldc)] == '/') ||
        (str_length(oldc) == 1) ||
        (str_length(oldc) == str_length(newc)))) {
        /*
         * oldc is a prefix of newc and either
         * - newc continues with a / where oldc ends, or
         * - oldc was / itself, or
         * - oldc and newc are equal.
         */
        async_answer_0(rid, EINVAL);
        free(old);
        free(new);
        return;
    }

    vfs_lookup_res_t old_lr;
    vfs_lookup_res_t new_lr;
    vfs_lookup_res_t new_par_lr;
    fibril_rwlock_write_lock(&namespace_rwlock);

    /* Lookup the node belonging to the old file name. */
    rc = vfs_lookup_internal(oldc, L_NONE, &old_lr, NULL);
    if (rc != EOK) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        async_answer_0(rid, rc);
        free(old);
        free(new);
        return;
    }

    vfs_node_t *old_node = vfs_node_get(&old_lr);
    if (!old_node) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        async_answer_0(rid, ENOMEM);
        free(old);
        free(new);
        return;
    }

    /* Determine the path to the parent of the node with the new name. */
    char *parentc = str_dup(newc);
    if (!parentc) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        vfs_node_put(old_node);
        async_answer_0(rid, ENOMEM);
        free(old);
        free(new);
        return;
    }

    char *lastsl = str_rchr(parentc + 1, '/');
    if (lastsl)
        *lastsl = '\0';
    else
        parentc[1] = '\0';

    /* Lookup parent of the new file name. */
    rc = vfs_lookup_internal(parentc, L_NONE, &new_par_lr, NULL);
    free(parentc); /* not needed anymore */
    if (rc != EOK) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        vfs_node_put(old_node);
        async_answer_0(rid, rc);
        free(old);
        free(new);
        return;
    }

    /* Check whether linking to the same file system instance. */
    if ((old_node->fs_handle != new_par_lr.triplet.fs_handle) ||
        (old_node->service_id != new_par_lr.triplet.service_id)) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        vfs_node_put(old_node);
        async_answer_0(rid, EXDEV); /* different file systems */
        free(old);
        free(new);
        return;
    }

    /* Destroy the old link for the new name. */
    vfs_node_t *new_node = NULL;
    rc = vfs_lookup_internal(newc, L_UNLINK, &new_lr, NULL);

    switch (rc) {
    case ENOENT:
        /* simply not in our way */
        break;
    case EOK:
        new_node = vfs_node_get(&new_lr);
        if (!new_node) {
            fibril_rwlock_write_unlock(&namespace_rwlock);
            vfs_node_put(old_node);
            async_answer_0(rid, ENOMEM);
            free(old);
            free(new);
            return;
        }
        fibril_mutex_lock(&nodes_mutex);
        new_node->lnkcnt--;
        fibril_mutex_unlock(&nodes_mutex);
        break;
    default:
        fibril_rwlock_write_unlock(&namespace_rwlock);
        vfs_node_put(old_node);
        async_answer_0(rid, ENOTEMPTY);
        free(old);
        free(new);
        return;
    }

    /* Create the new link for the new name. */
    rc = vfs_lookup_internal(newc, L_LINK, NULL, NULL, old_node->index);
    if (rc != EOK) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        vfs_node_put(old_node);
        if (new_node)
            vfs_node_put(new_node);
        async_answer_0(rid, rc);
        free(old);
        free(new);
        return;
    }

    fibril_mutex_lock(&nodes_mutex);
    old_node->lnkcnt++;
    fibril_mutex_unlock(&nodes_mutex);

    /* Destroy the link for the old name. */
    rc = vfs_lookup_internal(oldc, L_UNLINK, NULL, NULL);
    if (rc != EOK) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        vfs_node_put(old_node);
        if (new_node)
            vfs_node_put(new_node);
        async_answer_0(rid, rc);
        free(old);
        free(new);
        return;
    }

    fibril_mutex_lock(&nodes_mutex);
    old_node->lnkcnt--;
    fibril_mutex_unlock(&nodes_mutex);
    fibril_rwlock_write_unlock(&namespace_rwlock);
    vfs_node_put(old_node);

    if (new_node)
        vfs_node_put(new_node);

    free(old);
    free(new);
    async_answer_0(rid, EOK);
}

void vfs_dup(ipc_callid_t rid, ipc_call_t *request)
{
    int oldfd = IPC_GET_ARG1(*request);
    int newfd = IPC_GET_ARG2(*request);

    /* If the file descriptors are the same, do nothing. */
    if (oldfd == newfd) {
        async_answer_1(rid, EOK, newfd);
        return;
    }

    /* Lookup the file structure corresponding to oldfd. */
    vfs_file_t *oldfile = vfs_file_get(oldfd);
    if (!oldfile) {
        async_answer_0(rid, EBADF);
        return;
    }

    /*
     * Lock the open file structure so that no other thread can manipulate
     * the same open file at a time.
     */
    fibril_mutex_lock(&oldfile->lock);

    /* Make sure newfd is closed. */
    (void) vfs_fd_free(newfd);

    /* Assign the old file to newfd. */
    int ret = vfs_fd_assign(oldfile, newfd);
    fibril_mutex_unlock(&oldfile->lock);
    vfs_file_put(oldfile);

    if (ret != EOK)
        async_answer_0(rid, ret);
    else
        async_answer_1(rid, EOK, newfd);
}

void vfs_wait_handle(ipc_callid_t rid, ipc_call_t *request)
{
    int fd = vfs_wait_handle_internal();
    async_answer_1(rid, EOK, fd);
}

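/** Hand over the list of mounted file systems to the client.
 *
 * The client first pings to learn the number of mtab entries and then, for
 * every entry, reads the mount point, the mount options and the file system
 * name as three IPC data reads, followed by three VFS_IN_PING exchanges
 * carrying the flags, the instance number and the file system handle.
 *
 * @param rid     Call ID of the request to be answered.
 * @param request The get-mtab request call.
 */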
void vfs_get_mtab(ipc_callid_t rid, ipc_call_t *request)
{
    ipc_callid_t callid;
    ipc_call_t data;
    sysarg_t rc = EOK;
    size_t len;

    fibril_mutex_lock(&mtab_list_lock);

    /* Send to the caller the number of mounted filesystems */
    callid = async_get_call(&data);
    if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
        rc = ENOTSUP;
        async_answer_1(callid, rc, 0);
        goto exit;
    }
    async_answer_1(callid, EOK, mtab_size);

    list_foreach(mtab_list, cur) {
        mtab_list_ent_t *ent = list_get_instance(cur, mtab_list_ent_t,
            link);

        mtab_ent_t *mtab_ent = &ent->mtab_ent;

        rc = ENOTSUP;

        if (!async_data_read_receive(&callid, &len))
            goto exit;

        (void) async_data_read_finalize(callid, mtab_ent->mp,
            str_size(mtab_ent->mp));

        if (!async_data_read_receive(&callid, &len))
            goto exit;

        (void) async_data_read_finalize(callid, mtab_ent->opts,
            str_size(mtab_ent->opts));

        if (!async_data_read_receive(&callid, &len))
            goto exit;

        (void) async_data_read_finalize(callid, mtab_ent->fs_name,
            str_size(mtab_ent->fs_name));

        sysarg_t p[3];

        p[0] = mtab_ent->flags;
        p[1] = mtab_ent->instance;
        p[2] = mtab_ent->fs_handle;

        int i;
        for (i = 0; i < 3; ++i) {
            callid = async_get_call(&data);
            if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
                rc = ENOTSUP;
                async_answer_1(callid, rc, 0);
                goto exit;
            }
            async_answer_1(callid, EOK, p[i]);
        }

        rc = EOK;
    }

exit:
    fibril_mutex_unlock(&mtab_list_lock);
    async_answer_0(rid, rc);
}

/**
 * @}
 */