source: mainline/uspace/srv/vfs/vfs_ops.c@ cb65bbe

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since cb65bbe was cb65bbe, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 12 years ago

Implement server side of VFS_WALK and VFS_OPEN2.

  • Property mode set to 100644
File size: 38.2 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file vfs_ops.c
35 * @brief Operations that VFS offers to its clients.
36 */
37
38#include "vfs.h"
39#include <macros.h>
40#include <stdint.h>
41#include <async.h>
42#include <errno.h>
43#include <stdio.h>
44#include <stdlib.h>
45#include <str.h>
46#include <stdbool.h>
47#include <fibril_synch.h>
48#include <adt/list.h>
49#include <unistd.h>
50#include <ctype.h>
51#include <fcntl.h>
52#include <assert.h>
53#include <vfs/canonify.h>
54#include <vfs/vfs_mtab.h>
55
/*
 * List of mounted file systems (the mtab). mtab_size counts its entries;
 * both are protected by mtab_list_lock.
 */
FIBRIL_MUTEX_INITIALIZE(mtab_list_lock);
LIST_INITIALIZE(mtab_list);
static size_t mtab_size = 0;

/* Forward declarations of static functions. */
static int vfs_truncate_internal(fs_handle_t, service_id_t, fs_index_t,
    aoff64_t);

/**
 * This rwlock prevents the race between a triplet-to-VFS-node resolution and a
 * concurrent VFS operation which modifies the file system namespace.
 */
FIBRIL_RWLOCK_INITIALIZE(namespace_rwlock);

/*
 * The root file system. A zero fs_handle means that no root file system has
 * been mounted yet (see vfs_mount_internal()).
 */
vfs_pair_t rootfs = {
	.fs_handle = 0,
	.service_id = 0
};
74
/** Perform the actual mounting of a file system on a mount point.
 *
 * Resolves the mount point path, notifies the mountee file system server
 * (VFS_OUT_MOUNTED for the root, VFS_OUT_MOUNT otherwise) and maintains the
 * VFS node references involved. Answers @a rid on every return path, so the
 * caller must not answer it again.
 *
 * @param rid        Hash of the VFS_IN_MOUNT request being served.
 * @param service_id Service ID of the device backing the mounted FS.
 * @param fs_handle  Handle of the file system implementation to mount.
 * @param mp         Canonical path of the mount point (owned by the caller).
 * @param opts       Mount options string forwarded to the FS server.
 *
 * @return EOK on success or an error code (also sent as the answer to rid).
 */
static int vfs_mount_internal(ipc_callid_t rid, service_id_t service_id,
    fs_handle_t fs_handle, char *mp, char *opts)
{
	vfs_lookup_res_t mp_res;
	vfs_lookup_res_t mr_res;
	vfs_node_t *mp_node = NULL;
	vfs_node_t *mr_node;
	fs_index_t rindex;
	aoff64_t rsize;
	unsigned rlnkcnt;
	async_exch_t *exch;
	sysarg_t rc;
	aid_t msg;
	ipc_call_t answer;

	/* Resolve the path to the mountpoint. */
	fibril_rwlock_write_lock(&namespace_rwlock);
	if (rootfs.fs_handle) {
		/* We already have the root FS. */
		if (str_cmp(mp, "/") == 0) {
			/* Trying to mount root FS over root FS */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, EBUSY);
			return EBUSY;
		}

		rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
		if (rc != EOK) {
			/* The lookup failed for some reason. */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return rc;
		}

		mp_node = vfs_node_get(&mp_res);
		if (!mp_node) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOMEM);
			return ENOMEM;
		}

		/*
		 * Now we hold a reference to mp_node.
		 * It will be dropped upon the corresponding VFS_IN_UNMOUNT.
		 * This prevents the mount point from being deleted.
		 */
	} else {
		/* We still don't have the root file system mounted. */
		if (str_cmp(mp, "/") == 0) {
			/*
			 * For this simple, but important case,
			 * we are almost done.
			 */

			/* Tell the mountee that it is being mounted. */
			exch = vfs_exchange_grab(fs_handle);
			msg = async_send_1(exch, VFS_OUT_MOUNTED,
			    (sysarg_t) service_id, &answer);
			/* Send the mount options */
			rc = async_data_write_start(exch, (void *)opts,
			    str_size(opts));
			vfs_exchange_release(exch);

			if (rc != EOK) {
				async_forget(msg);
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return rc;
			}
			async_wait_for(msg, &rc);

			if (rc != EOK) {
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return rc;
			}

			/* The mountee answers with its root node triplet. */
			rindex = (fs_index_t) IPC_GET_ARG1(answer);
			rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
			rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

			mr_res.triplet.fs_handle = fs_handle;
			mr_res.triplet.service_id = service_id;
			mr_res.triplet.index = rindex;
			mr_res.size = rsize;
			mr_res.lnkcnt = rlnkcnt;
			mr_res.type = VFS_NODE_DIRECTORY;

			rootfs.fs_handle = fs_handle;
			rootfs.service_id = service_id;

			/* Add reference to the mounted root. */
			mr_node = vfs_node_get(&mr_res);
			assert(mr_node);

			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return rc;
		} else {
			/*
			 * We can't resolve this without the root filesystem
			 * being mounted first.
			 */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOENT);
			return ENOENT;
		}
	}

	/*
	 * At this point, we have all necessary pieces: file system handle
	 * and service ID, and we know the mount point VFS node.
	 */

	async_exch_t *mountee_exch = vfs_exchange_grab(fs_handle);
	assert(mountee_exch);

	exch = vfs_exchange_grab(mp_res.triplet.fs_handle);
	msg = async_send_4(exch, VFS_OUT_MOUNT,
	    (sysarg_t) mp_res.triplet.service_id,
	    (sysarg_t) mp_res.triplet.index,
	    (sysarg_t) fs_handle,
	    (sysarg_t) service_id, &answer);

	/* Send connection */
	rc = async_exchange_clone(exch, mountee_exch);
	vfs_exchange_release(mountee_exch);

	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_forget(msg);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		async_answer_0(rid, rc);
		fibril_rwlock_write_unlock(&namespace_rwlock);
		return rc;
	}

	/* send the mount options */
	rc = async_data_write_start(exch, (void *) opts, str_size(opts));
	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_forget(msg);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		return rc;
	}

	/*
	 * Wait for the answer before releasing the exchange to avoid deadlock
	 * in case the answer depends on further calls to the same file system.
	 * Think of a case when mounting a FS on a file_bd backed by a file on
	 * the same FS.
	 */
	async_wait_for(msg, &rc);
	vfs_exchange_release(exch);

	if (rc == EOK) {
		/* The mountee answers with its root node triplet. */
		rindex = (fs_index_t) IPC_GET_ARG1(answer);
		rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
		    IPC_GET_ARG3(answer));
		rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

		mr_res.triplet.fs_handle = fs_handle;
		mr_res.triplet.service_id = service_id;
		mr_res.triplet.index = rindex;
		mr_res.size = rsize;
		mr_res.lnkcnt = rlnkcnt;
		mr_res.type = VFS_NODE_DIRECTORY;

		/* Add reference to the mounted root. */
		mr_node = vfs_node_get(&mr_res);
		assert(mr_node);
	} else {
		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);
	}

	async_answer_0(rid, rc);
	fibril_rwlock_write_unlock(&namespace_rwlock);
	return rc;
}
267
268void vfs_mount(ipc_callid_t rid, ipc_call_t *request)
269{
270 service_id_t service_id;
271
272 /*
273 * We expect the library to do the device-name to device-handle
274 * translation for us, thus the device handle will arrive as ARG1
275 * in the request.
276 */
277 service_id = (service_id_t) IPC_GET_ARG1(*request);
278
279 /*
280 * Mount flags are passed as ARG2.
281 */
282 unsigned int flags = (unsigned int) IPC_GET_ARG2(*request);
283
284 /*
285 * Instance number is passed as ARG3.
286 */
287 unsigned int instance = IPC_GET_ARG3(*request);
288
289 /* We want the client to send us the mount point. */
290 char *mp;
291 int rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
292 0, NULL);
293 if (rc != EOK) {
294 async_answer_0(rid, rc);
295 return;
296 }
297
298 /* Now we expect to receive the mount options. */
299 char *opts;
300 rc = async_data_write_accept((void **) &opts, true, 0, MAX_MNTOPTS_LEN,
301 0, NULL);
302 if (rc != EOK) {
303 free(mp);
304 async_answer_0(rid, rc);
305 return;
306 }
307
308 /*
309 * Now, we expect the client to send us data with the name of the file
310 * system.
311 */
312 char *fs_name;
313 rc = async_data_write_accept((void **) &fs_name, true, 0,
314 FS_NAME_MAXLEN, 0, NULL);
315 if (rc != EOK) {
316 free(mp);
317 free(opts);
318 async_answer_0(rid, rc);
319 return;
320 }
321
322 /*
323 * Wait for VFS_IN_PING so that we can return an error if we don't know
324 * fs_name.
325 */
326 ipc_call_t data;
327 ipc_callid_t callid = async_get_call(&data);
328 if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
329 async_answer_0(callid, ENOTSUP);
330 async_answer_0(rid, ENOTSUP);
331 free(mp);
332 free(opts);
333 free(fs_name);
334 return;
335 }
336
337 /*
338 * Check if we know a file system with the same name as is in fs_name.
339 * This will also give us its file system handle.
340 */
341 fibril_mutex_lock(&fs_list_lock);
342 fs_handle_t fs_handle;
343recheck:
344 fs_handle = fs_name_to_handle(instance, fs_name, false);
345 if (!fs_handle) {
346 if (flags & IPC_FLAG_BLOCKING) {
347 fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
348 goto recheck;
349 }
350
351 fibril_mutex_unlock(&fs_list_lock);
352 async_answer_0(callid, ENOENT);
353 async_answer_0(rid, ENOENT);
354 free(mp);
355 free(fs_name);
356 free(opts);
357 return;
358 }
359 fibril_mutex_unlock(&fs_list_lock);
360
361 /* Add the filesystem info to the list of mounted filesystems */
362 mtab_ent_t *mtab_ent = malloc(sizeof(mtab_ent_t));
363 if (!mtab_ent) {
364 async_answer_0(callid, ENOMEM);
365 async_answer_0(rid, ENOMEM);
366 free(mp);
367 free(fs_name);
368 free(opts);
369 return;
370 }
371
372 /* Do the mount */
373 rc = vfs_mount_internal(rid, service_id, fs_handle, mp, opts);
374 if (rc != EOK) {
375 async_answer_0(callid, ENOTSUP);
376 async_answer_0(rid, ENOTSUP);
377 free(mtab_ent);
378 free(mp);
379 free(opts);
380 free(fs_name);
381 return;
382 }
383
384 /* Add the filesystem info to the list of mounted filesystems */
385
386 str_cpy(mtab_ent->mp, MAX_PATH_LEN, mp);
387 str_cpy(mtab_ent->fs_name, FS_NAME_MAXLEN, fs_name);
388 str_cpy(mtab_ent->opts, MAX_MNTOPTS_LEN, opts);
389 mtab_ent->instance = instance;
390 mtab_ent->service_id = service_id;
391
392 link_initialize(&mtab_ent->link);
393
394 fibril_mutex_lock(&mtab_list_lock);
395 list_append(&mtab_ent->link, &mtab_list);
396 mtab_size++;
397 fibril_mutex_unlock(&mtab_list_lock);
398
399 free(mp);
400 free(fs_name);
401 free(opts);
402
403 /* Acknowledge that we know fs_name. */
404 async_answer_0(callid, EOK);
405}
406
407void vfs_unmount(ipc_callid_t rid, ipc_call_t *request)
408{
409 int rc;
410 char *mp;
411 vfs_lookup_res_t mp_res;
412 vfs_lookup_res_t mr_res;
413 vfs_node_t *mr_node;
414 async_exch_t *exch;
415
416 /*
417 * Receive the mount point path.
418 */
419 rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
420 0, NULL);
421 if (rc != EOK)
422 async_answer_0(rid, rc);
423
424 /*
425 * Taking the namespace lock will do two things for us. First, it will
426 * prevent races with other lookup operations. Second, it will stop new
427 * references to already existing VFS nodes and creation of new VFS
428 * nodes. This is because new references are added as a result of some
429 * lookup operation or at least of some operation which is protected by
430 * the namespace lock.
431 */
432 fibril_rwlock_write_lock(&namespace_rwlock);
433
434 /*
435 * Lookup the mounted root and instantiate it.
436 */
437 rc = vfs_lookup_internal(mp, L_ROOT, &mr_res, NULL);
438 if (rc != EOK) {
439 fibril_rwlock_write_unlock(&namespace_rwlock);
440 free(mp);
441 async_answer_0(rid, rc);
442 return;
443 }
444 mr_node = vfs_node_get(&mr_res);
445 if (!mr_node) {
446 fibril_rwlock_write_unlock(&namespace_rwlock);
447 free(mp);
448 async_answer_0(rid, ENOMEM);
449 return;
450 }
451
452 /*
453 * Count the total number of references for the mounted file system. We
454 * are expecting at least two. One which we got above and one which we
455 * got when the file system was mounted. If we find more, it means that
456 * the file system cannot be gracefully unmounted at the moment because
457 * someone is working with it.
458 */
459 if (vfs_nodes_refcount_sum_get(mr_node->fs_handle,
460 mr_node->service_id) != 2) {
461 fibril_rwlock_write_unlock(&namespace_rwlock);
462 vfs_node_put(mr_node);
463 free(mp);
464 async_answer_0(rid, EBUSY);
465 return;
466 }
467
468 if (str_cmp(mp, "/") == 0) {
469
470 /*
471 * Unmounting the root file system.
472 *
473 * In this case, there is no mount point node and we send
474 * VFS_OUT_UNMOUNTED directly to the mounted file system.
475 */
476
477 exch = vfs_exchange_grab(mr_node->fs_handle);
478 rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED,
479 mr_node->service_id);
480 vfs_exchange_release(exch);
481
482 if (rc != EOK) {
483 fibril_rwlock_write_unlock(&namespace_rwlock);
484 free(mp);
485 vfs_node_put(mr_node);
486 async_answer_0(rid, rc);
487 return;
488 }
489
490 rootfs.fs_handle = 0;
491 rootfs.service_id = 0;
492 } else {
493
494 /*
495 * Unmounting a non-root file system.
496 *
497 * We have a regular mount point node representing the parent
498 * file system, so we delegate the operation to it.
499 */
500
501 rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
502 if (rc != EOK) {
503 fibril_rwlock_write_unlock(&namespace_rwlock);
504 free(mp);
505 vfs_node_put(mr_node);
506 async_answer_0(rid, rc);
507 return;
508 }
509
510 vfs_node_t *mp_node = vfs_node_get(&mp_res);
511 if (!mp_node) {
512 fibril_rwlock_write_unlock(&namespace_rwlock);
513 free(mp);
514 vfs_node_put(mr_node);
515 async_answer_0(rid, ENOMEM);
516 return;
517 }
518
519 exch = vfs_exchange_grab(mp_node->fs_handle);
520 rc = async_req_2_0(exch, VFS_OUT_UNMOUNT,
521 mp_node->service_id, mp_node->index);
522 vfs_exchange_release(exch);
523
524 if (rc != EOK) {
525 fibril_rwlock_write_unlock(&namespace_rwlock);
526 free(mp);
527 vfs_node_put(mp_node);
528 vfs_node_put(mr_node);
529 async_answer_0(rid, rc);
530 return;
531 }
532
533 /* Drop the reference we got above. */
534 vfs_node_put(mp_node);
535 /* Drop the reference from when the file system was mounted. */
536 vfs_node_put(mp_node);
537 }
538
539 /*
540 * All went well, the mounted file system was successfully unmounted.
541 * The only thing left is to forget the unmounted root VFS node.
542 */
543 vfs_node_forget(mr_node);
544 fibril_rwlock_write_unlock(&namespace_rwlock);
545
546 fibril_mutex_lock(&mtab_list_lock);
547
548 int found = 0;
549
550 list_foreach(mtab_list, cur) {
551 mtab_ent_t *mtab_ent = list_get_instance(cur, mtab_ent_t,
552 link);
553
554 if (str_cmp(mtab_ent->mp, mp) == 0) {
555 list_remove(&mtab_ent->link);
556 mtab_size--;
557 free(mtab_ent);
558 found = 1;
559 break;
560 }
561 }
562 assert(found);
563 fibril_mutex_unlock(&mtab_list_lock);
564
565 free(mp);
566
567 async_answer_0(rid, EOK);
568}
569
570void vfs_walk(ipc_callid_t rid, ipc_call_t *request)
571{
572 /*
573 * Parent is our relative root for file lookup.
574 * For defined flags, see <ipc/vfs.h>.
575 */
576 int parentfd = IPC_GET_ARG1(*request);
577 int flags = IPC_GET_ARG2(*request);
578
579 if ((flags&~WALK_ALL_FLAGS) != 0) {
580 /* Invalid flags. */
581 async_answer_0(rid, EINVAL);
582 return;
583 }
584
585 char *path;
586 int rc = async_data_write_accept((void **)&path, true, 0, 0, 0, NULL);
587
588 /* Lookup the file structure corresponding to the file descriptor. */
589 vfs_file_t *parent = NULL;
590 vfs_pair_t *parent_node = NULL;
591 // TODO: Client-side root.
592 if (parentfd != -1) {
593 parent = vfs_file_get(parentfd);
594 if (!parent) {
595 free(path);
596 async_answer_0(rid, EBADF);
597 return;
598 }
599 parent_node = (vfs_pair_t *)parent->node;
600 }
601
602 fibril_rwlock_read_lock(&namespace_rwlock);
603
604 vfs_lookup_res_t lr;
605 rc = vfs_lookup_internal(path, 0, &lr, parent_node);
606 free(path);
607
608 if (rc != EOK) {
609 fibril_rwlock_read_unlock(&namespace_rwlock);
610 if (parent) {
611 vfs_file_put(parent);
612 }
613 async_answer_0(rid, rc);
614 return;
615 }
616
617 vfs_node_t *node = vfs_node_get(&lr);
618
619 int fd = vfs_fd_alloc(false);
620 if (fd < 0) {
621 vfs_node_put(node);
622 if (parent) {
623 vfs_file_put(parent);
624 }
625 async_answer_0(rid, fd);
626 return;
627 }
628
629 vfs_file_t *file = vfs_file_get(fd);
630 assert(file != NULL);
631
632 file->node = node;
633 if (parent) {
634 file->permissions = parent->permissions;
635 } else {
636 file->permissions = MODE_READ | MODE_WRITE | MODE_APPEND;
637 }
638 file->open_read = false;
639 file->open_write = false;
640
641 vfs_node_addref(node);
642 vfs_node_put(node);
643 vfs_file_put(file);
644 if (parent) {
645 vfs_file_put(parent);
646 }
647
648 fibril_rwlock_read_unlock(&namespace_rwlock);
649
650 async_answer_1(rid, EOK, fd);
651}
652
653void vfs_open2(ipc_callid_t rid, ipc_call_t *request)
654{
655 int fd = IPC_GET_ARG1(*request);
656 int flags = IPC_GET_ARG2(*request);
657
658 if (flags == 0) {
659 async_answer_0(rid, EINVAL);
660 return;
661 }
662
663 vfs_file_t *file = vfs_file_get(fd);
664 if (!file) {
665 async_answer_0(rid, EBADF);
666 return;
667 }
668
669 if ((flags & ~file->permissions) != 0) {
670 vfs_file_put(file);
671 async_answer_0(rid, EPERM);
672 return;
673 }
674
675 file->open_read = (flags & MODE_READ) != 0;
676 file->open_write = (flags & (MODE_WRITE | MODE_APPEND)) != 0;
677 file->append = (flags & MODE_APPEND) != 0;
678
679 if (!file->open_read && !file->open_write) {
680 vfs_file_put(file);
681 async_answer_0(rid, EINVAL);
682 return;
683 }
684
685 if (file->node->type == VFS_NODE_DIRECTORY && file->open_write) {
686 file->open_read = file->open_write = false;
687 vfs_file_put(file);
688 async_answer_0(rid, EINVAL);
689 return;
690 }
691
692 int rc = vfs_open_node_remote(file->node);
693 if (rc != EOK) {
694 file->open_read = file->open_write = false;
695 vfs_file_put(file);
696 async_answer_0(rid, rc);
697 return;
698 }
699
700 vfs_file_put(file);
701 async_answer_0(rid, EOK);
702}
703
704void vfs_open(ipc_callid_t rid, ipc_call_t *request)
705{
706 /*
707 * The POSIX interface is open(path, oflag, mode).
708 * We can receive oflags and mode along with the VFS_IN_OPEN call;
709 * the path will need to arrive in another call.
710 *
711 * We also receive one private, non-POSIX set of flags called lflag
712 * used to pass information to vfs_lookup_internal().
713 */
714 int lflag = IPC_GET_ARG1(*request);
715 int oflag = IPC_GET_ARG2(*request);
716 int mode = IPC_GET_ARG3(*request);
717
718 /* Ignore mode for now. */
719 (void) mode;
720
721 /*
722 * Make sure that we are called with exactly one of L_FILE and
723 * L_DIRECTORY. Make sure that the user does not pass L_OPEN,
724 * L_ROOT or L_MP.
725 */
726 if (((lflag & (L_FILE | L_DIRECTORY)) == 0) ||
727 ((lflag & (L_FILE | L_DIRECTORY)) == (L_FILE | L_DIRECTORY)) ||
728 (lflag & (L_OPEN | L_ROOT | L_MP))) {
729 async_answer_0(rid, EINVAL);
730 return;
731 }
732
733 if (oflag & O_CREAT)
734 lflag |= L_CREATE;
735 if (oflag & O_EXCL)
736 lflag |= L_EXCLUSIVE;
737
738 char *path;
739 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
740 if (rc != EOK) {
741 async_answer_0(rid, rc);
742 return;
743 }
744
745 /*
746 * Avoid the race condition in which the file can be deleted before we
747 * find/create-and-lock the VFS node corresponding to the looked-up
748 * triplet.
749 */
750 if (lflag & L_CREATE)
751 fibril_rwlock_write_lock(&namespace_rwlock);
752 else
753 fibril_rwlock_read_lock(&namespace_rwlock);
754
755 /* The path is now populated and we can call vfs_lookup_internal(). */
756 vfs_lookup_res_t lr;
757 rc = vfs_lookup_internal(path, lflag | L_OPEN, &lr, NULL);
758 if (rc != EOK) {
759 if (lflag & L_CREATE)
760 fibril_rwlock_write_unlock(&namespace_rwlock);
761 else
762 fibril_rwlock_read_unlock(&namespace_rwlock);
763 async_answer_0(rid, rc);
764 free(path);
765 return;
766 }
767
768 /* Path is no longer needed. */
769 free(path);
770
771 vfs_node_t *node = vfs_node_get(&lr);
772 if (lflag & L_CREATE)
773 fibril_rwlock_write_unlock(&namespace_rwlock);
774 else
775 fibril_rwlock_read_unlock(&namespace_rwlock);
776
777 /* Truncate the file if requested and if necessary. */
778 if (oflag & O_TRUNC) {
779 fibril_rwlock_write_lock(&node->contents_rwlock);
780 if (node->size) {
781 rc = vfs_truncate_internal(node->fs_handle,
782 node->service_id, node->index, 0);
783 if (rc) {
784 fibril_rwlock_write_unlock(&node->contents_rwlock);
785 vfs_node_put(node);
786 async_answer_0(rid, rc);
787 return;
788 }
789 node->size = 0;
790 }
791 fibril_rwlock_write_unlock(&node->contents_rwlock);
792 }
793
794 /*
795 * Get ourselves a file descriptor and the corresponding vfs_file_t
796 * structure.
797 */
798 int fd = vfs_fd_alloc((oflag & O_DESC) != 0);
799 if (fd < 0) {
800 vfs_node_put(node);
801 async_answer_0(rid, fd);
802 return;
803 }
804 vfs_file_t *file = vfs_file_get(fd);
805
806 /* There is a potential race with another fibril of a malicious client. */
807 if (!file) {
808 vfs_node_put(node);
809 async_answer_0(rid, EBUSY);
810 return;
811 }
812
813 file->node = node;
814 if (oflag & O_RDONLY)
815 file->open_read = true;
816 if (oflag & O_WRONLY)
817 file->open_write = true;
818 if (oflag & O_RDWR)
819 file->open_read = file->open_write = true;
820 if (oflag & O_APPEND)
821 file->append = true;
822
823 /*
824 * The following increase in reference count is for the fact that the
825 * file is being opened and that a file structure is pointing to it.
826 * It is necessary so that the file will not disappear when
827 * vfs_node_put() is called. The reference will be dropped by the
828 * respective VFS_IN_CLOSE.
829 */
830 vfs_node_addref(node);
831 vfs_node_put(node);
832 vfs_file_put(file);
833
834 /* Success! Return the new file descriptor to the client. */
835 async_answer_1(rid, EOK, fd);
836}
837
838void vfs_sync(ipc_callid_t rid, ipc_call_t *request)
839{
840 int fd = IPC_GET_ARG1(*request);
841
842 /* Lookup the file structure corresponding to the file descriptor. */
843 vfs_file_t *file = vfs_file_get(fd);
844 if (!file) {
845 async_answer_0(rid, ENOENT);
846 return;
847 }
848
849 /*
850 * Lock the open file structure so that no other thread can manipulate
851 * the same open file at a time.
852 */
853 fibril_mutex_lock(&file->lock);
854 async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);
855
856 /* Make a VFS_OUT_SYMC request at the destination FS server. */
857 aid_t msg;
858 ipc_call_t answer;
859 msg = async_send_2(fs_exch, VFS_OUT_SYNC, file->node->service_id,
860 file->node->index, &answer);
861
862 vfs_exchange_release(fs_exch);
863
864 /* Wait for reply from the FS server. */
865 sysarg_t rc;
866 async_wait_for(msg, &rc);
867
868 fibril_mutex_unlock(&file->lock);
869
870 vfs_file_put(file);
871 async_answer_0(rid, rc);
872}
873
874void vfs_close(ipc_callid_t rid, ipc_call_t *request)
875{
876 int fd = IPC_GET_ARG1(*request);
877 int ret = vfs_fd_free(fd);
878 async_answer_0(rid, ret);
879}
880
/** Common implementation of VFS_IN_READ and VFS_IN_WRITE.
 *
 * Forwards the client's data transfer to the file system server backing the
 * open file and updates the cached file position (and, for writes, the cached
 * size) afterwards.
 *
 * @param rid     Hash of the request.
 * @param request The request itself.
 * @param read    True to perform a read, false to perform a write.
 */
static void vfs_rdwr(ipc_callid_t rid, ipc_call_t *request, bool read)
{
	/*
	 * The following code strongly depends on the fact that the files data
	 * structure can be only accessed by a single fibril and all file
	 * operations are serialized (i.e. the reads and writes cannot
	 * interleave and a file cannot be closed while it is being read).
	 *
	 * Additional synchronization needs to be added once the table of
	 * open files supports parallel access!
	 */

	int fd = IPC_GET_ARG1(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&file->lock);

	/* The descriptor must have been opened for the requested direction. */
	if ((read && !file->open_read) || (!read && !file->open_write)) {
		fibril_mutex_unlock(&file->lock);
		async_answer_0(rid, EINVAL);
		return;
	}

	vfs_info_t *fs_info = fs_handle_to_info(file->node->fs_handle);
	assert(fs_info);

	/*
	 * Lock the file's node so that no other client can read/write to it at
	 * the same time unless the FS supports concurrent reads/writes and its
	 * write implementation does not modify the file size.
	 */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_lock(&file->node->contents_rwlock);
	else
		fibril_rwlock_write_lock(&file->node->contents_rwlock);

	if (file->node->type == VFS_NODE_DIRECTORY) {
		/*
		 * Make sure that no one is modifying the namespace
		 * while we are in readdir().
		 */
		assert(read);
		fibril_rwlock_read_lock(&namespace_rwlock);
	}

	async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

	/*
	 * Make a VFS_READ/VFS_WRITE request at the destination FS server
	 * and forward the IPC_M_DATA_READ/IPC_M_DATA_WRITE request to the
	 * destination FS server. The call will be routed as if sent by
	 * ourselves. Note that call arguments are immutable in this case so we
	 * don't have to bother.
	 */
	sysarg_t rc;
	ipc_call_t answer;
	if (read) {
		rc = async_data_read_forward_4_1(fs_exch, VFS_OUT_READ,
		    file->node->service_id, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	} else {
		/* O_APPEND: every write goes to the current end of file. */
		if (file->append)
			file->pos = file->node->size;

		rc = async_data_write_forward_4_1(fs_exch, VFS_OUT_WRITE,
		    file->node->service_id, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	}

	vfs_exchange_release(fs_exch);

	/*
	 * NOTE(review): 'bytes' is taken from the answer even when rc != EOK;
	 * presumably FS servers leave ARG1 zeroed on failure — confirm.
	 */
	size_t bytes = IPC_GET_ARG1(answer);

	if (file->node->type == VFS_NODE_DIRECTORY)
		fibril_rwlock_read_unlock(&namespace_rwlock);

	/* Unlock the VFS node. */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_unlock(&file->node->contents_rwlock);
	else {
		/* Update the cached version of node's size. */
		if (rc == EOK)
			file->node->size = MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
		fibril_rwlock_write_unlock(&file->node->contents_rwlock);
	}

	/* Update the position pointer and unlock the open file. */
	if (rc == EOK)
		file->pos += bytes;
	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);

	/*
	 * FS server's reply is the final result of the whole operation we
	 * return to the client.
	 */
	async_answer_1(rid, rc, bytes);
}
992
993void vfs_read(ipc_callid_t rid, ipc_call_t *request)
994{
995 vfs_rdwr(rid, request, true);
996}
997
998void vfs_write(ipc_callid_t rid, ipc_call_t *request)
999{
1000 vfs_rdwr(rid, request, false);
1001}
1002
1003void vfs_seek(ipc_callid_t rid, ipc_call_t *request)
1004{
1005 int fd = (int) IPC_GET_ARG1(*request);
1006 off64_t off = (off64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
1007 IPC_GET_ARG3(*request));
1008 int whence = (int) IPC_GET_ARG4(*request);
1009
1010 /* Lookup the file structure corresponding to the file descriptor. */
1011 vfs_file_t *file = vfs_file_get(fd);
1012 if (!file) {
1013 async_answer_0(rid, ENOENT);
1014 return;
1015 }
1016
1017 fibril_mutex_lock(&file->lock);
1018
1019 off64_t newoff;
1020 switch (whence) {
1021 case SEEK_SET:
1022 if (off >= 0) {
1023 file->pos = (aoff64_t) off;
1024 fibril_mutex_unlock(&file->lock);
1025 vfs_file_put(file);
1026 async_answer_1(rid, EOK, off);
1027 return;
1028 }
1029 break;
1030 case SEEK_CUR:
1031 if ((off >= 0) && (file->pos + off < file->pos)) {
1032 fibril_mutex_unlock(&file->lock);
1033 vfs_file_put(file);
1034 async_answer_0(rid, EOVERFLOW);
1035 return;
1036 }
1037
1038 if ((off < 0) && (file->pos < (aoff64_t) -off)) {
1039 fibril_mutex_unlock(&file->lock);
1040 vfs_file_put(file);
1041 async_answer_0(rid, EOVERFLOW);
1042 return;
1043 }
1044
1045 file->pos += off;
1046 newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;
1047
1048 fibril_mutex_unlock(&file->lock);
1049 vfs_file_put(file);
1050 async_answer_2(rid, EOK, LOWER32(newoff),
1051 UPPER32(newoff));
1052 return;
1053 case SEEK_END:
1054 fibril_rwlock_read_lock(&file->node->contents_rwlock);
1055 aoff64_t size = file->node->size;
1056
1057 if ((off >= 0) && (size + off < size)) {
1058 fibril_rwlock_read_unlock(&file->node->contents_rwlock);
1059 fibril_mutex_unlock(&file->lock);
1060 vfs_file_put(file);
1061 async_answer_0(rid, EOVERFLOW);
1062 return;
1063 }
1064
1065 if ((off < 0) && (size < (aoff64_t) -off)) {
1066 fibril_rwlock_read_unlock(&file->node->contents_rwlock);
1067 fibril_mutex_unlock(&file->lock);
1068 vfs_file_put(file);
1069 async_answer_0(rid, EOVERFLOW);
1070 return;
1071 }
1072
1073 file->pos = size + off;
1074 newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;
1075
1076 fibril_rwlock_read_unlock(&file->node->contents_rwlock);
1077 fibril_mutex_unlock(&file->lock);
1078 vfs_file_put(file);
1079 async_answer_2(rid, EOK, LOWER32(newoff), UPPER32(newoff));
1080 return;
1081 }
1082
1083 fibril_mutex_unlock(&file->lock);
1084 vfs_file_put(file);
1085 async_answer_0(rid, EINVAL);
1086}
1087
1088int vfs_truncate_internal(fs_handle_t fs_handle, service_id_t service_id,
1089 fs_index_t index, aoff64_t size)
1090{
1091 async_exch_t *exch = vfs_exchange_grab(fs_handle);
1092 sysarg_t rc = async_req_4_0(exch, VFS_OUT_TRUNCATE,
1093 (sysarg_t) service_id, (sysarg_t) index, LOWER32(size),
1094 UPPER32(size));
1095 vfs_exchange_release(exch);
1096
1097 return (int) rc;
1098}
1099
1100void vfs_truncate(ipc_callid_t rid, ipc_call_t *request)
1101{
1102 int fd = IPC_GET_ARG1(*request);
1103 aoff64_t size = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
1104 IPC_GET_ARG3(*request));
1105 int rc;
1106
1107 vfs_file_t *file = vfs_file_get(fd);
1108 if (!file) {
1109 async_answer_0(rid, ENOENT);
1110 return;
1111 }
1112 fibril_mutex_lock(&file->lock);
1113
1114 fibril_rwlock_write_lock(&file->node->contents_rwlock);
1115 rc = vfs_truncate_internal(file->node->fs_handle,
1116 file->node->service_id, file->node->index, size);
1117 if (rc == EOK)
1118 file->node->size = size;
1119 fibril_rwlock_write_unlock(&file->node->contents_rwlock);
1120
1121 fibril_mutex_unlock(&file->lock);
1122 vfs_file_put(file);
1123 async_answer_0(rid, (sysarg_t)rc);
1124}
1125
1126void vfs_fstat(ipc_callid_t rid, ipc_call_t *request)
1127{
1128 int fd = IPC_GET_ARG1(*request);
1129 sysarg_t rc;
1130
1131 vfs_file_t *file = vfs_file_get(fd);
1132 if (!file) {
1133 async_answer_0(rid, ENOENT);
1134 return;
1135 }
1136
1137 ipc_callid_t callid;
1138 if (!async_data_read_receive(&callid, NULL)) {
1139 vfs_file_put(file);
1140 async_answer_0(callid, EINVAL);
1141 async_answer_0(rid, EINVAL);
1142 return;
1143 }
1144
1145 fibril_mutex_lock(&file->lock);
1146
1147 async_exch_t *exch = vfs_exchange_grab(file->node->fs_handle);
1148
1149 aid_t msg;
1150 msg = async_send_3(exch, VFS_OUT_STAT, file->node->service_id,
1151 file->node->index, true, NULL);
1152 async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);
1153
1154 vfs_exchange_release(exch);
1155
1156 async_wait_for(msg, &rc);
1157
1158 fibril_mutex_unlock(&file->lock);
1159 vfs_file_put(file);
1160 async_answer_0(rid, rc);
1161}
1162
1163void vfs_stat(ipc_callid_t rid, ipc_call_t *request)
1164{
1165 char *path;
1166 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
1167 if (rc != EOK) {
1168 async_answer_0(rid, rc);
1169 return;
1170 }
1171
1172 ipc_callid_t callid;
1173 if (!async_data_read_receive(&callid, NULL)) {
1174 free(path);
1175 async_answer_0(callid, EINVAL);
1176 async_answer_0(rid, EINVAL);
1177 return;
1178 }
1179
1180 vfs_lookup_res_t lr;
1181 fibril_rwlock_read_lock(&namespace_rwlock);
1182 rc = vfs_lookup_internal(path, L_NONE, &lr, NULL);
1183 free(path);
1184 if (rc != EOK) {
1185 fibril_rwlock_read_unlock(&namespace_rwlock);
1186 async_answer_0(callid, rc);
1187 async_answer_0(rid, rc);
1188 return;
1189 }
1190 vfs_node_t *node = vfs_node_get(&lr);
1191 if (!node) {
1192 fibril_rwlock_read_unlock(&namespace_rwlock);
1193 async_answer_0(callid, ENOMEM);
1194 async_answer_0(rid, ENOMEM);
1195 return;
1196 }
1197
1198 fibril_rwlock_read_unlock(&namespace_rwlock);
1199
1200 async_exch_t *exch = vfs_exchange_grab(node->fs_handle);
1201
1202 aid_t msg;
1203 msg = async_send_3(exch, VFS_OUT_STAT, node->service_id,
1204 node->index, false, NULL);
1205 async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);
1206
1207 vfs_exchange_release(exch);
1208
1209 sysarg_t rv;
1210 async_wait_for(msg, &rv);
1211
1212 async_answer_0(rid, rv);
1213
1214 vfs_node_put(node);
1215}
1216
1217void vfs_mkdir(ipc_callid_t rid, ipc_call_t *request)
1218{
1219 int mode = IPC_GET_ARG1(*request);
1220
1221 char *path;
1222 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
1223 if (rc != EOK) {
1224 async_answer_0(rid, rc);
1225 return;
1226 }
1227
1228 /* Ignore mode for now. */
1229 (void) mode;
1230
1231 fibril_rwlock_write_lock(&namespace_rwlock);
1232 int lflag = L_DIRECTORY | L_CREATE | L_EXCLUSIVE;
1233 rc = vfs_lookup_internal(path, lflag, NULL, NULL);
1234 fibril_rwlock_write_unlock(&namespace_rwlock);
1235 free(path);
1236 async_answer_0(rid, rc);
1237}
1238
1239void vfs_unlink(ipc_callid_t rid, ipc_call_t *request)
1240{
1241 int lflag = IPC_GET_ARG1(*request);
1242
1243 char *path;
1244 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
1245 if (rc != EOK) {
1246 async_answer_0(rid, rc);
1247 return;
1248 }
1249
1250 fibril_rwlock_write_lock(&namespace_rwlock);
1251 lflag &= L_DIRECTORY; /* sanitize lflag */
1252 vfs_lookup_res_t lr;
1253 rc = vfs_lookup_internal(path, lflag | L_UNLINK, &lr, NULL);
1254 free(path);
1255 if (rc != EOK) {
1256 fibril_rwlock_write_unlock(&namespace_rwlock);
1257 async_answer_0(rid, rc);
1258 return;
1259 }
1260
1261 /*
1262 * The name has already been unlinked by vfs_lookup_internal().
1263 * We have to get and put the VFS node to ensure that it is
1264 * VFS_OUT_DESTROY'ed after the last reference to it is dropped.
1265 */
1266 vfs_node_t *node = vfs_node_get(&lr);
1267 fibril_mutex_lock(&nodes_mutex);
1268 node->lnkcnt--;
1269 fibril_mutex_unlock(&nodes_mutex);
1270 fibril_rwlock_write_unlock(&namespace_rwlock);
1271 vfs_node_put(node);
1272 async_answer_0(rid, EOK);
1273}
1274
1275void vfs_rename(ipc_callid_t rid, ipc_call_t *request)
1276{
1277 /* Retrieve the old path. */
1278 char *old;
1279 int rc = async_data_write_accept((void **) &old, true, 0, 0, 0, NULL);
1280 if (rc != EOK) {
1281 async_answer_0(rid, rc);
1282 return;
1283 }
1284
1285 /* Retrieve the new path. */
1286 char *new;
1287 rc = async_data_write_accept((void **) &new, true, 0, 0, 0, NULL);
1288 if (rc != EOK) {
1289 free(old);
1290 async_answer_0(rid, rc);
1291 return;
1292 }
1293
1294 size_t olen;
1295 size_t nlen;
1296 char *oldc = canonify(old, &olen);
1297 char *newc = canonify(new, &nlen);
1298
1299 if ((!oldc) || (!newc)) {
1300 async_answer_0(rid, EINVAL);
1301 free(old);
1302 free(new);
1303 return;
1304 }
1305
1306 oldc[olen] = '\0';
1307 newc[nlen] = '\0';
1308
1309 if ((!str_lcmp(newc, oldc, str_length(oldc))) &&
1310 ((newc[str_length(oldc)] == '/') ||
1311 (str_length(oldc) == 1) ||
1312 (str_length(oldc) == str_length(newc)))) {
1313 /*
1314 * oldc is a prefix of newc and either
1315 * - newc continues with a / where oldc ends, or
1316 * - oldc was / itself, or
1317 * - oldc and newc are equal.
1318 */
1319 async_answer_0(rid, EINVAL);
1320 free(old);
1321 free(new);
1322 return;
1323 }
1324
1325 vfs_lookup_res_t old_lr;
1326 vfs_lookup_res_t new_lr;
1327 vfs_lookup_res_t new_par_lr;
1328 fibril_rwlock_write_lock(&namespace_rwlock);
1329
1330 /* Lookup the node belonging to the old file name. */
1331 rc = vfs_lookup_internal(oldc, L_NONE, &old_lr, NULL);
1332 if (rc != EOK) {
1333 fibril_rwlock_write_unlock(&namespace_rwlock);
1334 async_answer_0(rid, rc);
1335 free(old);
1336 free(new);
1337 return;
1338 }
1339
1340 vfs_node_t *old_node = vfs_node_get(&old_lr);
1341 if (!old_node) {
1342 fibril_rwlock_write_unlock(&namespace_rwlock);
1343 async_answer_0(rid, ENOMEM);
1344 free(old);
1345 free(new);
1346 return;
1347 }
1348
1349 /* Determine the path to the parent of the node with the new name. */
1350 char *parentc = str_dup(newc);
1351 if (!parentc) {
1352 fibril_rwlock_write_unlock(&namespace_rwlock);
1353 vfs_node_put(old_node);
1354 async_answer_0(rid, rc);
1355 free(old);
1356 free(new);
1357 return;
1358 }
1359
1360 char *lastsl = str_rchr(parentc + 1, '/');
1361 if (lastsl)
1362 *lastsl = '\0';
1363 else
1364 parentc[1] = '\0';
1365
1366 /* Lookup parent of the new file name. */
1367 rc = vfs_lookup_internal(parentc, L_NONE, &new_par_lr, NULL);
1368 free(parentc); /* not needed anymore */
1369 if (rc != EOK) {
1370 fibril_rwlock_write_unlock(&namespace_rwlock);
1371 vfs_node_put(old_node);
1372 async_answer_0(rid, rc);
1373 free(old);
1374 free(new);
1375 return;
1376 }
1377
1378 /* Check whether linking to the same file system instance. */
1379 if ((old_node->fs_handle != new_par_lr.triplet.fs_handle) ||
1380 (old_node->service_id != new_par_lr.triplet.service_id)) {
1381 fibril_rwlock_write_unlock(&namespace_rwlock);
1382 vfs_node_put(old_node);
1383 async_answer_0(rid, EXDEV); /* different file systems */
1384 free(old);
1385 free(new);
1386 return;
1387 }
1388
1389 /* Destroy the old link for the new name. */
1390 vfs_node_t *new_node = NULL;
1391 rc = vfs_lookup_internal(newc, L_UNLINK, &new_lr, NULL);
1392
1393 switch (rc) {
1394 case ENOENT:
1395 /* simply not in our way */
1396 break;
1397 case EOK:
1398 new_node = vfs_node_get(&new_lr);
1399 if (!new_node) {
1400 fibril_rwlock_write_unlock(&namespace_rwlock);
1401 vfs_node_put(old_node);
1402 async_answer_0(rid, ENOMEM);
1403 free(old);
1404 free(new);
1405 return;
1406 }
1407 fibril_mutex_lock(&nodes_mutex);
1408 new_node->lnkcnt--;
1409 fibril_mutex_unlock(&nodes_mutex);
1410 break;
1411 default:
1412 fibril_rwlock_write_unlock(&namespace_rwlock);
1413 vfs_node_put(old_node);
1414 async_answer_0(rid, ENOTEMPTY);
1415 free(old);
1416 free(new);
1417 return;
1418 }
1419
1420 /* Create the new link for the new name. */
1421 rc = vfs_lookup_internal(newc, L_LINK, NULL, NULL, old_node->index);
1422 if (rc != EOK) {
1423 fibril_rwlock_write_unlock(&namespace_rwlock);
1424 vfs_node_put(old_node);
1425 if (new_node)
1426 vfs_node_put(new_node);
1427 async_answer_0(rid, rc);
1428 free(old);
1429 free(new);
1430 return;
1431 }
1432
1433 fibril_mutex_lock(&nodes_mutex);
1434 old_node->lnkcnt++;
1435 fibril_mutex_unlock(&nodes_mutex);
1436
1437 /* Destroy the link for the old name. */
1438 rc = vfs_lookup_internal(oldc, L_UNLINK, NULL, NULL);
1439 if (rc != EOK) {
1440 fibril_rwlock_write_unlock(&namespace_rwlock);
1441 vfs_node_put(old_node);
1442 if (new_node)
1443 vfs_node_put(new_node);
1444 async_answer_0(rid, rc);
1445 free(old);
1446 free(new);
1447 return;
1448 }
1449
1450 fibril_mutex_lock(&nodes_mutex);
1451 old_node->lnkcnt--;
1452 fibril_mutex_unlock(&nodes_mutex);
1453 fibril_rwlock_write_unlock(&namespace_rwlock);
1454 vfs_node_put(old_node);
1455
1456 if (new_node)
1457 vfs_node_put(new_node);
1458
1459 free(old);
1460 free(new);
1461 async_answer_0(rid, EOK);
1462}
1463
1464void vfs_dup(ipc_callid_t rid, ipc_call_t *request)
1465{
1466 int oldfd = IPC_GET_ARG1(*request);
1467 int newfd = IPC_GET_ARG2(*request);
1468
1469 /* If the file descriptors are the same, do nothing. */
1470 if (oldfd == newfd) {
1471 async_answer_1(rid, EOK, newfd);
1472 return;
1473 }
1474
1475 /* Lookup the file structure corresponding to oldfd. */
1476 vfs_file_t *oldfile = vfs_file_get(oldfd);
1477 if (!oldfile) {
1478 async_answer_0(rid, EBADF);
1479 return;
1480 }
1481
1482 /*
1483 * Lock the open file structure so that no other thread can manipulate
1484 * the same open file at a time.
1485 */
1486 fibril_mutex_lock(&oldfile->lock);
1487
1488 /* Make sure newfd is closed. */
1489 (void) vfs_fd_free(newfd);
1490
1491 /* Assign the old file to newfd. */
1492 int ret = vfs_fd_assign(oldfile, newfd);
1493 fibril_mutex_unlock(&oldfile->lock);
1494 vfs_file_put(oldfile);
1495
1496 if (ret != EOK)
1497 async_answer_0(rid, ret);
1498 else
1499 async_answer_1(rid, EOK, newfd);
1500}
1501
1502void vfs_wait_handle(ipc_callid_t rid, ipc_call_t *request)
1503{
1504 int fd = vfs_wait_handle_internal();
1505 async_answer_1(rid, EOK, fd);
1506}
1507
/** Handle the VFS_IN_GET_MTAB request: send the mount table to the client.
 *
 * Protocol: the client first pings (VFS_IN_PING) and is answered with the
 * number of mounted file systems. Then, for every mount entry, the client
 * performs three data reads (mount point, mount options, file system name)
 * followed by another ping that is answered with the entry's instance
 * number and service ID. Any deviation aborts the transfer with ENOTSUP.
 *
 * @param rid		Hash of the IPC request to answer.
 * @param request	IPC request (carries no arguments).
 */
void vfs_get_mtab(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_callid_t callid;
	ipc_call_t data;
	sysarg_t rc = EOK;
	size_t len;

	/* The mount table must not change while it is being serialized. */
	fibril_mutex_lock(&mtab_list_lock);

	/* Send to the caller the number of mounted filesystems */
	callid = async_get_call(&data);
	if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
		rc = ENOTSUP;
		async_answer_0(callid, rc);
		goto exit;
	}
	async_answer_1(callid, EOK, mtab_size);

	list_foreach(mtab_list, cur) {
		mtab_ent_t *mtab_ent = list_get_instance(cur, mtab_ent_t,
		    link);

		/* Assume protocol violation until the entry completes. */
		rc = ENOTSUP;

		/* First data read: the mount point path. */
		if (!async_data_read_receive(&callid, &len)) {
			async_answer_0(callid, rc);
			goto exit;
		}

		(void) async_data_read_finalize(callid, mtab_ent->mp,
		    str_size(mtab_ent->mp));

		/* Second data read: the mount options string. */
		if (!async_data_read_receive(&callid, &len)) {
			async_answer_0(callid, rc);
			goto exit;
		}

		(void) async_data_read_finalize(callid, mtab_ent->opts,
		    str_size(mtab_ent->opts));

		/* Third data read: the file system name. */
		if (!async_data_read_receive(&callid, &len)) {
			async_answer_0(callid, rc);
			goto exit;
		}

		(void) async_data_read_finalize(callid, mtab_ent->fs_name,
		    str_size(mtab_ent->fs_name));

		/* Per-entry ping, answered with instance and service ID. */
		callid = async_get_call(&data);

		if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
			async_answer_0(callid, rc);
			goto exit;
		}

		rc = EOK;
		async_answer_2(callid, rc, mtab_ent->instance,
		    mtab_ent->service_id);
	}

exit:
	fibril_mutex_unlock(&mtab_list_lock);
	async_answer_0(rid, rc);
}
1572
1573/**
1574 * @}
1575 */
Note: See TracBrowser for help on using the repository browser.