source: mainline/uspace/srv/vfs/vfs_ops.c@4636a60

Last change on this file was 4636a60, checked in by Jiri Zarevucky <zarevucky.jiri@…>, 12 years ago

Handle mounts at the server side, instead of in the endpoints.

1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file vfs_ops.c
35 * @brief Operations that VFS offers to its clients.
36 */
37
38#include "vfs.h"
39#include <macros.h>
40#include <stdint.h>
41#include <async.h>
42#include <errno.h>
43#include <stdio.h>
44#include <stdlib.h>
45#include <str.h>
46#include <stdbool.h>
47#include <fibril_synch.h>
48#include <adt/list.h>
49#include <unistd.h>
50#include <ctype.h>
51#include <fcntl.h>
52#include <assert.h>
53#include <vfs/canonify.h>
54#include <vfs/vfs_mtab.h>
55
56FIBRIL_MUTEX_INITIALIZE(mtab_list_lock);
57LIST_INITIALIZE(mtab_list);
58static size_t mtab_size = 0;
59
60/* Forward declarations of static functions. */
61static int vfs_truncate_internal(fs_handle_t, service_id_t, fs_index_t,
62 aoff64_t);
63
64/**
65 * This rwlock prevents the race between a triplet-to-VFS-node resolution and a
66 * concurrent VFS operation which modifies the file system namespace.
67 */
68FIBRIL_RWLOCK_INITIALIZE(namespace_rwlock);
69
70vfs_node_t *root = NULL;
71
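/** Connect a mounted file system to VFS.
 *
 * Resolve @a fsname (and @a instance) to a registered FS handle, waiting for
 * the FS to register if IPC_FLAG_BLOCKING is set, then notify the FS server
 * with VFS_OUT_MOUNTED and pass it the mount @a options. The FS server is
 * expected to answer with the index of its root node in ARG1 and the root
 * size in ARG2/ARG3. On success, a reference to the mounted root is stored
 * in @a root.
 */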
72static int vfs_connect_internal(service_id_t service_id, unsigned flags, unsigned instance,
73 char *options, char *fsname, vfs_node_t **root)
74{
75 fs_handle_t fs_handle = 0;
76
77 fibril_mutex_lock(&fs_list_lock);
78 while (1) {
79 fs_handle = fs_name_to_handle(instance, fsname, false);
80
81 if (fs_handle != 0 || !(flags & IPC_FLAG_BLOCKING)) {
82 break;
83 }
84
85 fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
86 }
87 fibril_mutex_unlock(&fs_list_lock);
88
89 if (fs_handle == 0) {
90 return ENOENT;
91 }
92
93 /* Tell the mountee that it is being mounted. */
94 ipc_call_t answer;
95 async_exch_t *exch = vfs_exchange_grab(fs_handle);
96 aid_t msg = async_send_1(exch, VFS_OUT_MOUNTED, (sysarg_t) service_id, &answer);
97 /* Send the mount options */
98 sysarg_t rc = async_data_write_start(exch, options, str_size(options));
99 if (rc != EOK) {
100 async_forget(msg);
101 vfs_exchange_release(exch);
102 return rc;
103 }
104 async_wait_for(msg, &rc);
105 vfs_exchange_release(exch);
106
107 if (rc != EOK) {
108 return rc;
109 }
110
111 vfs_lookup_res_t res;
112 res.triplet.fs_handle = fs_handle;
113 res.triplet.service_id = service_id;
114 res.triplet.index = (fs_index_t) IPC_GET_ARG1(answer);
115 res.size = (int64_t) MERGE_LOUP32(IPC_GET_ARG2(answer), IPC_GET_ARG3(answer));
116 res.type = VFS_NODE_DIRECTORY;
117
118 /* Add reference to the mounted root. */
119 *root = vfs_node_get(&res);
120 assert(*root);
121
122 return EOK;
123}
124
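/** Mount a file system on a mount point path.
 *
 * If no root file system is mounted yet, only @a mp equal to "/" is accepted
 * and the new file system becomes the root. Otherwise the mount point is
 * looked up and checked to be an empty directory that is not already a mount
 * point, and the mounted root is attached to it via mp_node->mount.
 */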
125static int vfs_mount_internal(service_id_t service_id, unsigned flags, unsigned instance,
126 char *opts, char *fs_name, char *mp)
127{
128 /* Resolve the path to the mountpoint. */
129
130 if (root == NULL) {
131 /* We still don't have the root file system mounted. */
132 if (str_cmp(mp, "/") != 0) {
133 /*
134 * We can't resolve this without the root filesystem
135 * being mounted first.
136 */
137 return ENOENT;
138 }
139
140 return vfs_connect_internal(service_id, flags, instance, opts, fs_name, &root);
141 }
142
143 /* We already have the root FS. */
144 if (str_cmp(mp, "/") == 0) {
145 /* Trying to mount root FS over root FS */
146 return EBUSY;
147 }
148
149 vfs_lookup_res_t mp_res;
150 int rc = vfs_lookup_internal(root, mp, L_DIRECTORY, &mp_res);
151 if (rc != EOK) {
152 /* The lookup failed. */
153 return rc;
154 }
155
156 vfs_node_t *mp_node;
157 mp_node = vfs_node_get(&mp_res);
158 if (!mp_node) {
159 return ENOMEM;
160 }
161
	if (mp_node->mount != NULL) {
		vfs_node_put(mp_node);
		return EBUSY;
	}

	if (mp_node->type != VFS_NODE_DIRECTORY) {
		vfs_node_put(mp_node);
		return ENOTDIR;
	}

	if (vfs_node_has_children(mp_node)) {
		vfs_node_put(mp_node);
		return ENOTEMPTY;
	}
173
174 vfs_node_t *mountee;
175
176 rc = vfs_connect_internal(service_id, flags, instance, opts, fs_name, &mountee);
177 if (rc != EOK) {
178 vfs_node_put(mp_node);
		return rc;
180 }
181
182 mp_node->mount = mountee;
	/*
	 * Both node references are now held by the mount so that the nodes
	 * cannot be freed. They are removed in detach_internal().
	 */
186 return EOK;
187}
188
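/** Handle a mount request from a client.
 *
 * The client passes the device service ID in ARG1, the mount flags in ARG2
 * and the instance number in ARG3, followed by three IPC data writes
 * carrying the mount point path, the mount options and the file system name.
 * On success, the mount is also recorded in the mtab list.
 */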
189void vfs_mount(ipc_callid_t rid, ipc_call_t *request)
190{
191 /*
192 * We expect the library to do the device-name to device-handle
193 * translation for us, thus the device handle will arrive as ARG1
194 * in the request.
195 */
196 service_id_t service_id = (service_id_t) IPC_GET_ARG1(*request);
197
198 /*
199 * Mount flags are passed as ARG2.
200 */
201 unsigned int flags = (unsigned int) IPC_GET_ARG2(*request);
202
203 /*
204 * Instance number is passed as ARG3.
205 */
206 unsigned int instance = IPC_GET_ARG3(*request);
207
208 /* We want the client to send us the mount point. */
209 char *mp;
210 int rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
211 0, NULL);
212 if (rc != EOK) {
213 async_answer_0(rid, rc);
214 return;
215 }
216
217 /* Now we expect to receive the mount options. */
218 char *opts;
219 rc = async_data_write_accept((void **) &opts, true, 0, MAX_MNTOPTS_LEN,
220 0, NULL);
221 if (rc != EOK) {
222 async_answer_0(rid, rc);
223 free(mp);
224 return;
225 }
226
227 /*
228 * Now, we expect the client to send us data with the name of the file
229 * system.
230 */
231 char *fs_name;
232 rc = async_data_write_accept((void **) &fs_name, true, 0,
233 FS_NAME_MAXLEN, 0, NULL);
234 if (rc != EOK) {
235 async_answer_0(rid, rc);
236 free(mp);
237 free(opts);
238 return;
239 }
240
	/* Preallocate an entry for the list of mounted filesystems. */
242 mtab_ent_t *mtab_ent = malloc(sizeof(mtab_ent_t));
243 if (!mtab_ent) {
244 async_answer_0(rid, ENOMEM);
245 free(mp);
246 free(fs_name);
247 free(opts);
248 return;
249 }
250
251 /* Mount the filesystem. */
252 fibril_rwlock_write_lock(&namespace_rwlock);
253 rc = vfs_mount_internal(service_id, flags, instance, opts, fs_name, mp);
254 fibril_rwlock_write_unlock(&namespace_rwlock);
255
256 /* Add the filesystem info to the list of mounted filesystems */
257 if (rc == EOK) {
258 str_cpy(mtab_ent->mp, MAX_PATH_LEN, mp);
259 str_cpy(mtab_ent->fs_name, FS_NAME_MAXLEN, fs_name);
260 str_cpy(mtab_ent->opts, MAX_MNTOPTS_LEN, opts);
261 mtab_ent->instance = instance;
262 mtab_ent->service_id = service_id;
263
264 link_initialize(&mtab_ent->link);
265
266 fibril_mutex_lock(&mtab_list_lock);
267 list_append(&mtab_ent->link, &mtab_list);
268 mtab_size++;
269 fibril_mutex_unlock(&mtab_list_lock);
	} else {
		free(mtab_ent);
	}
271
272 async_answer_0(rid, rc);
273
274 free(mp);
275 free(fs_name);
276 free(opts);
277}
278
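/** Handle an unmount request from a client.
 *
 * Receives the mount point path, checks that the mounted file system is not
 * in use (its total node reference count must be exactly one), sends
 * VFS_OUT_UNMOUNTED to the FS server and detaches the mounted root. For
 * non-root mount points, the corresponding mtab entry is removed as well.
 */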
279void vfs_unmount(ipc_callid_t rid, ipc_call_t *request)
280{
281 /*
282 * Receive the mount point path.
283 */
284 char *mp;
285 int rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
286 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}
289
290 /*
291 * Taking the namespace lock will do two things for us. First, it will
292 * prevent races with other lookup operations. Second, it will stop new
293 * references to already existing VFS nodes and creation of new VFS
294 * nodes. This is because new references are added as a result of some
295 * lookup operation or at least of some operation which is protected by
296 * the namespace lock.
297 */
298 fibril_rwlock_write_lock(&namespace_rwlock);
299
300 if (str_cmp(mp, "/") == 0) {
301 free(mp);
302
303 /*
304 * Unmounting the root file system.
305 *
306 * In this case, there is no mount point node and we send
307 * VFS_OUT_UNMOUNTED directly to the mounted file system.
308 */
309
310 if (!root) {
311 fibril_rwlock_write_unlock(&namespace_rwlock);
312 async_answer_0(rid, ENOENT);
313 return;
314 }
315
		/*
		 * Count the total number of references for the mounted file
		 * system. We expect exactly one, which we added when the file
		 * system was mounted. If we find more, the file system cannot
		 * be gracefully unmounted at the moment because someone is
		 * still working with it.
		 */
323 if (vfs_nodes_refcount_sum_get(root->fs_handle, root->service_id) != 1) {
324 fibril_rwlock_write_unlock(&namespace_rwlock);
325 async_answer_0(rid, EBUSY);
326 return;
327 }
328
329 async_exch_t *exch = vfs_exchange_grab(root->fs_handle);
330 rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED, root->service_id);
331 vfs_exchange_release(exch);
332
333 fibril_rwlock_write_unlock(&namespace_rwlock);
334 if (rc == EOK) {
335 vfs_node_forget(root);
336 root = NULL;
337 }
338 async_answer_0(rid, rc);
339 return;
340 }
341
342 /*
343 * Lookup the mounted root and instantiate it.
344 */
345 vfs_lookup_res_t mp_res;
346 rc = vfs_lookup_internal(root, mp, L_MP, &mp_res);
347 if (rc != EOK) {
348 fibril_rwlock_write_unlock(&namespace_rwlock);
349 free(mp);
350 async_answer_0(rid, rc);
351 return;
352 }
353 vfs_node_t *mp_node = vfs_node_get(&mp_res);
354 if (!mp_node) {
355 fibril_rwlock_write_unlock(&namespace_rwlock);
356 free(mp);
357 async_answer_0(rid, ENOMEM);
358 return;
359 }
360
361 if (mp_node->mount == NULL) {
362 fibril_rwlock_write_unlock(&namespace_rwlock);
363 vfs_node_put(mp_node);
364 free(mp);
365 async_answer_0(rid, ENOENT);
366 return;
367 }
368
	/*
	 * Count the total number of references for the mounted file system.
	 * We expect exactly one, which we added when the file system was
	 * mounted. If we find more, the file system cannot be gracefully
	 * unmounted at the moment because someone is still working with it.
	 */
376 if (vfs_nodes_refcount_sum_get(mp_node->mount->fs_handle, mp_node->mount->service_id) != 1) {
377 fibril_rwlock_write_unlock(&namespace_rwlock);
378 vfs_node_put(mp_node);
379 free(mp);
380 async_answer_0(rid, EBUSY);
381 return;
382 }
383
384 /* Unmount the filesystem. */
385 async_exch_t *exch = vfs_exchange_grab(mp_node->mount->fs_handle);
386 rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED, mp_node->mount->service_id);
387 vfs_exchange_release(exch);
388
389 vfs_node_forget(mp_node->mount);
390 mp_node->mount = NULL;
391
392 vfs_node_put(mp_node);
393 fibril_rwlock_write_unlock(&namespace_rwlock);
394
395 fibril_mutex_lock(&mtab_list_lock);
396 int found = 0;
397 list_foreach(mtab_list, cur) {
398 mtab_ent_t *mtab_ent = list_get_instance(cur, mtab_ent_t, link);
399
400 if (str_cmp(mtab_ent->mp, mp) == 0) {
401 list_remove(&mtab_ent->link);
402 mtab_size--;
403 free(mtab_ent);
404 found = 1;
405 break;
406 }
407 }
408 assert(found);
409 fibril_mutex_unlock(&mtab_list_lock);
410
411 free(mp);
412
413 async_answer_0(rid, EOK);
414 return;
415}
416
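/** Check that a WALK_* flag combination is sane.
 *
 * Rejects unknown flags, WALK_MAY_CREATE combined with WALK_MUST_CREATE,
 * WALK_REGULAR combined with WALK_DIRECTORY, and creation requests that do
 * not specify the node type.
 */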
417static inline bool walk_flags_valid(int flags)
418{
	if ((flags & ~WALK_ALL_FLAGS) != 0) {
		return false;
	}
	if ((flags & WALK_MAY_CREATE) && (flags & WALK_MUST_CREATE)) {
		return false;
	}
	if ((flags & WALK_REGULAR) && (flags & WALK_DIRECTORY)) {
		return false;
	}
	if ((flags & WALK_MAY_CREATE) || (flags & WALK_MUST_CREATE)) {
		if (!(flags & WALK_DIRECTORY) && !(flags & WALK_REGULAR)) {
			return false;
		}
	}
433 return true;
434}
435
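/** Translate WALK_* flags to the corresponding L_* lookup flags.
 *
 * For example, WALK_REGULAR | WALK_MUST_CREATE maps to
 * L_FILE | L_CREATE | L_EXCLUSIVE.
 */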
436static inline int walk_lookup_flags(int flags)
437{
438 int lflags = 0;
	if ((flags & WALK_MAY_CREATE) || (flags & WALK_MUST_CREATE)) {
		lflags |= L_CREATE;
	}
	if (flags & WALK_MUST_CREATE) {
		lflags |= L_EXCLUSIVE;
	}
	if (flags & WALK_REGULAR) {
		lflags |= L_FILE;
	}
	if (flags & WALK_DIRECTORY) {
		lflags |= L_DIRECTORY;
	}
451 return lflags;
452}
453
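/** Handle a walk request from a client.
 *
 * Looks up the received path relative to the file referred to by the parent
 * file descriptor in ARG1 (or the global root if the descriptor is -1),
 * allocates a new file descriptor for the result and answers it to the
 * client. The new file is not yet open for reading or writing; see
 * vfs_open2().
 */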
454void vfs_walk(ipc_callid_t rid, ipc_call_t *request)
455{
456 /*
457 * Parent is our relative root for file lookup.
458 * For defined flags, see <ipc/vfs.h>.
459 */
460 int parentfd = IPC_GET_ARG1(*request);
461 int flags = IPC_GET_ARG2(*request);
462
463 if (!walk_flags_valid(flags)) {
464 async_answer_0(rid, EINVAL);
465 return;
466 }
467
468 char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

471 /* Lookup the file structure corresponding to the file descriptor. */
472 vfs_file_t *parent = NULL;
473 vfs_node_t *parent_node = root;
474 // TODO: Client-side root.
475 if (parentfd != -1) {
476 parent = vfs_file_get(parentfd);
477 if (!parent) {
478 free(path);
479 async_answer_0(rid, EBADF);
480 return;
481 }
482 parent_node = parent->node;
483 }
484
485 fibril_rwlock_read_lock(&namespace_rwlock);
486
487 vfs_lookup_res_t lr;
488 rc = vfs_lookup_internal(parent_node, path, walk_lookup_flags(flags), &lr);
489 free(path);
490
491 if (rc != EOK) {
492 fibril_rwlock_read_unlock(&namespace_rwlock);
493 if (parent) {
494 vfs_file_put(parent);
495 }
496 async_answer_0(rid, rc);
497 return;
498 }
499
	vfs_node_t *node = vfs_node_get(&lr);
	if (node == NULL) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		if (parent) {
			vfs_file_put(parent);
		}
		async_answer_0(rid, ENOMEM);
		return;
	}

502 int fd = vfs_fd_alloc(false);
503 if (fd < 0) {
504 vfs_node_put(node);
505 if (parent) {
506 vfs_file_put(parent);
507 }
508 async_answer_0(rid, fd);
509 return;
510 }
511
512 vfs_file_t *file = vfs_file_get(fd);
513 assert(file != NULL);
514
515 file->node = node;
516 if (parent) {
517 file->permissions = parent->permissions;
518 } else {
519 file->permissions = MODE_READ | MODE_WRITE | MODE_APPEND;
520 }
521 file->open_read = false;
522 file->open_write = false;
523
524 vfs_file_put(file);
525 if (parent) {
526 vfs_file_put(parent);
527 }
528
529 fibril_rwlock_read_unlock(&namespace_rwlock);
530
531 async_answer_1(rid, EOK, fd);
532}
533
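/** Handle an open request for an already walked file.
 *
 * The mode flags in ARG2 must be a non-empty subset of the permissions
 * inherited during the walk. Directories cannot be opened for writing.
 * On success, the backing FS server is notified via vfs_open_node_remote().
 */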
534void vfs_open2(ipc_callid_t rid, ipc_call_t *request)
535{
536 int fd = IPC_GET_ARG1(*request);
537 int flags = IPC_GET_ARG2(*request);
538
539 if (flags == 0) {
540 async_answer_0(rid, EINVAL);
541 return;
542 }
543
544 vfs_file_t *file = vfs_file_get(fd);
545 if (!file) {
546 async_answer_0(rid, EBADF);
547 return;
548 }
549
550 if ((flags & ~file->permissions) != 0) {
551 vfs_file_put(file);
552 async_answer_0(rid, EPERM);
553 return;
554 }
555
556 file->open_read = (flags & MODE_READ) != 0;
557 file->open_write = (flags & (MODE_WRITE | MODE_APPEND)) != 0;
558 file->append = (flags & MODE_APPEND) != 0;
559
560 if (!file->open_read && !file->open_write) {
561 vfs_file_put(file);
562 async_answer_0(rid, EINVAL);
563 return;
564 }
565
566 if (file->node->type == VFS_NODE_DIRECTORY && file->open_write) {
567 file->open_read = file->open_write = false;
568 vfs_file_put(file);
569 async_answer_0(rid, EINVAL);
570 return;
571 }
572
573 int rc = vfs_open_node_remote(file->node);
574 if (rc != EOK) {
575 file->open_read = file->open_write = false;
576 vfs_file_put(file);
577 async_answer_0(rid, rc);
578 return;
579 }
580
581 vfs_file_put(file);
582 async_answer_0(rid, EOK);
583}
584
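/** Handle a sync request: forward VFS_OUT_SYNC for the file's node to its
 * FS server and relay the result to the client.
 */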
585void vfs_sync(ipc_callid_t rid, ipc_call_t *request)
586{
587 int fd = IPC_GET_ARG1(*request);
588
589 /* Lookup the file structure corresponding to the file descriptor. */
590 vfs_file_t *file = vfs_file_get(fd);
591 if (!file) {
592 async_answer_0(rid, ENOENT);
593 return;
594 }
595
596 /*
597 * Lock the open file structure so that no other thread can manipulate
598 * the same open file at a time.
599 */
600 fibril_mutex_lock(&file->lock);
601 async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);
602
	/* Make a VFS_OUT_SYNC request at the destination FS server. */
604 aid_t msg;
605 ipc_call_t answer;
606 msg = async_send_2(fs_exch, VFS_OUT_SYNC, file->node->service_id,
607 file->node->index, &answer);
608
609 vfs_exchange_release(fs_exch);
610
611 /* Wait for reply from the FS server. */
612 sysarg_t rc;
613 async_wait_for(msg, &rc);
614
615 fibril_mutex_unlock(&file->lock);
616
617 vfs_file_put(file);
618 async_answer_0(rid, rc);
619}
620
621void vfs_close(ipc_callid_t rid, ipc_call_t *request)
622{
623 int fd = IPC_GET_ARG1(*request);
624 int ret = vfs_fd_free(fd);
625 async_answer_0(rid, ret);
626}
627
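/** Common implementation of read and write requests.
 *
 * Forwards the data transfer to the FS server backing the file's node,
 * taking the node's contents lock as appropriate for the FS capabilities,
 * and updates the cached file size and file position on success.
 */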
628static void vfs_rdwr(ipc_callid_t rid, ipc_call_t *request, bool read)
629{
630 /*
631 * The following code strongly depends on the fact that the files data
	 * structure can only be accessed by a single fibril and all file
633 * operations are serialized (i.e. the reads and writes cannot
634 * interleave and a file cannot be closed while it is being read).
635 *
636 * Additional synchronization needs to be added once the table of
637 * open files supports parallel access!
638 */
639
640 int fd = IPC_GET_ARG1(*request);
641
642 /* Lookup the file structure corresponding to the file descriptor. */
643 vfs_file_t *file = vfs_file_get(fd);
644 if (!file) {
645 async_answer_0(rid, ENOENT);
646 return;
647 }
648
649 /*
650 * Lock the open file structure so that no other thread can manipulate
651 * the same open file at a time.
652 */
653 fibril_mutex_lock(&file->lock);
654
655 if ((read && !file->open_read) || (!read && !file->open_write)) {
656 fibril_mutex_unlock(&file->lock);
657 async_answer_0(rid, EINVAL);
658 return;
659 }
660
661 vfs_info_t *fs_info = fs_handle_to_info(file->node->fs_handle);
662 assert(fs_info);
663
664 /*
665 * Lock the file's node so that no other client can read/write to it at
666 * the same time unless the FS supports concurrent reads/writes and its
667 * write implementation does not modify the file size.
668 */
669 if ((read) ||
670 ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
671 fibril_rwlock_read_lock(&file->node->contents_rwlock);
672 else
673 fibril_rwlock_write_lock(&file->node->contents_rwlock);
674
675 if (file->node->type == VFS_NODE_DIRECTORY) {
676 /*
677 * Make sure that no one is modifying the namespace
678 * while we are in readdir().
679 */
680 assert(read);
681 fibril_rwlock_read_lock(&namespace_rwlock);
682 }
683
684 async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);
685
	/*
	 * Make a VFS_OUT_READ/VFS_OUT_WRITE request at the destination FS
	 * server and forward the IPC_M_DATA_READ/IPC_M_DATA_WRITE call to
	 * it. The call will be routed as if sent by ourselves. Note that the
	 * call arguments are immutable in this case, so we don't have to
	 * bother.
	 */
693 sysarg_t rc;
694 ipc_call_t answer;
695 if (read) {
696 rc = async_data_read_forward_4_1(fs_exch, VFS_OUT_READ,
697 file->node->service_id, file->node->index,
698 LOWER32(file->pos), UPPER32(file->pos), &answer);
699 } else {
700 if (file->append)
701 file->pos = vfs_node_get_size(file->node);
702
703 rc = async_data_write_forward_4_1(fs_exch, VFS_OUT_WRITE,
704 file->node->service_id, file->node->index,
705 LOWER32(file->pos), UPPER32(file->pos), &answer);
706 }
707
708 vfs_exchange_release(fs_exch);
709
710 size_t bytes = IPC_GET_ARG1(answer);
711
712 if (file->node->type == VFS_NODE_DIRECTORY) {
713 fibril_rwlock_read_unlock(&namespace_rwlock);
714 }
715
716 /* Unlock the VFS node. */
717 if ((read) ||
718 ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
719 fibril_rwlock_read_unlock(&file->node->contents_rwlock);
720 else {
721 /* Update the cached version of node's size. */
722 if (rc == EOK)
723 file->node->size = MERGE_LOUP32(IPC_GET_ARG2(answer),
724 IPC_GET_ARG3(answer));
725 fibril_rwlock_write_unlock(&file->node->contents_rwlock);
726 }
727
728 /* Update the position pointer and unlock the open file. */
729 if (rc == EOK)
730 file->pos += bytes;
731 fibril_mutex_unlock(&file->lock);
732 vfs_file_put(file);
733
734 /*
735 * FS server's reply is the final result of the whole operation we
736 * return to the client.
737 */
738 async_answer_1(rid, rc, bytes);
739}
740
741void vfs_read(ipc_callid_t rid, ipc_call_t *request)
742{
743 vfs_rdwr(rid, request, true);
744}
745
746void vfs_write(ipc_callid_t rid, ipc_call_t *request)
747{
748 vfs_rdwr(rid, request, false);
749}
750
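/** Handle a seek request.
 *
 * Supports SEEK_SET, SEEK_CUR and SEEK_END with overflow checking; the
 * resulting position is answered split into two 32-bit halves (except for
 * SEEK_SET, which answers the offset in a single argument).
 */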
751void vfs_seek(ipc_callid_t rid, ipc_call_t *request)
752{
753 int fd = (int) IPC_GET_ARG1(*request);
754 off64_t off = (off64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
755 IPC_GET_ARG3(*request));
756 int whence = (int) IPC_GET_ARG4(*request);
757
758 /* Lookup the file structure corresponding to the file descriptor. */
759 vfs_file_t *file = vfs_file_get(fd);
760 if (!file) {
761 async_answer_0(rid, ENOENT);
762 return;
763 }
764
765 fibril_mutex_lock(&file->lock);
766
767 off64_t newoff;
768 switch (whence) {
769 case SEEK_SET:
770 if (off >= 0) {
771 file->pos = (aoff64_t) off;
772 fibril_mutex_unlock(&file->lock);
773 vfs_file_put(file);
774 async_answer_1(rid, EOK, off);
775 return;
776 }
777 break;
778 case SEEK_CUR:
779 if ((off >= 0) && (file->pos + off < file->pos)) {
780 fibril_mutex_unlock(&file->lock);
781 vfs_file_put(file);
782 async_answer_0(rid, EOVERFLOW);
783 return;
784 }
785
786 if ((off < 0) && (file->pos < (aoff64_t) -off)) {
787 fibril_mutex_unlock(&file->lock);
788 vfs_file_put(file);
789 async_answer_0(rid, EOVERFLOW);
790 return;
791 }
792
793 file->pos += off;
794 newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;
795
796 fibril_mutex_unlock(&file->lock);
797 vfs_file_put(file);
798 async_answer_2(rid, EOK, LOWER32(newoff),
799 UPPER32(newoff));
800 return;
801 case SEEK_END:
802 fibril_rwlock_read_lock(&file->node->contents_rwlock);
803 aoff64_t size = vfs_node_get_size(file->node);
804
805 if ((off >= 0) && (size + off < size)) {
806 fibril_rwlock_read_unlock(&file->node->contents_rwlock);
807 fibril_mutex_unlock(&file->lock);
808 vfs_file_put(file);
809 async_answer_0(rid, EOVERFLOW);
810 return;
811 }
812
813 if ((off < 0) && (size < (aoff64_t) -off)) {
814 fibril_rwlock_read_unlock(&file->node->contents_rwlock);
815 fibril_mutex_unlock(&file->lock);
816 vfs_file_put(file);
817 async_answer_0(rid, EOVERFLOW);
818 return;
819 }
820
821 file->pos = size + off;
822 newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;
823
824 fibril_rwlock_read_unlock(&file->node->contents_rwlock);
825 fibril_mutex_unlock(&file->lock);
826 vfs_file_put(file);
827 async_answer_2(rid, EOK, LOWER32(newoff), UPPER32(newoff));
828 return;
829 }
830
831 fibril_mutex_unlock(&file->lock);
832 vfs_file_put(file);
833 async_answer_0(rid, EINVAL);
834}
835
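/** Ask the FS server to truncate a file to @a size via VFS_OUT_TRUNCATE. */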
836int vfs_truncate_internal(fs_handle_t fs_handle, service_id_t service_id,
837 fs_index_t index, aoff64_t size)
838{
839 async_exch_t *exch = vfs_exchange_grab(fs_handle);
840 sysarg_t rc = async_req_4_0(exch, VFS_OUT_TRUNCATE,
841 (sysarg_t) service_id, (sysarg_t) index, LOWER32(size),
842 UPPER32(size));
843 vfs_exchange_release(exch);
844
845 return (int) rc;
846}
847
848void vfs_truncate(ipc_callid_t rid, ipc_call_t *request)
849{
850 int fd = IPC_GET_ARG1(*request);
851 aoff64_t size = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
852 IPC_GET_ARG3(*request));
853 int rc;
854
855 vfs_file_t *file = vfs_file_get(fd);
856 if (!file) {
857 async_answer_0(rid, ENOENT);
858 return;
859 }
860 fibril_mutex_lock(&file->lock);
861
862 fibril_rwlock_write_lock(&file->node->contents_rwlock);
863 rc = vfs_truncate_internal(file->node->fs_handle,
864 file->node->service_id, file->node->index, size);
865 if (rc == EOK)
866 file->node->size = size;
867 fibril_rwlock_write_unlock(&file->node->contents_rwlock);
868
869 fibril_mutex_unlock(&file->lock);
870 vfs_file_put(file);
871 async_answer_0(rid, (sysarg_t)rc);
872}
873
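/** Handle a stat request: forward VFS_OUT_STAT together with the client's
 * IPC_M_DATA_READ buffer to the FS server backing the file's node.
 */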
874void vfs_fstat(ipc_callid_t rid, ipc_call_t *request)
875{
876 int fd = IPC_GET_ARG1(*request);
877 sysarg_t rc;
878
879 vfs_file_t *file = vfs_file_get(fd);
880 if (!file) {
881 async_answer_0(rid, ENOENT);
882 return;
883 }
884
885 ipc_callid_t callid;
886 if (!async_data_read_receive(&callid, NULL)) {
887 vfs_file_put(file);
888 async_answer_0(callid, EINVAL);
889 async_answer_0(rid, EINVAL);
890 return;
891 }
892
893 fibril_mutex_lock(&file->lock);
894
895 async_exch_t *exch = vfs_exchange_grab(file->node->fs_handle);
896
897 aid_t msg;
898 msg = async_send_3(exch, VFS_OUT_STAT, file->node->service_id,
899 file->node->index, true, NULL);
900 async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);
901
902 vfs_exchange_release(exch);
903
904 async_wait_for(msg, &rc);
905
906 fibril_mutex_unlock(&file->lock);
907 vfs_file_put(file);
908 async_answer_0(rid, rc);
909}
910
911static void out_destroy(vfs_triplet_t *file)
912{
913 async_exch_t *exch = vfs_exchange_grab(file->fs_handle);
914 async_msg_2(exch, VFS_OUT_DESTROY,
915 (sysarg_t) file->service_id, (sysarg_t) file->index);
916 vfs_exchange_release(exch);
917}
918
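/** Handle an unlink request.
 *
 * Unlinks the received path relative to the parent descriptor in ARG1. If
 * the descriptor in ARG2 is non-negative, the path must currently resolve to
 * that very file, otherwise ENOENT is returned. If no one holds a reference
 * to the unlinked node, it is destroyed on the FS server.
 */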
919void vfs_unlink2(ipc_callid_t rid, ipc_call_t *request)
920{
921 int rc;
922 char *path;
923 vfs_file_t *parent = NULL;
924 vfs_file_t *expect = NULL;
925 vfs_node_t *parent_node = root;
926
927 int parentfd = IPC_GET_ARG1(*request);
928 int expectfd = IPC_GET_ARG2(*request);
929 int wflag = IPC_GET_ARG3(*request);
930
931 rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
932 if (rc != EOK) {
933 async_answer_0(rid, rc);
934 return;
935 }
936
937 fibril_rwlock_write_lock(&namespace_rwlock);
938
	int lflag = (wflag & WALK_DIRECTORY) ? L_DIRECTORY : 0;
940
941 if (parentfd >= 0) {
942 parent = vfs_file_get(parentfd);
943 if (!parent) {
944 rc = ENOENT;
945 goto exit;
946 }
947 parent_node = parent->node;
948 }
949
950 if (expectfd >= 0) {
951 expect = vfs_file_get(expectfd);
952 if (!expect) {
953 rc = ENOENT;
954 goto exit;
955 }
956
957 vfs_lookup_res_t lr;
958 rc = vfs_lookup_internal(parent_node, path, lflag, &lr);
959 if (rc != EOK) {
960 goto exit;
961 }
962
963 if (__builtin_memcmp(&lr.triplet, expect->node, sizeof(vfs_triplet_t)) != 0) {
964 rc = ENOENT;
965 goto exit;
966 }
967
968 vfs_file_put(expect);
969 expect = NULL;
970 }
971
972 vfs_lookup_res_t lr;
973 rc = vfs_lookup_internal(parent_node, path, lflag | L_UNLINK, &lr);
974 if (rc != EOK) {
975 goto exit;
976 }
977
978 /* If the node is not held by anyone, try to destroy it. */
979 if (vfs_node_peek(&lr) == NULL) {
980 out_destroy(&lr.triplet);
981 }
982
983exit:
984 if (path) {
985 free(path);
986 }
987 if (parent) {
988 vfs_file_put(parent);
989 }
990 if (expect) {
991 vfs_file_put(expect);
992 }
993 fibril_rwlock_write_unlock(&namespace_rwlock);
994 async_answer_0(rid, rc);
995}
996
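/** Return the length of the longest common directory prefix of two paths.
 *
 * For example, shared_path("/a/b/c", "/a/b/d") returns 4, the length of
 * "/a/b".
 */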
997static size_t shared_path(char *a, char *b)
998{
999 size_t res = 0;
1000
1001 while (a[res] == b[res] && a[res] != 0) {
1002 res++;
1003 }
1004
1005 if (a[res] == b[res]) {
1006 return res;
1007 }
1008
1009 res--;
1010 while (a[res] != '/') {
1011 res--;
1012 }
1013 return res;
1014}
1015
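/** Rename @a old to @a new relative to @a base.
 *
 * The shared prefix of the two canonical paths is resolved first, then the
 * destination is unlinked (if it exists), and the source is unlinked and
 * linked under the new name. On failure, the previous state is restored as
 * far as possible.
 */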
1016static int vfs_rename_internal(vfs_node_t *base, char *old, char *new)
1017{
1018 assert(base != NULL);
1019 assert(old != NULL);
1020 assert(new != NULL);
1021
1022 vfs_lookup_res_t base_lr;
1023 vfs_lookup_res_t old_lr;
1024 vfs_lookup_res_t new_lr_orig;
1025 bool orig_unlinked = false;
1026
1027 int rc;
1028
1029 size_t shared = shared_path(old, new);
1030
1031 /* Do not allow one path to be a prefix of the other. */
1032 if (old[shared] == 0 || new[shared] == 0) {
1033 return EINVAL;
1034 }
1035 assert(old[shared] == '/');
1036 assert(new[shared] == '/');
1037
1038 fibril_rwlock_write_lock(&namespace_rwlock);
1039
1040 /* Resolve the shared portion of the path first. */
1041 if (shared != 0) {
1042 old[shared] = 0;
1043 rc = vfs_lookup_internal(base, old, L_DIRECTORY, &base_lr);
1044 if (rc != EOK) {
1045 fibril_rwlock_write_unlock(&namespace_rwlock);
1046 return rc;
1047 }
1048
1049 base = vfs_node_get(&base_lr);
1050 old[shared] = '/';
1051 old += shared;
1052 new += shared;
1053 } else {
1054 vfs_node_addref(base);
	}

1058 rc = vfs_lookup_internal(base, new, L_UNLINK | L_DISABLE_MOUNTS, &new_lr_orig);
1059 if (rc == EOK) {
1060 orig_unlinked = true;
1061 } else if (rc != ENOENT) {
1062 vfs_node_put(base);
1063 fibril_rwlock_write_unlock(&namespace_rwlock);
1064 return rc;
1065 }
1066
1067 rc = vfs_lookup_internal(base, old, L_UNLINK | L_DISABLE_MOUNTS, &old_lr);
1068 if (rc != EOK) {
1069 if (orig_unlinked) {
1070 vfs_link_internal(base, new, &new_lr_orig.triplet);
1071 }
1072 vfs_node_put(base);
1073 fibril_rwlock_write_unlock(&namespace_rwlock);
1074 return rc;
1075 }
1076
1077 rc = vfs_link_internal(base, new, &old_lr.triplet);
1078 if (rc != EOK) {
1079 vfs_link_internal(base, old, &old_lr.triplet);
1080 if (orig_unlinked) {
1081 vfs_link_internal(base, new, &new_lr_orig.triplet);
1082 }
1083 vfs_node_put(base);
1084 fibril_rwlock_write_unlock(&namespace_rwlock);
1085 return rc;
1086 }
1087
1088 /* If the node is not held by anyone, try to destroy it. */
1089 if (orig_unlinked && vfs_node_peek(&new_lr_orig) == NULL) {
1090 out_destroy(&new_lr_orig.triplet);
1091 }
1092
1093 vfs_node_put(base);
1094 fibril_rwlock_write_unlock(&namespace_rwlock);
1095 return EOK;
1096}
1097
1098void vfs_rename(ipc_callid_t rid, ipc_call_t *request)
1099{
1100 /* The common base directory. */
1101 int basefd;
1102 char *old = NULL;
1103 char *new = NULL;
1104 vfs_file_t *base = NULL;
1105 int rc;
1106
1107 basefd = IPC_GET_ARG1(*request);
1108
1109 /* Retrieve the old path. */
1110 rc = async_data_write_accept((void **) &old, true, 0, 0, 0, NULL);
1111 if (rc != EOK) {
1112 goto out;
1113 }
1114
1115 /* Retrieve the new path. */
1116 rc = async_data_write_accept((void **) &new, true, 0, 0, 0, NULL);
1117 if (rc != EOK) {
1118 goto out;
1119 }
1120
1121 size_t olen;
1122 size_t nlen;
1123 char *oldc = canonify(old, &olen);
1124 char *newc = canonify(new, &nlen);
1125
1126 if ((!oldc) || (!newc)) {
1127 rc = EINVAL;
1128 goto out;
1129 }
1130
1131 assert(oldc[olen] == '\0');
1132 assert(newc[nlen] == '\0');
1133
1134 /* Lookup the file structure corresponding to the file descriptor. */
1135 vfs_node_t *base_node = root;
1136 // TODO: Client-side root.
1137 if (basefd != -1) {
1138 base = vfs_file_get(basefd);
1139 if (!base) {
1140 rc = EBADF;
1141 goto out;
1142 }
1143 base_node = base->node;
1144 }
1145
1146 rc = vfs_rename_internal(base_node, oldc, newc);
1147
1148out:
1149 async_answer_0(rid, rc);
1150
1151 if (old) {
1152 free(old);
1153 }
1154 if (new) {
1155 free(new);
1156 }
1157 if (base) {
1158 vfs_file_put(base);
1159 }
1160}
1161
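/** Handle a dup2-style request: close the new descriptor if it is open and
 * make it refer to the same open file as the old one.
 */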
1162void vfs_dup(ipc_callid_t rid, ipc_call_t *request)
1163{
1164 int oldfd = IPC_GET_ARG1(*request);
1165 int newfd = IPC_GET_ARG2(*request);
1166
1167 /* If the file descriptors are the same, do nothing. */
1168 if (oldfd == newfd) {
1169 async_answer_1(rid, EOK, newfd);
1170 return;
1171 }
1172
1173 /* Lookup the file structure corresponding to oldfd. */
1174 vfs_file_t *oldfile = vfs_file_get(oldfd);
1175 if (!oldfile) {
1176 async_answer_0(rid, EBADF);
1177 return;
1178 }
1179
1180 /*
1181 * Lock the open file structure so that no other thread can manipulate
1182 * the same open file at a time.
1183 */
1184 fibril_mutex_lock(&oldfile->lock);
1185
1186 /* Make sure newfd is closed. */
1187 (void) vfs_fd_free(newfd);
1188
1189 /* Assign the old file to newfd. */
1190 int ret = vfs_fd_assign(oldfile, newfd);
1191 fibril_mutex_unlock(&oldfile->lock);
1192 vfs_file_put(oldfile);
1193
1194 if (ret != EOK)
1195 async_answer_0(rid, ret);
1196 else
1197 async_answer_1(rid, EOK, newfd);
1198}
1199
1200void vfs_wait_handle(ipc_callid_t rid, ipc_call_t *request)
1201{
1202 int fd = vfs_wait_handle_internal();
1203 async_answer_1(rid, EOK, fd);
1204}
1205
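/** Handle a get-mtab request.
 *
 * After an initial VFS_IN_PING that is answered with the number of mounted
 * file systems, the client reads the mount point, options and FS name of
 * each mtab entry and then sends another VFS_IN_PING, which is answered
 * with the entry's instance number and service ID.
 */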
1206void vfs_get_mtab(ipc_callid_t rid, ipc_call_t *request)
1207{
1208 ipc_callid_t callid;
1209 ipc_call_t data;
1210 sysarg_t rc = EOK;
1211 size_t len;
1212
1213 fibril_mutex_lock(&mtab_list_lock);
1214
1215 /* Send to the caller the number of mounted filesystems */
1216 callid = async_get_call(&data);
1217 if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
1218 rc = ENOTSUP;
1219 async_answer_0(callid, rc);
1220 goto exit;
1221 }
1222 async_answer_1(callid, EOK, mtab_size);
1223
1224 list_foreach(mtab_list, cur) {
1225 mtab_ent_t *mtab_ent = list_get_instance(cur, mtab_ent_t,
1226 link);
1227
1228 rc = ENOTSUP;
1229
1230 if (!async_data_read_receive(&callid, &len)) {
1231 async_answer_0(callid, rc);
1232 goto exit;
1233 }
1234
1235 (void) async_data_read_finalize(callid, mtab_ent->mp,
1236 str_size(mtab_ent->mp));
1237
1238 if (!async_data_read_receive(&callid, &len)) {
1239 async_answer_0(callid, rc);
1240 goto exit;
1241 }
1242
1243 (void) async_data_read_finalize(callid, mtab_ent->opts,
1244 str_size(mtab_ent->opts));
1245
1246 if (!async_data_read_receive(&callid, &len)) {
1247 async_answer_0(callid, rc);
1248 goto exit;
1249 }
1250
1251 (void) async_data_read_finalize(callid, mtab_ent->fs_name,
1252 str_size(mtab_ent->fs_name));
1253
1254 callid = async_get_call(&data);
1255
1256 if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
1257 async_answer_0(callid, rc);
1258 goto exit;
1259 }
1260
1261 rc = EOK;
1262 async_answer_2(callid, rc, mtab_ent->instance,
1263 mtab_ent->service_id);
1264 }
1265
1266exit:
1267 fibril_mutex_unlock(&mtab_list_lock);
1268 async_answer_0(rid, rc);
1269}
1270
1271/**
1272 * @}
1273 */