source: mainline/uspace/srv/vfs/vfs_ops.c@ 6b8e5b74

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 6b8e5b74 was 6b8e5b74, checked in by Maurizio Lombardi <m.lombardi85@…>, 14 years ago

vfs_get_mtab(): service_id is more useful than flags and fs_handle

  • Property mode set to 100644
File size: 34.7 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file vfs_ops.c
35 * @brief Operations that VFS offers to its clients.
36 */
37
38#include "vfs.h"
39#include <macros.h>
40#include <stdint.h>
41#include <async.h>
42#include <errno.h>
43#include <stdio.h>
44#include <stdlib.h>
45#include <str.h>
46#include <bool.h>
47#include <fibril_synch.h>
48#include <adt/list.h>
49#include <unistd.h>
50#include <ctype.h>
51#include <fcntl.h>
52#include <assert.h>
53#include <vfs/canonify.h>
54#include <vfs/vfs_mtab.h>
55
/* Protects mtab_list and mtab_size. */
FIBRIL_MUTEX_INITIALIZE(mtab_list_lock);
/* List of mtab_ent_t entries, one per mounted file system. */
LIST_INITIALIZE(mtab_list);
/* Number of entries on mtab_list. */
static size_t mtab_size = 0;

/* Forward declarations of static functions. */
static int vfs_truncate_internal(fs_handle_t, service_id_t, fs_index_t,
    aoff64_t);

/**
 * This rwlock prevents the race between a triplet-to-VFS-node resolution and a
 * concurrent VFS operation which modifies the file system namespace.
 */
FIBRIL_RWLOCK_INITIALIZE(namespace_rwlock);

/* Root file system; fs_handle == 0 means no root is mounted yet. */
vfs_pair_t rootfs = {
	.fs_handle = 0,
	.service_id = 0
};
74
/** Perform the actual mount of a file system on a mount point.
 *
 * Handles both the initial mount of the root file system and mounts over an
 * existing namespace. Always answers @a rid with the result. On success of a
 * non-root mount, a reference to the mount point node is retained; it is
 * dropped by the corresponding VFS_IN_UNMOUNT.
 *
 * @param rid        Request ID to answer.
 * @param service_id Service ID of the backing device/service.
 * @param fs_handle  Handle of the file system implementation (the mountee).
 * @param mp         Mount point path.
 * @param opts       Mount options string forwarded to the mountee.
 */
static void vfs_mount_internal(ipc_callid_t rid, service_id_t service_id,
    fs_handle_t fs_handle, char *mp, char *opts)
{
	vfs_lookup_res_t mp_res;
	vfs_lookup_res_t mr_res;
	vfs_node_t *mp_node = NULL;
	vfs_node_t *mr_node;
	fs_index_t rindex;
	aoff64_t rsize;
	unsigned rlnkcnt;
	async_exch_t *exch;
	sysarg_t rc;
	aid_t msg;
	ipc_call_t answer;

	/* Resolve the path to the mountpoint. */
	fibril_rwlock_write_lock(&namespace_rwlock);
	if (rootfs.fs_handle) {
		/* We already have the root FS. */
		if (str_cmp(mp, "/") == 0) {
			/* Trying to mount root FS over root FS */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, EBUSY);
			return;
		}

		rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
		if (rc != EOK) {
			/* The lookup failed for some reason. */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return;
		}

		mp_node = vfs_node_get(&mp_res);
		if (!mp_node) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOMEM);
			return;
		}

		/*
		 * Now we hold a reference to mp_node.
		 * It will be dropped upon the corresponding VFS_IN_UNMOUNT.
		 * This prevents the mount point from being deleted.
		 */
	} else {
		/* We still don't have the root file system mounted. */
		if (str_cmp(mp, "/") == 0) {
			/*
			 * For this simple, but important case,
			 * we are almost done.
			 */

			/* Tell the mountee that it is being mounted. */
			exch = vfs_exchange_grab(fs_handle);
			msg = async_send_1(exch, VFS_OUT_MOUNTED,
			    (sysarg_t) service_id, &answer);
			/* Send the mount options */
			rc = async_data_write_start(exch, (void *)opts,
			    str_size(opts));
			vfs_exchange_release(exch);

			if (rc != EOK) {
				/* Collect the pending answer before failing. */
				async_wait_for(msg, NULL);
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return;
			}
			async_wait_for(msg, &rc);

			if (rc != EOK) {
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return;
			}

			/* Unpack the root node triplet returned by the mountee. */
			rindex = (fs_index_t) IPC_GET_ARG1(answer);
			rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
			rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

			mr_res.triplet.fs_handle = fs_handle;
			mr_res.triplet.service_id = service_id;
			mr_res.triplet.index = rindex;
			mr_res.size = rsize;
			mr_res.lnkcnt = rlnkcnt;
			mr_res.type = VFS_NODE_DIRECTORY;

			rootfs.fs_handle = fs_handle;
			rootfs.service_id = service_id;

			/* Add reference to the mounted root. */
			mr_node = vfs_node_get(&mr_res);
			assert(mr_node);

			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return;
		} else {
			/*
			 * We can't resolve this without the root filesystem
			 * being mounted first.
			 */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOENT);
			return;
		}
	}

	/*
	 * At this point, we have all necessary pieces: file system handle
	 * and service ID, and we know the mount point VFS node.
	 */

	async_exch_t *mountee_exch = vfs_exchange_grab(fs_handle);
	assert(mountee_exch);

	/* Ask the FS implementing the mount point to perform the mount. */
	exch = vfs_exchange_grab(mp_res.triplet.fs_handle);
	msg = async_send_4(exch, VFS_OUT_MOUNT,
	    (sysarg_t) mp_res.triplet.service_id,
	    (sysarg_t) mp_res.triplet.index,
	    (sysarg_t) fs_handle,
	    (sysarg_t) service_id, &answer);

	/* Send connection */
	rc = async_exchange_clone(exch, mountee_exch);
	vfs_exchange_release(mountee_exch);

	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_wait_for(msg, NULL);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		async_answer_0(rid, rc);
		fibril_rwlock_write_unlock(&namespace_rwlock);
		return;
	}

	/* send the mount options */
	rc = async_data_write_start(exch, (void *) opts, str_size(opts));
	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_wait_for(msg, NULL);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Wait for the answer before releasing the exchange to avoid deadlock
	 * in case the answer depends on further calls to the same file system.
	 * Think of a case when mounting a FS on a file_bd backed by a file on
	 * the same FS.
	 */
	async_wait_for(msg, &rc);
	vfs_exchange_release(exch);

	if (rc == EOK) {
		/* Unpack the mounted root triplet from the answer. */
		rindex = (fs_index_t) IPC_GET_ARG1(answer);
		rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
		    IPC_GET_ARG3(answer));
		rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

		mr_res.triplet.fs_handle = fs_handle;
		mr_res.triplet.service_id = service_id;
		mr_res.triplet.index = rindex;
		mr_res.size = rsize;
		mr_res.lnkcnt = rlnkcnt;
		mr_res.type = VFS_NODE_DIRECTORY;

		/* Add reference to the mounted root. */
		mr_node = vfs_node_get(&mr_res);
		assert(mr_node);
	} else {
		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);
	}

	async_answer_0(rid, rc);
	fibril_rwlock_write_unlock(&namespace_rwlock);
}
266
/** Handle the VFS_IN_MOUNT request.
 *
 * Receives the mount point, mount options and file system name from the
 * client, resolves the file system handle and delegates the actual mount to
 * vfs_mount_internal(). Also records the mount in the in-memory mtab.
 *
 * @param rid     Request ID to answer.
 * @param request The VFS_IN_MOUNT call; ARG1 = service ID, ARG2 = flags,
 *                ARG3 = instance number.
 */
void vfs_mount(ipc_callid_t rid, ipc_call_t *request)
{
	service_id_t service_id;

	/*
	 * We expect the library to do the device-name to device-handle
	 * translation for us, thus the device handle will arrive as ARG1
	 * in the request.
	 */
	service_id = (service_id_t) IPC_GET_ARG1(*request);

	/*
	 * Mount flags are passed as ARG2.
	 */
	unsigned int flags = (unsigned int) IPC_GET_ARG2(*request);

	/*
	 * Instance number is passed as ARG3.
	 */
	unsigned int instance = IPC_GET_ARG3(*request);

	/* We want the client to send us the mount point. */
	char *mp;
	int rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
	    0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* Now we expect to receive the mount options. */
	char *opts;
	rc = async_data_write_accept((void **) &opts, true, 0, MAX_MNTOPTS_LEN,
	    0, NULL);
	if (rc != EOK) {
		free(mp);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Now, we expect the client to send us data with the name of the file
	 * system.
	 */
	char *fs_name;
	rc = async_data_write_accept((void **) &fs_name, true, 0,
	    FS_NAME_MAXLEN, 0, NULL);
	if (rc != EOK) {
		free(mp);
		free(opts);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Wait for VFS_IN_PING so that we can return an error if we don't know
	 * fs_name.
	 */
	ipc_call_t data;
	ipc_callid_t callid = async_get_call(&data);
	if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
		async_answer_0(callid, ENOTSUP);
		async_answer_0(rid, ENOTSUP);
		free(mp);
		free(opts);
		free(fs_name);
		return;
	}

	/*
	 * Check if we know a file system with the same name as is in fs_name.
	 * This will also give us its file system handle.
	 */
	fibril_mutex_lock(&fs_list_lock);
	fs_handle_t fs_handle;
recheck:
	fs_handle = fs_name_to_handle(instance, fs_name, false);
	if (!fs_handle) {
		/* With IPC_FLAG_BLOCKING, wait for the FS to register. */
		if (flags & IPC_FLAG_BLOCKING) {
			fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
			goto recheck;
		}

		fibril_mutex_unlock(&fs_list_lock);
		async_answer_0(callid, ENOENT);
		async_answer_0(rid, ENOENT);
		free(mp);
		free(fs_name);
		free(opts);
		return;
	}
	fibril_mutex_unlock(&fs_list_lock);

	/* Add the filesystem info to the list of mounted filesystems */
	mtab_ent_t *mtab_ent = malloc(sizeof(mtab_ent_t));
	if (!mtab_ent) {
		async_answer_0(callid, ENOMEM);
		async_answer_0(rid, ENOMEM);
		free(mp);
		free(fs_name);
		free(opts);
		return;
	}

	str_cpy(mtab_ent->mp, MAX_PATH_LEN, mp);
	str_cpy(mtab_ent->fs_name, FS_NAME_MAXLEN, fs_name);
	str_cpy(mtab_ent->opts, MAX_MNTOPTS_LEN, opts);
	mtab_ent->instance = instance;
	mtab_ent->service_id = service_id;

	link_initialize(&mtab_ent->link);

	fibril_mutex_lock(&mtab_list_lock);
	list_append(&mtab_ent->link, &mtab_list);
	mtab_size++;
	fibril_mutex_unlock(&mtab_list_lock);

	/*
	 * Do the mount; vfs_mount_internal() answers rid itself.
	 *
	 * NOTE(review): the mtab entry added above is not removed if
	 * vfs_mount_internal() fails, leaving a stale mtab record --
	 * confirm whether this is intentional.
	 */
	vfs_mount_internal(rid, service_id, fs_handle, mp, opts);

	free(mp);
	free(fs_name);
	free(opts);

	/* Acknowledge that we know fs_name. */
	async_answer_0(callid, EOK);
}
394
395void vfs_unmount(ipc_callid_t rid, ipc_call_t *request)
396{
397 int rc;
398 char *mp;
399 vfs_lookup_res_t mp_res;
400 vfs_lookup_res_t mr_res;
401 vfs_node_t *mr_node;
402 async_exch_t *exch;
403
404 /*
405 * Receive the mount point path.
406 */
407 rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
408 0, NULL);
409 if (rc != EOK)
410 async_answer_0(rid, rc);
411
412 /*
413 * Taking the namespace lock will do two things for us. First, it will
414 * prevent races with other lookup operations. Second, it will stop new
415 * references to already existing VFS nodes and creation of new VFS
416 * nodes. This is because new references are added as a result of some
417 * lookup operation or at least of some operation which is protected by
418 * the namespace lock.
419 */
420 fibril_rwlock_write_lock(&namespace_rwlock);
421
422 /*
423 * Lookup the mounted root and instantiate it.
424 */
425 rc = vfs_lookup_internal(mp, L_ROOT, &mr_res, NULL);
426 if (rc != EOK) {
427 fibril_rwlock_write_unlock(&namespace_rwlock);
428 free(mp);
429 async_answer_0(rid, rc);
430 return;
431 }
432 mr_node = vfs_node_get(&mr_res);
433 if (!mr_node) {
434 fibril_rwlock_write_unlock(&namespace_rwlock);
435 free(mp);
436 async_answer_0(rid, ENOMEM);
437 return;
438 }
439
440 /*
441 * Count the total number of references for the mounted file system. We
442 * are expecting at least two. One which we got above and one which we
443 * got when the file system was mounted. If we find more, it means that
444 * the file system cannot be gracefully unmounted at the moment because
445 * someone is working with it.
446 */
447 if (vfs_nodes_refcount_sum_get(mr_node->fs_handle,
448 mr_node->service_id) != 2) {
449 fibril_rwlock_write_unlock(&namespace_rwlock);
450 vfs_node_put(mr_node);
451 free(mp);
452 async_answer_0(rid, EBUSY);
453 return;
454 }
455
456 if (str_cmp(mp, "/") == 0) {
457
458 /*
459 * Unmounting the root file system.
460 *
461 * In this case, there is no mount point node and we send
462 * VFS_OUT_UNMOUNTED directly to the mounted file system.
463 */
464
465 exch = vfs_exchange_grab(mr_node->fs_handle);
466 rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED,
467 mr_node->service_id);
468 vfs_exchange_release(exch);
469
470 if (rc != EOK) {
471 fibril_rwlock_write_unlock(&namespace_rwlock);
472 free(mp);
473 vfs_node_put(mr_node);
474 async_answer_0(rid, rc);
475 return;
476 }
477
478 rootfs.fs_handle = 0;
479 rootfs.service_id = 0;
480 } else {
481
482 /*
483 * Unmounting a non-root file system.
484 *
485 * We have a regular mount point node representing the parent
486 * file system, so we delegate the operation to it.
487 */
488
489 rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
490 if (rc != EOK) {
491 fibril_rwlock_write_unlock(&namespace_rwlock);
492 free(mp);
493 vfs_node_put(mr_node);
494 async_answer_0(rid, rc);
495 return;
496 }
497
498 vfs_node_t *mp_node = vfs_node_get(&mp_res);
499 if (!mp_node) {
500 fibril_rwlock_write_unlock(&namespace_rwlock);
501 free(mp);
502 vfs_node_put(mr_node);
503 async_answer_0(rid, ENOMEM);
504 return;
505 }
506
507 exch = vfs_exchange_grab(mp_node->fs_handle);
508 rc = async_req_2_0(exch, VFS_OUT_UNMOUNT,
509 mp_node->service_id, mp_node->index);
510 vfs_exchange_release(exch);
511
512 if (rc != EOK) {
513 fibril_rwlock_write_unlock(&namespace_rwlock);
514 free(mp);
515 vfs_node_put(mp_node);
516 vfs_node_put(mr_node);
517 async_answer_0(rid, rc);
518 return;
519 }
520
521 /* Drop the reference we got above. */
522 vfs_node_put(mp_node);
523 /* Drop the reference from when the file system was mounted. */
524 vfs_node_put(mp_node);
525 }
526
527 /*
528 * All went well, the mounted file system was successfully unmounted.
529 * The only thing left is to forget the unmounted root VFS node.
530 */
531 vfs_node_forget(mr_node);
532 fibril_rwlock_write_unlock(&namespace_rwlock);
533
534 fibril_mutex_lock(&mtab_list_lock);
535
536 int found = 0;
537
538 list_foreach(mtab_list, cur) {
539 mtab_ent_t *mtab_ent = list_get_instance(cur, mtab_ent_t,
540 link);
541
542 if (str_cmp(mtab_ent->mp, mp) == 0) {
543 list_remove(&mtab_ent->link);
544 mtab_size--;
545 free(mtab_ent);
546 found = 1;
547 break;
548 }
549 }
550 assert(found);
551
552 free(mp);
553
554 fibril_mutex_unlock(&mtab_list_lock);
555 async_answer_0(rid, EOK);
556}
557
558void vfs_open(ipc_callid_t rid, ipc_call_t *request)
559{
560 /*
561 * The POSIX interface is open(path, oflag, mode).
562 * We can receive oflags and mode along with the VFS_IN_OPEN call;
563 * the path will need to arrive in another call.
564 *
565 * We also receive one private, non-POSIX set of flags called lflag
566 * used to pass information to vfs_lookup_internal().
567 */
568 int lflag = IPC_GET_ARG1(*request);
569 int oflag = IPC_GET_ARG2(*request);
570 int mode = IPC_GET_ARG3(*request);
571
572 /* Ignore mode for now. */
573 (void) mode;
574
575 /*
576 * Make sure that we are called with exactly one of L_FILE and
577 * L_DIRECTORY. Make sure that the user does not pass L_OPEN,
578 * L_ROOT or L_MP.
579 */
580 if (((lflag & (L_FILE | L_DIRECTORY)) == 0) ||
581 ((lflag & (L_FILE | L_DIRECTORY)) == (L_FILE | L_DIRECTORY)) ||
582 (lflag & (L_OPEN | L_ROOT | L_MP))) {
583 async_answer_0(rid, EINVAL);
584 return;
585 }
586
587 if (oflag & O_CREAT)
588 lflag |= L_CREATE;
589 if (oflag & O_EXCL)
590 lflag |= L_EXCLUSIVE;
591
592 char *path;
593 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
594 if (rc != EOK) {
595 async_answer_0(rid, rc);
596 return;
597 }
598
599 /*
600 * Avoid the race condition in which the file can be deleted before we
601 * find/create-and-lock the VFS node corresponding to the looked-up
602 * triplet.
603 */
604 if (lflag & L_CREATE)
605 fibril_rwlock_write_lock(&namespace_rwlock);
606 else
607 fibril_rwlock_read_lock(&namespace_rwlock);
608
609 /* The path is now populated and we can call vfs_lookup_internal(). */
610 vfs_lookup_res_t lr;
611 rc = vfs_lookup_internal(path, lflag | L_OPEN, &lr, NULL);
612 if (rc != EOK) {
613 if (lflag & L_CREATE)
614 fibril_rwlock_write_unlock(&namespace_rwlock);
615 else
616 fibril_rwlock_read_unlock(&namespace_rwlock);
617 async_answer_0(rid, rc);
618 free(path);
619 return;
620 }
621
622 /* Path is no longer needed. */
623 free(path);
624
625 vfs_node_t *node = vfs_node_get(&lr);
626 if (lflag & L_CREATE)
627 fibril_rwlock_write_unlock(&namespace_rwlock);
628 else
629 fibril_rwlock_read_unlock(&namespace_rwlock);
630
631 /* Truncate the file if requested and if necessary. */
632 if (oflag & O_TRUNC) {
633 fibril_rwlock_write_lock(&node->contents_rwlock);
634 if (node->size) {
635 rc = vfs_truncate_internal(node->fs_handle,
636 node->service_id, node->index, 0);
637 if (rc) {
638 fibril_rwlock_write_unlock(&node->contents_rwlock);
639 vfs_node_put(node);
640 async_answer_0(rid, rc);
641 return;
642 }
643 node->size = 0;
644 }
645 fibril_rwlock_write_unlock(&node->contents_rwlock);
646 }
647
648 /*
649 * Get ourselves a file descriptor and the corresponding vfs_file_t
650 * structure.
651 */
652 int fd = vfs_fd_alloc((oflag & O_DESC) != 0);
653 if (fd < 0) {
654 vfs_node_put(node);
655 async_answer_0(rid, fd);
656 return;
657 }
658 vfs_file_t *file = vfs_file_get(fd);
659 assert(file);
660 file->node = node;
661 if (oflag & O_APPEND)
662 file->append = true;
663
664 /*
665 * The following increase in reference count is for the fact that the
666 * file is being opened and that a file structure is pointing to it.
667 * It is necessary so that the file will not disappear when
668 * vfs_node_put() is called. The reference will be dropped by the
669 * respective VFS_IN_CLOSE.
670 */
671 vfs_node_addref(node);
672 vfs_node_put(node);
673 vfs_file_put(file);
674
675 /* Success! Return the new file descriptor to the client. */
676 async_answer_1(rid, EOK, fd);
677}
678
679void vfs_sync(ipc_callid_t rid, ipc_call_t *request)
680{
681 int fd = IPC_GET_ARG1(*request);
682
683 /* Lookup the file structure corresponding to the file descriptor. */
684 vfs_file_t *file = vfs_file_get(fd);
685 if (!file) {
686 async_answer_0(rid, ENOENT);
687 return;
688 }
689
690 /*
691 * Lock the open file structure so that no other thread can manipulate
692 * the same open file at a time.
693 */
694 fibril_mutex_lock(&file->lock);
695 async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);
696
697 /* Make a VFS_OUT_SYMC request at the destination FS server. */
698 aid_t msg;
699 ipc_call_t answer;
700 msg = async_send_2(fs_exch, VFS_OUT_SYNC, file->node->service_id,
701 file->node->index, &answer);
702
703 vfs_exchange_release(fs_exch);
704
705 /* Wait for reply from the FS server. */
706 sysarg_t rc;
707 async_wait_for(msg, &rc);
708
709 fibril_mutex_unlock(&file->lock);
710
711 vfs_file_put(file);
712 async_answer_0(rid, rc);
713}
714
715void vfs_close(ipc_callid_t rid, ipc_call_t *request)
716{
717 int fd = IPC_GET_ARG1(*request);
718 int ret = vfs_fd_free(fd);
719 async_answer_0(rid, ret);
720}
721
/** Common implementation of VFS_IN_READ and VFS_IN_WRITE.
 *
 * Locks the open file and its node, forwards the client's data transfer to
 * the backing file system server, updates the cached file size (on writes)
 * and the file position, and relays the result to the client.
 *
 * @param rid     Request ID to answer.
 * @param request The call; ARG1 = file descriptor.
 * @param read    True for read, false for write.
 */
static void vfs_rdwr(ipc_callid_t rid, ipc_call_t *request, bool read)
{
	/*
	 * The following code strongly depends on the fact that the files data
	 * structure can be only accessed by a single fibril and all file
	 * operations are serialized (i.e. the reads and writes cannot
	 * interleave and a file cannot be closed while it is being read).
	 *
	 * Additional synchronization needs to be added once the table of
	 * open files supports parallel access!
	 */

	int fd = IPC_GET_ARG1(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&file->lock);

	vfs_info_t *fs_info = fs_handle_to_info(file->node->fs_handle);
	assert(fs_info);

	/*
	 * Lock the file's node so that no other client can read/write to it at
	 * the same time unless the FS supports concurrent reads/writes and its
	 * write implementation does not modify the file size.
	 */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_lock(&file->node->contents_rwlock);
	else
		fibril_rwlock_write_lock(&file->node->contents_rwlock);

	if (file->node->type == VFS_NODE_DIRECTORY) {
		/*
		 * Make sure that no one is modifying the namespace
		 * while we are in readdir().
		 */
		assert(read);
		fibril_rwlock_read_lock(&namespace_rwlock);
	}

	async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

	/*
	 * Make a VFS_READ/VFS_WRITE request at the destination FS server
	 * and forward the IPC_M_DATA_READ/IPC_M_DATA_WRITE request to the
	 * destination FS server. The call will be routed as if sent by
	 * ourselves. Note that call arguments are immutable in this case so we
	 * don't have to bother.
	 */
	sysarg_t rc;
	ipc_call_t answer;
	if (read) {
		rc = async_data_read_forward_4_1(fs_exch, VFS_OUT_READ,
		    file->node->service_id, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	} else {
		/* O_APPEND: position at the current end of file. */
		if (file->append)
			file->pos = file->node->size;

		rc = async_data_write_forward_4_1(fs_exch, VFS_OUT_WRITE,
		    file->node->service_id, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	}

	vfs_exchange_release(fs_exch);

	/* Number of bytes transferred, as reported by the FS server. */
	size_t bytes = IPC_GET_ARG1(answer);

	if (file->node->type == VFS_NODE_DIRECTORY)
		fibril_rwlock_read_unlock(&namespace_rwlock);

	/* Unlock the VFS node. */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_unlock(&file->node->contents_rwlock);
	else {
		/* Update the cached version of node's size. */
		if (rc == EOK)
			file->node->size = MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
		fibril_rwlock_write_unlock(&file->node->contents_rwlock);
	}

	/* Update the position pointer and unlock the open file. */
	if (rc == EOK)
		file->pos += bytes;
	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);

	/*
	 * FS server's reply is the final result of the whole operation we
	 * return to the client.
	 */
	async_answer_1(rid, rc, bytes);
}
827
828void vfs_read(ipc_callid_t rid, ipc_call_t *request)
829{
830 vfs_rdwr(rid, request, true);
831}
832
833void vfs_write(ipc_callid_t rid, ipc_call_t *request)
834{
835 vfs_rdwr(rid, request, false);
836}
837
/** Handle the VFS_IN_SEEK request.
 *
 * Repositions the file offset of the open file given by ARG1 according to
 * the 64-bit offset in ARG2/ARG3 and the whence value in ARG4 (SEEK_SET,
 * SEEK_CUR or SEEK_END). Answers with the new position (split into two
 * 32-bit halves for SEEK_CUR/SEEK_END) or an error.
 */
void vfs_seek(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = (int) IPC_GET_ARG1(*request);
	off64_t off = (off64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
	    IPC_GET_ARG3(*request));
	int whence = (int) IPC_GET_ARG4(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	fibril_mutex_lock(&file->lock);

	off64_t newoff;
	switch (whence) {
	case SEEK_SET:
		/* Negative absolute offsets are rejected (fall through to EINVAL). */
		if (off >= 0) {
			file->pos = (aoff64_t) off;
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_1(rid, EOK, off);
			return;
		}
		break;
	case SEEK_CUR:
		/* Detect unsigned wrap-around when moving forward. */
		if ((off >= 0) && (file->pos + off < file->pos)) {
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		/* Detect moving backward past position zero. */
		if ((off < 0) && (file->pos < (aoff64_t) -off)) {
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		file->pos += off;
		/* Clamp the reported offset to the signed off64_t range. */
		newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;

		fibril_mutex_unlock(&file->lock);
		vfs_file_put(file);
		async_answer_2(rid, EOK, LOWER32(newoff),
		    UPPER32(newoff));
		return;
	case SEEK_END:
		/* Read the cached file size under the node's contents lock. */
		fibril_rwlock_read_lock(&file->node->contents_rwlock);
		aoff64_t size = file->node->size;

		/* Detect unsigned wrap-around when moving past the end. */
		if ((off >= 0) && (size + off < size)) {
			fibril_rwlock_read_unlock(&file->node->contents_rwlock);
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		/* Detect moving backward past position zero. */
		if ((off < 0) && (size < (aoff64_t) -off)) {
			fibril_rwlock_read_unlock(&file->node->contents_rwlock);
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		file->pos = size + off;
		/* Clamp the reported offset to the signed off64_t range. */
		newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;

		fibril_rwlock_read_unlock(&file->node->contents_rwlock);
		fibril_mutex_unlock(&file->lock);
		vfs_file_put(file);
		async_answer_2(rid, EOK, LOWER32(newoff), UPPER32(newoff));
		return;
	}

	/* Unknown whence value or negative SEEK_SET offset. */
	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);
	async_answer_0(rid, EINVAL);
}
922
923int vfs_truncate_internal(fs_handle_t fs_handle, service_id_t service_id,
924 fs_index_t index, aoff64_t size)
925{
926 async_exch_t *exch = vfs_exchange_grab(fs_handle);
927 sysarg_t rc = async_req_4_0(exch, VFS_OUT_TRUNCATE,
928 (sysarg_t) service_id, (sysarg_t) index, LOWER32(size),
929 UPPER32(size));
930 vfs_exchange_release(exch);
931
932 return (int) rc;
933}
934
935void vfs_truncate(ipc_callid_t rid, ipc_call_t *request)
936{
937 int fd = IPC_GET_ARG1(*request);
938 aoff64_t size = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
939 IPC_GET_ARG3(*request));
940 int rc;
941
942 vfs_file_t *file = vfs_file_get(fd);
943 if (!file) {
944 async_answer_0(rid, ENOENT);
945 return;
946 }
947 fibril_mutex_lock(&file->lock);
948
949 fibril_rwlock_write_lock(&file->node->contents_rwlock);
950 rc = vfs_truncate_internal(file->node->fs_handle,
951 file->node->service_id, file->node->index, size);
952 if (rc == EOK)
953 file->node->size = size;
954 fibril_rwlock_write_unlock(&file->node->contents_rwlock);
955
956 fibril_mutex_unlock(&file->lock);
957 vfs_file_put(file);
958 async_answer_0(rid, (sysarg_t)rc);
959}
960
/** Handle the VFS_IN_FSTAT request.
 *
 * Forwards a VFS_OUT_STAT request for the open file given by ARG1 to its
 * file system server, routing the client's data-read transfer directly to
 * that server so the stat structure is copied without passing through VFS.
 */
void vfs_fstat(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);
	sysarg_t rc;

	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/* The client must follow up with an IPC_M_DATA_READ for the stat buffer. */
	ipc_callid_t callid;
	if (!async_data_read_receive(&callid, NULL)) {
		vfs_file_put(file);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	fibril_mutex_lock(&file->lock);

	async_exch_t *exch = vfs_exchange_grab(file->node->fs_handle);

	/*
	 * Issue VFS_OUT_STAT and forward the client's data-read to the FS
	 * server; ARG3 is true here, unlike vfs_stat() which passes false
	 * (presumably distinguishes fstat from stat on the server side --
	 * TODO confirm against the FS server protocol).
	 */
	aid_t msg;
	msg = async_send_3(exch, VFS_OUT_STAT, file->node->service_id,
	    file->node->index, true, NULL);
	async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

	vfs_exchange_release(exch);

	async_wait_for(msg, &rc);

	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);
	async_answer_0(rid, rc);
}
997
/** Handle the VFS_IN_STAT request.
 *
 * Receives a path from the client, resolves it to a VFS node and forwards a
 * VFS_OUT_STAT request to the owning file system server, routing the
 * client's data-read transfer directly to that server.
 */
void vfs_stat(ipc_callid_t rid, ipc_call_t *request)
{
	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* The client must follow up with an IPC_M_DATA_READ for the stat buffer. */
	ipc_callid_t callid;
	if (!async_data_read_receive(&callid, NULL)) {
		free(path);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	/* Resolve the path under the namespace read lock. */
	vfs_lookup_res_t lr;
	fibril_rwlock_read_lock(&namespace_rwlock);
	rc = vfs_lookup_internal(path, L_NONE, &lr, NULL);
	free(path);
	if (rc != EOK) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(callid, rc);
		async_answer_0(rid, rc);
		return;
	}
	vfs_node_t *node = vfs_node_get(&lr);
	if (!node) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(callid, ENOMEM);
		async_answer_0(rid, ENOMEM);
		return;
	}

	fibril_rwlock_read_unlock(&namespace_rwlock);

	async_exch_t *exch = vfs_exchange_grab(node->fs_handle);

	/* ARG3 is false here; vfs_fstat() passes true for the same request. */
	aid_t msg;
	msg = async_send_3(exch, VFS_OUT_STAT, node->service_id,
	    node->index, false, NULL);
	async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

	vfs_exchange_release(exch);

	sysarg_t rv;
	async_wait_for(msg, &rv);

	async_answer_0(rid, rv);

	/* Drop the reference taken by vfs_node_get() above. */
	vfs_node_put(node);
}
1051
1052void vfs_mkdir(ipc_callid_t rid, ipc_call_t *request)
1053{
1054 int mode = IPC_GET_ARG1(*request);
1055
1056 char *path;
1057 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
1058 if (rc != EOK) {
1059 async_answer_0(rid, rc);
1060 return;
1061 }
1062
1063 /* Ignore mode for now. */
1064 (void) mode;
1065
1066 fibril_rwlock_write_lock(&namespace_rwlock);
1067 int lflag = L_DIRECTORY | L_CREATE | L_EXCLUSIVE;
1068 rc = vfs_lookup_internal(path, lflag, NULL, NULL);
1069 fibril_rwlock_write_unlock(&namespace_rwlock);
1070 free(path);
1071 async_answer_0(rid, rc);
1072}
1073
1074void vfs_unlink(ipc_callid_t rid, ipc_call_t *request)
1075{
1076 int lflag = IPC_GET_ARG1(*request);
1077
1078 char *path;
1079 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
1080 if (rc != EOK) {
1081 async_answer_0(rid, rc);
1082 return;
1083 }
1084
1085 fibril_rwlock_write_lock(&namespace_rwlock);
1086 lflag &= L_DIRECTORY; /* sanitize lflag */
1087 vfs_lookup_res_t lr;
1088 rc = vfs_lookup_internal(path, lflag | L_UNLINK, &lr, NULL);
1089 free(path);
1090 if (rc != EOK) {
1091 fibril_rwlock_write_unlock(&namespace_rwlock);
1092 async_answer_0(rid, rc);
1093 return;
1094 }
1095
1096 /*
1097 * The name has already been unlinked by vfs_lookup_internal().
1098 * We have to get and put the VFS node to ensure that it is
1099 * VFS_OUT_DESTROY'ed after the last reference to it is dropped.
1100 */
1101 vfs_node_t *node = vfs_node_get(&lr);
1102 fibril_mutex_lock(&nodes_mutex);
1103 node->lnkcnt--;
1104 fibril_mutex_unlock(&nodes_mutex);
1105 fibril_rwlock_write_unlock(&namespace_rwlock);
1106 vfs_node_put(node);
1107 async_answer_0(rid, EOK);
1108}
1109
1110void vfs_rename(ipc_callid_t rid, ipc_call_t *request)
1111{
1112 /* Retrieve the old path. */
1113 char *old;
1114 int rc = async_data_write_accept((void **) &old, true, 0, 0, 0, NULL);
1115 if (rc != EOK) {
1116 async_answer_0(rid, rc);
1117 return;
1118 }
1119
1120 /* Retrieve the new path. */
1121 char *new;
1122 rc = async_data_write_accept((void **) &new, true, 0, 0, 0, NULL);
1123 if (rc != EOK) {
1124 free(old);
1125 async_answer_0(rid, rc);
1126 return;
1127 }
1128
1129 size_t olen;
1130 size_t nlen;
1131 char *oldc = canonify(old, &olen);
1132 char *newc = canonify(new, &nlen);
1133
1134 if ((!oldc) || (!newc)) {
1135 async_answer_0(rid, EINVAL);
1136 free(old);
1137 free(new);
1138 return;
1139 }
1140
1141 oldc[olen] = '\0';
1142 newc[nlen] = '\0';
1143
1144 if ((!str_lcmp(newc, oldc, str_length(oldc))) &&
1145 ((newc[str_length(oldc)] == '/') ||
1146 (str_length(oldc) == 1) ||
1147 (str_length(oldc) == str_length(newc)))) {
1148 /*
1149 * oldc is a prefix of newc and either
1150 * - newc continues with a / where oldc ends, or
1151 * - oldc was / itself, or
1152 * - oldc and newc are equal.
1153 */
1154 async_answer_0(rid, EINVAL);
1155 free(old);
1156 free(new);
1157 return;
1158 }
1159
1160 vfs_lookup_res_t old_lr;
1161 vfs_lookup_res_t new_lr;
1162 vfs_lookup_res_t new_par_lr;
1163 fibril_rwlock_write_lock(&namespace_rwlock);
1164
1165 /* Lookup the node belonging to the old file name. */
1166 rc = vfs_lookup_internal(oldc, L_NONE, &old_lr, NULL);
1167 if (rc != EOK) {
1168 fibril_rwlock_write_unlock(&namespace_rwlock);
1169 async_answer_0(rid, rc);
1170 free(old);
1171 free(new);
1172 return;
1173 }
1174
1175 vfs_node_t *old_node = vfs_node_get(&old_lr);
1176 if (!old_node) {
1177 fibril_rwlock_write_unlock(&namespace_rwlock);
1178 async_answer_0(rid, ENOMEM);
1179 free(old);
1180 free(new);
1181 return;
1182 }
1183
1184 /* Determine the path to the parent of the node with the new name. */
1185 char *parentc = str_dup(newc);
1186 if (!parentc) {
1187 fibril_rwlock_write_unlock(&namespace_rwlock);
1188 vfs_node_put(old_node);
1189 async_answer_0(rid, rc);
1190 free(old);
1191 free(new);
1192 return;
1193 }
1194
1195 char *lastsl = str_rchr(parentc + 1, '/');
1196 if (lastsl)
1197 *lastsl = '\0';
1198 else
1199 parentc[1] = '\0';
1200
1201 /* Lookup parent of the new file name. */
1202 rc = vfs_lookup_internal(parentc, L_NONE, &new_par_lr, NULL);
1203 free(parentc); /* not needed anymore */
1204 if (rc != EOK) {
1205 fibril_rwlock_write_unlock(&namespace_rwlock);
1206 vfs_node_put(old_node);
1207 async_answer_0(rid, rc);
1208 free(old);
1209 free(new);
1210 return;
1211 }
1212
1213 /* Check whether linking to the same file system instance. */
1214 if ((old_node->fs_handle != new_par_lr.triplet.fs_handle) ||
1215 (old_node->service_id != new_par_lr.triplet.service_id)) {
1216 fibril_rwlock_write_unlock(&namespace_rwlock);
1217 vfs_node_put(old_node);
1218 async_answer_0(rid, EXDEV); /* different file systems */
1219 free(old);
1220 free(new);
1221 return;
1222 }
1223
1224 /* Destroy the old link for the new name. */
1225 vfs_node_t *new_node = NULL;
1226 rc = vfs_lookup_internal(newc, L_UNLINK, &new_lr, NULL);
1227
1228 switch (rc) {
1229 case ENOENT:
1230 /* simply not in our way */
1231 break;
1232 case EOK:
1233 new_node = vfs_node_get(&new_lr);
1234 if (!new_node) {
1235 fibril_rwlock_write_unlock(&namespace_rwlock);
1236 vfs_node_put(old_node);
1237 async_answer_0(rid, ENOMEM);
1238 free(old);
1239 free(new);
1240 return;
1241 }
1242 fibril_mutex_lock(&nodes_mutex);
1243 new_node->lnkcnt--;
1244 fibril_mutex_unlock(&nodes_mutex);
1245 break;
1246 default:
1247 fibril_rwlock_write_unlock(&namespace_rwlock);
1248 vfs_node_put(old_node);
1249 async_answer_0(rid, ENOTEMPTY);
1250 free(old);
1251 free(new);
1252 return;
1253 }
1254
1255 /* Create the new link for the new name. */
1256 rc = vfs_lookup_internal(newc, L_LINK, NULL, NULL, old_node->index);
1257 if (rc != EOK) {
1258 fibril_rwlock_write_unlock(&namespace_rwlock);
1259 vfs_node_put(old_node);
1260 if (new_node)
1261 vfs_node_put(new_node);
1262 async_answer_0(rid, rc);
1263 free(old);
1264 free(new);
1265 return;
1266 }
1267
1268 fibril_mutex_lock(&nodes_mutex);
1269 old_node->lnkcnt++;
1270 fibril_mutex_unlock(&nodes_mutex);
1271
1272 /* Destroy the link for the old name. */
1273 rc = vfs_lookup_internal(oldc, L_UNLINK, NULL, NULL);
1274 if (rc != EOK) {
1275 fibril_rwlock_write_unlock(&namespace_rwlock);
1276 vfs_node_put(old_node);
1277 if (new_node)
1278 vfs_node_put(new_node);
1279 async_answer_0(rid, rc);
1280 free(old);
1281 free(new);
1282 return;
1283 }
1284
1285 fibril_mutex_lock(&nodes_mutex);
1286 old_node->lnkcnt--;
1287 fibril_mutex_unlock(&nodes_mutex);
1288 fibril_rwlock_write_unlock(&namespace_rwlock);
1289 vfs_node_put(old_node);
1290
1291 if (new_node)
1292 vfs_node_put(new_node);
1293
1294 free(old);
1295 free(new);
1296 async_answer_0(rid, EOK);
1297}
1298
1299void vfs_dup(ipc_callid_t rid, ipc_call_t *request)
1300{
1301 int oldfd = IPC_GET_ARG1(*request);
1302 int newfd = IPC_GET_ARG2(*request);
1303
1304 /* If the file descriptors are the same, do nothing. */
1305 if (oldfd == newfd) {
1306 async_answer_1(rid, EOK, newfd);
1307 return;
1308 }
1309
1310 /* Lookup the file structure corresponding to oldfd. */
1311 vfs_file_t *oldfile = vfs_file_get(oldfd);
1312 if (!oldfile) {
1313 async_answer_0(rid, EBADF);
1314 return;
1315 }
1316
1317 /*
1318 * Lock the open file structure so that no other thread can manipulate
1319 * the same open file at a time.
1320 */
1321 fibril_mutex_lock(&oldfile->lock);
1322
1323 /* Make sure newfd is closed. */
1324 (void) vfs_fd_free(newfd);
1325
1326 /* Assign the old file to newfd. */
1327 int ret = vfs_fd_assign(oldfile, newfd);
1328 fibril_mutex_unlock(&oldfile->lock);
1329 vfs_file_put(oldfile);
1330
1331 if (ret != EOK)
1332 async_answer_0(rid, ret);
1333 else
1334 async_answer_1(rid, EOK, newfd);
1335}
1336
1337void vfs_wait_handle(ipc_callid_t rid, ipc_call_t *request)
1338{
1339 int fd = vfs_wait_handle_internal();
1340 async_answer_1(rid, EOK, fd);
1341}
1342
/** Handle the VFS_IN_GET_MTAB request: stream the mount table to the client.
 *
 * Protocol (all under mtab_list_lock, so the table cannot change while
 * being transferred):
 *   1. client sends VFS_IN_PING; server answers with the entry count,
 *   2. per entry, client issues three IPC data reads (mount point,
 *      mount options, file system name) followed by another VFS_IN_PING,
 *      which the server answers with the instance number and service id.
 * Any deviation from this sequence aborts the transfer with ENOTSUP.
 *
 * @param rid		Hash of the IPC request that awaits the final answer.
 * @param request	VFS_IN_GET_MTAB request data (unused).
 */
void vfs_get_mtab(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_callid_t callid;
	ipc_call_t data;
	sysarg_t rc = EOK;
	size_t len;

	fibril_mutex_lock(&mtab_list_lock);

	/* Send to the caller the number of mounted filesystems */
	callid = async_get_call(&data);
	if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
		rc = ENOTSUP;
		async_answer_0(callid, rc);
		goto exit;
	}
	async_answer_1(callid, EOK, mtab_size);

	list_foreach(mtab_list, cur) {
		mtab_ent_t *mtab_ent = list_get_instance(cur, mtab_ent_t,
		    link);

		/* Pessimistic default: any failed handshake below aborts
		   the transfer with ENOTSUP. */
		rc = ENOTSUP;

		/* First data read: the mount point path. */
		if (!async_data_read_receive(&callid, &len))
			goto exit;

		(void) async_data_read_finalize(callid, mtab_ent->mp,
		    str_size(mtab_ent->mp));

		/* Second data read: the mount options string. */
		if (!async_data_read_receive(&callid, &len))
			goto exit;

		(void) async_data_read_finalize(callid, mtab_ent->opts,
		    str_size(mtab_ent->opts));

		/* Third data read: the file system name. */
		if (!async_data_read_receive(&callid, &len))
			goto exit;

		(void) async_data_read_finalize(callid, mtab_ent->fs_name,
		    str_size(mtab_ent->fs_name));

		/* The client acknowledges the entry with another PING. */
		callid = async_get_call(&data);

		if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
			rc = ENOTSUP;
			async_answer_0(callid, rc);
			goto exit;
		}

		/* Answer the PING with the per-entry numeric data. */
		rc = EOK;
		async_answer_2(callid, rc, mtab_ent->instance,
		    mtab_ent->service_id);
	}

exit:
	fibril_mutex_unlock(&mtab_list_lock);
	/* Final answer to the original request carries the overall status. */
	async_answer_0(rid, rc);
}
1402
1403/**
1404 * @}
1405 */
Note: See TracBrowser for help on using the repository browser.