source: mainline/uspace/srv/vfs/vfs_ops.c@ b33ec43

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since b33ec43 was b33ec43, checked in by Jakub Jermar <jakub@…>, 14 years ago

Remove support for directly opening nodes from VFS and libfs.

  • Property mode set to 100644
File size: 32.0 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file vfs_ops.c
35 * @brief Operations that VFS offers to its clients.
36 */
37
38#include "vfs.h"
39#include <macros.h>
40#include <stdint.h>
41#include <async.h>
42#include <errno.h>
43#include <stdio.h>
44#include <stdlib.h>
45#include <str.h>
46#include <bool.h>
47#include <fibril_synch.h>
48#include <adt/list.h>
49#include <unistd.h>
50#include <ctype.h>
51#include <fcntl.h>
52#include <assert.h>
53#include <vfs/canonify.h>
54
55/* Forward declarations of static functions. */
56static int vfs_truncate_internal(fs_handle_t, devmap_handle_t, fs_index_t,
57 aoff64_t);
58
59/**
60 * This rwlock prevents the race between a triplet-to-VFS-node resolution and a
61 * concurrent VFS operation which modifies the file system namespace.
62 */
63FIBRIL_RWLOCK_INITIALIZE(namespace_rwlock);
64
/*
 * Identity of the root file system: the handle of the FS implementation and
 * the handle of the device it was mounted from. Zero handles mean that no
 * root file system has been mounted yet (see vfs_mount_internal()).
 */
vfs_pair_t rootfs = {
	.fs_handle = 0,
	.devmap_handle = 0
};
69
/** Perform the actual mount of file system @a fs_handle onto path @a mp.
 *
 * Called from vfs_mount() after all client handshaking is done. Answers the
 * request @a rid itself; @a mp and @a opts remain owned by the caller.
 *
 * @param rid           Request to answer.
 * @param devmap_handle Handle of the device being mounted.
 * @param fs_handle     Handle of the file system implementation to mount.
 * @param mp            Mount point path.
 * @param opts          Mount options forwarded verbatim to the FS server.
 */
static void vfs_mount_internal(ipc_callid_t rid, devmap_handle_t devmap_handle,
    fs_handle_t fs_handle, char *mp, char *opts)
{
	vfs_lookup_res_t mp_res;
	vfs_lookup_res_t mr_res;
	vfs_node_t *mp_node = NULL;
	vfs_node_t *mr_node;
	fs_index_t rindex;
	aoff64_t rsize;
	unsigned rlnkcnt;
	async_exch_t *exch;
	sysarg_t rc;
	aid_t msg;
	ipc_call_t answer;

	/* Resolve the path to the mountpoint. */
	fibril_rwlock_write_lock(&namespace_rwlock);
	if (rootfs.fs_handle) {
		/* We already have the root FS. */
		if (str_cmp(mp, "/") == 0) {
			/* Trying to mount root FS over root FS */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, EBUSY);
			return;
		}

		rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
		if (rc != EOK) {
			/* The lookup failed for some reason. */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return;
		}

		mp_node = vfs_node_get(&mp_res);
		if (!mp_node) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOMEM);
			return;
		}

		/*
		 * Now we hold a reference to mp_node.
		 * It will be dropped upon the corresponding VFS_IN_UNMOUNT.
		 * This prevents the mount point from being deleted.
		 */
	} else {
		/* We still don't have the root file system mounted. */
		if (str_cmp(mp, "/") == 0) {
			/*
			 * For this simple, but important case,
			 * we are almost done.
			 */

			/* Tell the mountee that it is being mounted. */
			exch = vfs_exchange_grab(fs_handle);
			msg = async_send_1(exch, VFS_OUT_MOUNTED,
			    (sysarg_t) devmap_handle, &answer);
			/* Send the mount options */
			rc = async_data_write_start(exch, (void *)opts,
			    str_size(opts));
			vfs_exchange_release(exch);

			if (rc != EOK) {
				/* Collect the pending answer before bailing. */
				async_wait_for(msg, NULL);
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return;
			}
			async_wait_for(msg, &rc);

			if (rc != EOK) {
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return;
			}

			/* Root node identity as reported by the mountee. */
			rindex = (fs_index_t) IPC_GET_ARG1(answer);
			rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer), IPC_GET_ARG3(answer));
			rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

			mr_res.triplet.fs_handle = fs_handle;
			mr_res.triplet.devmap_handle = devmap_handle;
			mr_res.triplet.index = rindex;
			mr_res.size = rsize;
			mr_res.lnkcnt = rlnkcnt;
			mr_res.type = VFS_NODE_DIRECTORY;

			rootfs.fs_handle = fs_handle;
			rootfs.devmap_handle = devmap_handle;

			/* Add reference to the mounted root. */
			mr_node = vfs_node_get(&mr_res);
			assert(mr_node);

			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return;
		} else {
			/*
			 * We can't resolve this without the root filesystem
			 * being mounted first.
			 */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOENT);
			return;
		}
	}

	/*
	 * At this point, we have all necessary pieces: file system and device
	 * handles, and we know the mount point VFS node.
	 */

	async_exch_t *mountee_exch = vfs_exchange_grab(fs_handle);
	assert(mountee_exch);

	/* Delegate the mount to the FS server hosting the mount point. */
	exch = vfs_exchange_grab(mp_res.triplet.fs_handle);
	msg = async_send_4(exch, VFS_OUT_MOUNT,
	    (sysarg_t) mp_res.triplet.devmap_handle,
	    (sysarg_t) mp_res.triplet.index,
	    (sysarg_t) fs_handle,
	    (sysarg_t) devmap_handle, &answer);

	/* Send connection */
	rc = async_exchange_clone(exch, mountee_exch);
	vfs_exchange_release(mountee_exch);

	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_wait_for(msg, NULL);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		async_answer_0(rid, rc);
		fibril_rwlock_write_unlock(&namespace_rwlock);
		return;
	}

	/* send the mount options */
	rc = async_data_write_start(exch, (void *) opts, str_size(opts));
	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_wait_for(msg, NULL);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		return;
	}

	vfs_exchange_release(exch);
	async_wait_for(msg, &rc);

	if (rc == EOK) {
		/* Mounted root node identity as reported by the FS server. */
		rindex = (fs_index_t) IPC_GET_ARG1(answer);
		rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
		    IPC_GET_ARG3(answer));
		rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

		mr_res.triplet.fs_handle = fs_handle;
		mr_res.triplet.devmap_handle = devmap_handle;
		mr_res.triplet.index = rindex;
		mr_res.size = rsize;
		mr_res.lnkcnt = rlnkcnt;
		mr_res.type = VFS_NODE_DIRECTORY;

		/* Add reference to the mounted root. */
		mr_node = vfs_node_get(&mr_res);
		assert(mr_node);
	} else {
		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);
	}

	async_answer_0(rid, rc);
	fibril_rwlock_write_unlock(&namespace_rwlock);
}
254
/** Handle VFS_IN_MOUNT: gather mount arguments from the client and mount.
 *
 * Receives, in order: the mount point path, the mount options and the file
 * system name via IPC data writes, then waits for VFS_IN_PING so that an
 * unknown file system name can be reported before the mount proper. The
 * actual mount is delegated to vfs_mount_internal().
 *
 * @param rid     Request to answer.
 * @param request Carries the device handle in ARG1 and mount flags in ARG2.
 */
void vfs_mount(ipc_callid_t rid, ipc_call_t *request)
{
	devmap_handle_t devmap_handle;

	/*
	 * We expect the library to do the device-name to device-handle
	 * translation for us, thus the device handle will arrive as ARG1
	 * in the request.
	 */
	devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);

	/*
	 * Mount flags are passed as ARG2.
	 */
	unsigned int flags = (unsigned int) IPC_GET_ARG2(*request);

	/*
	 * For now, don't make use of ARG3, but it can be used to
	 * carry mount options in the future.
	 */

	/* We want the client to send us the mount point. */
	char *mp;
	int rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
	    0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* Now we expect to receive the mount options. */
	char *opts;
	rc = async_data_write_accept((void **) &opts, true, 0, MAX_MNTOPTS_LEN,
	    0, NULL);
	if (rc != EOK) {
		free(mp);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Now, we expect the client to send us data with the name of the file
	 * system.
	 */
	char *fs_name;
	rc = async_data_write_accept((void **) &fs_name, true, 0,
	    FS_NAME_MAXLEN, 0, NULL);
	if (rc != EOK) {
		free(mp);
		free(opts);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Wait for VFS_IN_PING so that we can return an error if we don't know
	 * fs_name.
	 */
	ipc_call_t data;
	ipc_callid_t callid = async_get_call(&data);
	if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
		async_answer_0(callid, ENOTSUP);
		async_answer_0(rid, ENOTSUP);
		free(mp);
		free(opts);
		free(fs_name);
		return;
	}

	/*
	 * Check if we know a file system with the same name as is in fs_name.
	 * This will also give us its file system handle.
	 */
	fibril_mutex_lock(&fs_list_lock);
	fs_handle_t fs_handle;
recheck:
	fs_handle = fs_name_to_handle(fs_name, false);
	if (!fs_handle) {
		if (flags & IPC_FLAG_BLOCKING) {
			/*
			 * Blocking mount: wait for the file system to
			 * register and try the lookup again.
			 */
			fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
			goto recheck;
		}

		fibril_mutex_unlock(&fs_list_lock);
		async_answer_0(callid, ENOENT);
		async_answer_0(rid, ENOENT);
		free(mp);
		free(fs_name);
		free(opts);
		return;
	}
	fibril_mutex_unlock(&fs_list_lock);

	/* Acknowledge that we know fs_name. */
	async_answer_0(callid, EOK);

	/* Do the mount */
	vfs_mount_internal(rid, devmap_handle, fs_handle, mp, opts);
	free(mp);
	free(fs_name);
	free(opts);
}
357
358void vfs_unmount(ipc_callid_t rid, ipc_call_t *request)
359{
360 int rc;
361 char *mp;
362 vfs_lookup_res_t mp_res;
363 vfs_lookup_res_t mr_res;
364 vfs_node_t *mr_node;
365 async_exch_t *exch;
366
367 /*
368 * Receive the mount point path.
369 */
370 rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
371 0, NULL);
372 if (rc != EOK)
373 async_answer_0(rid, rc);
374
375 /*
376 * Taking the namespace lock will do two things for us. First, it will
377 * prevent races with other lookup operations. Second, it will stop new
378 * references to already existing VFS nodes and creation of new VFS
379 * nodes. This is because new references are added as a result of some
380 * lookup operation or at least of some operation which is protected by
381 * the namespace lock.
382 */
383 fibril_rwlock_write_lock(&namespace_rwlock);
384
385 /*
386 * Lookup the mounted root and instantiate it.
387 */
388 rc = vfs_lookup_internal(mp, L_ROOT, &mr_res, NULL);
389 if (rc != EOK) {
390 fibril_rwlock_write_unlock(&namespace_rwlock);
391 free(mp);
392 async_answer_0(rid, rc);
393 return;
394 }
395 mr_node = vfs_node_get(&mr_res);
396 if (!mr_node) {
397 fibril_rwlock_write_unlock(&namespace_rwlock);
398 free(mp);
399 async_answer_0(rid, ENOMEM);
400 return;
401 }
402
403 /*
404 * Count the total number of references for the mounted file system. We
405 * are expecting at least two. One which we got above and one which we
406 * got when the file system was mounted. If we find more, it means that
407 * the file system cannot be gracefully unmounted at the moment because
408 * someone is working with it.
409 */
410 if (vfs_nodes_refcount_sum_get(mr_node->fs_handle,
411 mr_node->devmap_handle) != 2) {
412 fibril_rwlock_write_unlock(&namespace_rwlock);
413 vfs_node_put(mr_node);
414 free(mp);
415 async_answer_0(rid, EBUSY);
416 return;
417 }
418
419 if (str_cmp(mp, "/") == 0) {
420
421 /*
422 * Unmounting the root file system.
423 *
424 * In this case, there is no mount point node and we send
425 * VFS_OUT_UNMOUNTED directly to the mounted file system.
426 */
427
428 free(mp);
429
430 exch = vfs_exchange_grab(mr_node->fs_handle);
431 rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED,
432 mr_node->devmap_handle);
433 vfs_exchange_release(exch);
434
435 if (rc != EOK) {
436 fibril_rwlock_write_unlock(&namespace_rwlock);
437 vfs_node_put(mr_node);
438 async_answer_0(rid, rc);
439 return;
440 }
441
442 rootfs.fs_handle = 0;
443 rootfs.devmap_handle = 0;
444 } else {
445
446 /*
447 * Unmounting a non-root file system.
448 *
449 * We have a regular mount point node representing the parent
450 * file system, so we delegate the operation to it.
451 */
452
453 rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
454 free(mp);
455 if (rc != EOK) {
456 fibril_rwlock_write_unlock(&namespace_rwlock);
457 vfs_node_put(mr_node);
458 async_answer_0(rid, rc);
459 return;
460 }
461
462 vfs_node_t *mp_node = vfs_node_get(&mp_res);
463 if (!mp_node) {
464 fibril_rwlock_write_unlock(&namespace_rwlock);
465 vfs_node_put(mr_node);
466 async_answer_0(rid, ENOMEM);
467 return;
468 }
469
470 exch = vfs_exchange_grab(mp_node->fs_handle);
471 rc = async_req_2_0(exch, VFS_OUT_UNMOUNT,
472 mp_node->devmap_handle, mp_node->index);
473 vfs_exchange_release(exch);
474
475 if (rc != EOK) {
476 fibril_rwlock_write_unlock(&namespace_rwlock);
477 vfs_node_put(mp_node);
478 vfs_node_put(mr_node);
479 async_answer_0(rid, rc);
480 return;
481 }
482
483 /* Drop the reference we got above. */
484 vfs_node_put(mp_node);
485 /* Drop the reference from when the file system was mounted. */
486 vfs_node_put(mp_node);
487 }
488
489 /*
490 * All went well, the mounted file system was successfully unmounted.
491 * The only thing left is to forget the unmounted root VFS node.
492 */
493 vfs_node_forget(mr_node);
494
495 fibril_rwlock_write_unlock(&namespace_rwlock);
496 async_answer_0(rid, EOK);
497}
498
499void vfs_open(ipc_callid_t rid, ipc_call_t *request)
500{
501 /*
502 * The POSIX interface is open(path, oflag, mode).
503 * We can receive oflags and mode along with the VFS_IN_OPEN call;
504 * the path will need to arrive in another call.
505 *
506 * We also receive one private, non-POSIX set of flags called lflag
507 * used to pass information to vfs_lookup_internal().
508 */
509 int lflag = IPC_GET_ARG1(*request);
510 int oflag = IPC_GET_ARG2(*request);
511 int mode = IPC_GET_ARG3(*request);
512
513 /* Ignore mode for now. */
514 (void) mode;
515
516 /*
517 * Make sure that we are called with exactly one of L_FILE and
518 * L_DIRECTORY. Make sure that the user does not pass L_OPEN,
519 * L_ROOT or L_MP.
520 */
521 if (((lflag & (L_FILE | L_DIRECTORY)) == 0) ||
522 ((lflag & (L_FILE | L_DIRECTORY)) == (L_FILE | L_DIRECTORY)) ||
523 (lflag & (L_OPEN | L_ROOT | L_MP))) {
524 async_answer_0(rid, EINVAL);
525 return;
526 }
527
528 if (oflag & O_CREAT)
529 lflag |= L_CREATE;
530 if (oflag & O_EXCL)
531 lflag |= L_EXCLUSIVE;
532
533 char *path;
534 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
535 if (rc != EOK) {
536 async_answer_0(rid, rc);
537 return;
538 }
539
540 /*
541 * Avoid the race condition in which the file can be deleted before we
542 * find/create-and-lock the VFS node corresponding to the looked-up
543 * triplet.
544 */
545 if (lflag & L_CREATE)
546 fibril_rwlock_write_lock(&namespace_rwlock);
547 else
548 fibril_rwlock_read_lock(&namespace_rwlock);
549
550 /* The path is now populated and we can call vfs_lookup_internal(). */
551 vfs_lookup_res_t lr;
552 rc = vfs_lookup_internal(path, lflag | L_OPEN, &lr, NULL);
553 if (rc != EOK) {
554 if (lflag & L_CREATE)
555 fibril_rwlock_write_unlock(&namespace_rwlock);
556 else
557 fibril_rwlock_read_unlock(&namespace_rwlock);
558 async_answer_0(rid, rc);
559 free(path);
560 return;
561 }
562
563 /* Path is no longer needed. */
564 free(path);
565
566 vfs_node_t *node = vfs_node_get(&lr);
567 if (lflag & L_CREATE)
568 fibril_rwlock_write_unlock(&namespace_rwlock);
569 else
570 fibril_rwlock_read_unlock(&namespace_rwlock);
571
572 /* Truncate the file if requested and if necessary. */
573 if (oflag & O_TRUNC) {
574 fibril_rwlock_write_lock(&node->contents_rwlock);
575 if (node->size) {
576 rc = vfs_truncate_internal(node->fs_handle,
577 node->devmap_handle, node->index, 0);
578 if (rc) {
579 fibril_rwlock_write_unlock(&node->contents_rwlock);
580 vfs_node_put(node);
581 async_answer_0(rid, rc);
582 return;
583 }
584 node->size = 0;
585 }
586 fibril_rwlock_write_unlock(&node->contents_rwlock);
587 }
588
589 /*
590 * Get ourselves a file descriptor and the corresponding vfs_file_t
591 * structure.
592 */
593 int fd = vfs_fd_alloc((oflag & O_DESC) != 0);
594 if (fd < 0) {
595 vfs_node_put(node);
596 async_answer_0(rid, fd);
597 return;
598 }
599 vfs_file_t *file = vfs_file_get(fd);
600 assert(file);
601 file->node = node;
602 if (oflag & O_APPEND)
603 file->append = true;
604
605 /*
606 * The following increase in reference count is for the fact that the
607 * file is being opened and that a file structure is pointing to it.
608 * It is necessary so that the file will not disappear when
609 * vfs_node_put() is called. The reference will be dropped by the
610 * respective VFS_IN_CLOSE.
611 */
612 vfs_node_addref(node);
613 vfs_node_put(node);
614 vfs_file_put(file);
615
616 /* Success! Return the new file descriptor to the client. */
617 async_answer_1(rid, EOK, fd);
618}
619
/** Handle VFS_IN_SYNC: ask the FS server to flush an open file to storage.
 *
 * @param rid     Request to answer with the FS server's result.
 * @param request Carries the file descriptor in ARG1.
 */
void vfs_sync(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&file->lock);
	async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

	/* Make a VFS_OUT_SYNC request at the destination FS server. */
	aid_t msg;
	ipc_call_t answer;
	msg = async_send_2(fs_exch, VFS_OUT_SYNC, file->node->devmap_handle,
	    file->node->index, &answer);

	vfs_exchange_release(fs_exch);

	/* Wait for reply from the FS server. */
	sysarg_t rc;
	async_wait_for(msg, &rc);

	fibril_mutex_unlock(&file->lock);

	vfs_file_put(file);
	async_answer_0(rid, rc);
}
655
656void vfs_close(ipc_callid_t rid, ipc_call_t *request)
657{
658 int fd = IPC_GET_ARG1(*request);
659 int ret = vfs_fd_free(fd);
660 async_answer_0(rid, ret);
661}
662
/** Common implementation of VFS_IN_READ and VFS_IN_WRITE.
 *
 * Forwards the client's data transfer to the FS server hosting the file and
 * updates the file position and the cached node size accordingly.
 *
 * @param rid     Request to answer; answered with the FS result and the
 *                number of bytes transferred.
 * @param request Carries the file descriptor in ARG1.
 * @param read    True to read from the file, false to write to it.
 */
static void vfs_rdwr(ipc_callid_t rid, ipc_call_t *request, bool read)
{
	/*
	 * The following code strongly depends on the fact that the files data
	 * structure can be only accessed by a single fibril and all file
	 * operations are serialized (i.e. the reads and writes cannot
	 * interleave and a file cannot be closed while it is being read).
	 *
	 * Additional synchronization needs to be added once the table of
	 * open files supports parallel access!
	 */

	int fd = IPC_GET_ARG1(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&file->lock);

	vfs_info_t *fs_info = fs_handle_to_info(file->node->fs_handle);
	assert(fs_info);

	/*
	 * Lock the file's node so that no other client can read/write to it at
	 * the same time unless the FS supports concurrent reads/writes and its
	 * write implementation does not modify the file size.
	 */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_lock(&file->node->contents_rwlock);
	else
		fibril_rwlock_write_lock(&file->node->contents_rwlock);

	if (file->node->type == VFS_NODE_DIRECTORY) {
		/*
		 * Make sure that no one is modifying the namespace
		 * while we are in readdir().
		 */
		assert(read);
		fibril_rwlock_read_lock(&namespace_rwlock);
	}

	async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

	/*
	 * Make a VFS_READ/VFS_WRITE request at the destination FS server
	 * and forward the IPC_M_DATA_READ/IPC_M_DATA_WRITE request to the
	 * destination FS server. The call will be routed as if sent by
	 * ourselves. Note that call arguments are immutable in this case so we
	 * don't have to bother.
	 */
	sysarg_t rc;
	ipc_call_t answer;
	if (read) {
		rc = async_data_read_forward_4_1(fs_exch, VFS_OUT_READ,
		    file->node->devmap_handle, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	} else {
		/* O_APPEND: every write goes to the current end of file. */
		if (file->append)
			file->pos = file->node->size;

		rc = async_data_write_forward_4_1(fs_exch, VFS_OUT_WRITE,
		    file->node->devmap_handle, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	}

	vfs_exchange_release(fs_exch);

	/* Number of bytes the FS server actually transferred. */
	size_t bytes = IPC_GET_ARG1(answer);

	if (file->node->type == VFS_NODE_DIRECTORY)
		fibril_rwlock_read_unlock(&namespace_rwlock);

	/* Unlock the VFS node. */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_unlock(&file->node->contents_rwlock);
	else {
		/* Update the cached version of node's size. */
		if (rc == EOK)
			file->node->size = MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
		fibril_rwlock_write_unlock(&file->node->contents_rwlock);
	}

	/* Update the position pointer and unlock the open file. */
	if (rc == EOK)
		file->pos += bytes;
	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);

	/*
	 * FS server's reply is the final result of the whole operation we
	 * return to the client.
	 */
	async_answer_1(rid, rc, bytes);
}
768
/** Handle VFS_IN_READ by delegating to the common read/write path. */
void vfs_read(ipc_callid_t rid, ipc_call_t *request)
{
	vfs_rdwr(rid, request, true);
}
773
/** Handle VFS_IN_WRITE by delegating to the common read/write path. */
void vfs_write(ipc_callid_t rid, ipc_call_t *request)
{
	vfs_rdwr(rid, request, false);
}
778
779void vfs_seek(ipc_callid_t rid, ipc_call_t *request)
780{
781 int fd = (int) IPC_GET_ARG1(*request);
782 off64_t off = (off64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
783 IPC_GET_ARG3(*request));
784 int whence = (int) IPC_GET_ARG4(*request);
785
786 /* Lookup the file structure corresponding to the file descriptor. */
787 vfs_file_t *file = vfs_file_get(fd);
788 if (!file) {
789 async_answer_0(rid, ENOENT);
790 return;
791 }
792
793 fibril_mutex_lock(&file->lock);
794
795 off64_t newoff;
796 switch (whence) {
797 case SEEK_SET:
798 if (off >= 0) {
799 file->pos = (aoff64_t) off;
800 fibril_mutex_unlock(&file->lock);
801 vfs_file_put(file);
802 async_answer_1(rid, EOK, off);
803 return;
804 }
805 break;
806 case SEEK_CUR:
807 if ((off >= 0) && (file->pos + off < file->pos)) {
808 fibril_mutex_unlock(&file->lock);
809 vfs_file_put(file);
810 async_answer_0(rid, EOVERFLOW);
811 return;
812 }
813
814 if ((off < 0) && (file->pos < (aoff64_t) -off)) {
815 fibril_mutex_unlock(&file->lock);
816 vfs_file_put(file);
817 async_answer_0(rid, EOVERFLOW);
818 return;
819 }
820
821 file->pos += off;
822 newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;
823
824 fibril_mutex_unlock(&file->lock);
825 vfs_file_put(file);
826 async_answer_2(rid, EOK, LOWER32(newoff),
827 UPPER32(newoff));
828 return;
829 case SEEK_END:
830 fibril_rwlock_read_lock(&file->node->contents_rwlock);
831 aoff64_t size = file->node->size;
832
833 if ((off >= 0) && (size + off < size)) {
834 fibril_rwlock_read_unlock(&file->node->contents_rwlock);
835 fibril_mutex_unlock(&file->lock);
836 vfs_file_put(file);
837 async_answer_0(rid, EOVERFLOW);
838 return;
839 }
840
841 if ((off < 0) && (size < (aoff64_t) -off)) {
842 fibril_rwlock_read_unlock(&file->node->contents_rwlock);
843 fibril_mutex_unlock(&file->lock);
844 vfs_file_put(file);
845 async_answer_0(rid, EOVERFLOW);
846 return;
847 }
848
849 file->pos = size + off;
850 newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;
851
852 fibril_rwlock_read_unlock(&file->node->contents_rwlock);
853 fibril_mutex_unlock(&file->lock);
854 vfs_file_put(file);
855 async_answer_2(rid, EOK, LOWER32(newoff), UPPER32(newoff));
856 return;
857 }
858
859 fibril_mutex_unlock(&file->lock);
860 vfs_file_put(file);
861 async_answer_0(rid, EINVAL);
862}
863
864int vfs_truncate_internal(fs_handle_t fs_handle, devmap_handle_t devmap_handle,
865 fs_index_t index, aoff64_t size)
866{
867 async_exch_t *exch = vfs_exchange_grab(fs_handle);
868 sysarg_t rc = async_req_4_0(exch, VFS_OUT_TRUNCATE,
869 (sysarg_t) devmap_handle, (sysarg_t) index, LOWER32(size),
870 UPPER32(size));
871 vfs_exchange_release(exch);
872
873 return (int) rc;
874}
875
876void vfs_truncate(ipc_callid_t rid, ipc_call_t *request)
877{
878 int fd = IPC_GET_ARG1(*request);
879 aoff64_t size = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
880 IPC_GET_ARG3(*request));
881 int rc;
882
883 vfs_file_t *file = vfs_file_get(fd);
884 if (!file) {
885 async_answer_0(rid, ENOENT);
886 return;
887 }
888 fibril_mutex_lock(&file->lock);
889
890 fibril_rwlock_write_lock(&file->node->contents_rwlock);
891 rc = vfs_truncate_internal(file->node->fs_handle,
892 file->node->devmap_handle, file->node->index, size);
893 if (rc == EOK)
894 file->node->size = size;
895 fibril_rwlock_write_unlock(&file->node->contents_rwlock);
896
897 fibril_mutex_unlock(&file->lock);
898 vfs_file_put(file);
899 async_answer_0(rid, (sysarg_t)rc);
900}
901
/** Handle VFS_IN_FSTAT: forward a stat request for an open file to the FS
 * server and route its answer with the stat data back to the client.
 *
 * @param rid     Request to answer.
 * @param request Carries the file descriptor in ARG1.
 */
void vfs_fstat(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);
	sysarg_t rc;

	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/* The client must follow up with a data read for the stat buffer. */
	ipc_callid_t callid;
	if (!async_data_read_receive(&callid, NULL)) {
		vfs_file_put(file);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	fibril_mutex_lock(&file->lock);

	async_exch_t *exch = vfs_exchange_grab(file->node->fs_handle);

	/* Forward the data read to the FS server as if it came from us. */
	aid_t msg;
	msg = async_send_3(exch, VFS_OUT_STAT, file->node->devmap_handle,
	    file->node->index, true, NULL);
	async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

	vfs_exchange_release(exch);

	async_wait_for(msg, &rc);

	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);
	async_answer_0(rid, rc);
}
938
/** Handle VFS_IN_STAT: stat a file identified by path.
 *
 * Receives the path via an IPC data write, resolves it, and forwards the
 * client's stat-buffer read to the FS server hosting the node.
 *
 * @param rid     Request to answer.
 * @param request Unused; the path arrives via IPC data write.
 */
void vfs_stat(ipc_callid_t rid, ipc_call_t *request)
{
	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* The client must follow up with a data read for the stat buffer. */
	ipc_callid_t callid;
	if (!async_data_read_receive(&callid, NULL)) {
		free(path);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	vfs_lookup_res_t lr;
	fibril_rwlock_read_lock(&namespace_rwlock);
	rc = vfs_lookup_internal(path, L_NONE, &lr, NULL);
	free(path);
	if (rc != EOK) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(callid, rc);
		async_answer_0(rid, rc);
		return;
	}
	vfs_node_t *node = vfs_node_get(&lr);
	if (!node) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(callid, ENOMEM);
		async_answer_0(rid, ENOMEM);
		return;
	}

	fibril_rwlock_read_unlock(&namespace_rwlock);

	async_exch_t *exch = vfs_exchange_grab(node->fs_handle);

	/* Forward the data read to the FS server as if it came from us. */
	aid_t msg;
	msg = async_send_3(exch, VFS_OUT_STAT, node->devmap_handle,
	    node->index, false, NULL);
	async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

	vfs_exchange_release(exch);

	sysarg_t rv;
	async_wait_for(msg, &rv);

	async_answer_0(rid, rv);

	vfs_node_put(node);
}
992
993void vfs_mkdir(ipc_callid_t rid, ipc_call_t *request)
994{
995 int mode = IPC_GET_ARG1(*request);
996
997 char *path;
998 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
999 if (rc != EOK) {
1000 async_answer_0(rid, rc);
1001 return;
1002 }
1003
1004 /* Ignore mode for now. */
1005 (void) mode;
1006
1007 fibril_rwlock_write_lock(&namespace_rwlock);
1008 int lflag = L_DIRECTORY | L_CREATE | L_EXCLUSIVE;
1009 rc = vfs_lookup_internal(path, lflag, NULL, NULL);
1010 fibril_rwlock_write_unlock(&namespace_rwlock);
1011 free(path);
1012 async_answer_0(rid, rc);
1013}
1014
1015void vfs_unlink(ipc_callid_t rid, ipc_call_t *request)
1016{
1017 int lflag = IPC_GET_ARG1(*request);
1018
1019 char *path;
1020 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
1021 if (rc != EOK) {
1022 async_answer_0(rid, rc);
1023 return;
1024 }
1025
1026 fibril_rwlock_write_lock(&namespace_rwlock);
1027 lflag &= L_DIRECTORY; /* sanitize lflag */
1028 vfs_lookup_res_t lr;
1029 rc = vfs_lookup_internal(path, lflag | L_UNLINK, &lr, NULL);
1030 free(path);
1031 if (rc != EOK) {
1032 fibril_rwlock_write_unlock(&namespace_rwlock);
1033 async_answer_0(rid, rc);
1034 return;
1035 }
1036
1037 /*
1038 * The name has already been unlinked by vfs_lookup_internal().
1039 * We have to get and put the VFS node to ensure that it is
1040 * VFS_OUT_DESTROY'ed after the last reference to it is dropped.
1041 */
1042 vfs_node_t *node = vfs_node_get(&lr);
1043 fibril_mutex_lock(&nodes_mutex);
1044 node->lnkcnt--;
1045 fibril_mutex_unlock(&nodes_mutex);
1046 fibril_rwlock_write_unlock(&namespace_rwlock);
1047 vfs_node_put(node);
1048 async_answer_0(rid, EOK);
1049}
1050
1051void vfs_rename(ipc_callid_t rid, ipc_call_t *request)
1052{
1053 /* Retrieve the old path. */
1054 char *old;
1055 int rc = async_data_write_accept((void **) &old, true, 0, 0, 0, NULL);
1056 if (rc != EOK) {
1057 async_answer_0(rid, rc);
1058 return;
1059 }
1060
1061 /* Retrieve the new path. */
1062 char *new;
1063 rc = async_data_write_accept((void **) &new, true, 0, 0, 0, NULL);
1064 if (rc != EOK) {
1065 free(old);
1066 async_answer_0(rid, rc);
1067 return;
1068 }
1069
1070 size_t olen;
1071 size_t nlen;
1072 char *oldc = canonify(old, &olen);
1073 char *newc = canonify(new, &nlen);
1074
1075 if ((!oldc) || (!newc)) {
1076 async_answer_0(rid, EINVAL);
1077 free(old);
1078 free(new);
1079 return;
1080 }
1081
1082 oldc[olen] = '\0';
1083 newc[nlen] = '\0';
1084
1085 if ((!str_lcmp(newc, oldc, str_length(oldc))) &&
1086 ((newc[str_length(oldc)] == '/') ||
1087 (str_length(oldc) == 1) ||
1088 (str_length(oldc) == str_length(newc)))) {
1089 /*
1090 * oldc is a prefix of newc and either
1091 * - newc continues with a / where oldc ends, or
1092 * - oldc was / itself, or
1093 * - oldc and newc are equal.
1094 */
1095 async_answer_0(rid, EINVAL);
1096 free(old);
1097 free(new);
1098 return;
1099 }
1100
1101 vfs_lookup_res_t old_lr;
1102 vfs_lookup_res_t new_lr;
1103 vfs_lookup_res_t new_par_lr;
1104 fibril_rwlock_write_lock(&namespace_rwlock);
1105
1106 /* Lookup the node belonging to the old file name. */
1107 rc = vfs_lookup_internal(oldc, L_NONE, &old_lr, NULL);
1108 if (rc != EOK) {
1109 fibril_rwlock_write_unlock(&namespace_rwlock);
1110 async_answer_0(rid, rc);
1111 free(old);
1112 free(new);
1113 return;
1114 }
1115
1116 vfs_node_t *old_node = vfs_node_get(&old_lr);
1117 if (!old_node) {
1118 fibril_rwlock_write_unlock(&namespace_rwlock);
1119 async_answer_0(rid, ENOMEM);
1120 free(old);
1121 free(new);
1122 return;
1123 }
1124
1125 /* Determine the path to the parent of the node with the new name. */
1126 char *parentc = str_dup(newc);
1127 if (!parentc) {
1128 fibril_rwlock_write_unlock(&namespace_rwlock);
1129 vfs_node_put(old_node);
1130 async_answer_0(rid, rc);
1131 free(old);
1132 free(new);
1133 return;
1134 }
1135
1136 char *lastsl = str_rchr(parentc + 1, '/');
1137 if (lastsl)
1138 *lastsl = '\0';
1139 else
1140 parentc[1] = '\0';
1141
1142 /* Lookup parent of the new file name. */
1143 rc = vfs_lookup_internal(parentc, L_NONE, &new_par_lr, NULL);
1144 free(parentc); /* not needed anymore */
1145 if (rc != EOK) {
1146 fibril_rwlock_write_unlock(&namespace_rwlock);
1147 vfs_node_put(old_node);
1148 async_answer_0(rid, rc);
1149 free(old);
1150 free(new);
1151 return;
1152 }
1153
1154 /* Check whether linking to the same file system instance. */
1155 if ((old_node->fs_handle != new_par_lr.triplet.fs_handle) ||
1156 (old_node->devmap_handle != new_par_lr.triplet.devmap_handle)) {
1157 fibril_rwlock_write_unlock(&namespace_rwlock);
1158 vfs_node_put(old_node);
1159 async_answer_0(rid, EXDEV); /* different file systems */
1160 free(old);
1161 free(new);
1162 return;
1163 }
1164
1165 /* Destroy the old link for the new name. */
1166 vfs_node_t *new_node = NULL;
1167 rc = vfs_lookup_internal(newc, L_UNLINK, &new_lr, NULL);
1168
1169 switch (rc) {
1170 case ENOENT:
1171 /* simply not in our way */
1172 break;
1173 case EOK:
1174 new_node = vfs_node_get(&new_lr);
1175 if (!new_node) {
1176 fibril_rwlock_write_unlock(&namespace_rwlock);
1177 vfs_node_put(old_node);
1178 async_answer_0(rid, ENOMEM);
1179 free(old);
1180 free(new);
1181 return;
1182 }
1183 fibril_mutex_lock(&nodes_mutex);
1184 new_node->lnkcnt--;
1185 fibril_mutex_unlock(&nodes_mutex);
1186 break;
1187 default:
1188 fibril_rwlock_write_unlock(&namespace_rwlock);
1189 vfs_node_put(old_node);
1190 async_answer_0(rid, ENOTEMPTY);
1191 free(old);
1192 free(new);
1193 return;
1194 }
1195
1196 /* Create the new link for the new name. */
1197 rc = vfs_lookup_internal(newc, L_LINK, NULL, NULL, old_node->index);
1198 if (rc != EOK) {
1199 fibril_rwlock_write_unlock(&namespace_rwlock);
1200 vfs_node_put(old_node);
1201 if (new_node)
1202 vfs_node_put(new_node);
1203 async_answer_0(rid, rc);
1204 free(old);
1205 free(new);
1206 return;
1207 }
1208
1209 fibril_mutex_lock(&nodes_mutex);
1210 old_node->lnkcnt++;
1211 fibril_mutex_unlock(&nodes_mutex);
1212
1213 /* Destroy the link for the old name. */
1214 rc = vfs_lookup_internal(oldc, L_UNLINK, NULL, NULL);
1215 if (rc != EOK) {
1216 fibril_rwlock_write_unlock(&namespace_rwlock);
1217 vfs_node_put(old_node);
1218 if (new_node)
1219 vfs_node_put(new_node);
1220 async_answer_0(rid, rc);
1221 free(old);
1222 free(new);
1223 return;
1224 }
1225
1226 fibril_mutex_lock(&nodes_mutex);
1227 old_node->lnkcnt--;
1228 fibril_mutex_unlock(&nodes_mutex);
1229 fibril_rwlock_write_unlock(&namespace_rwlock);
1230 vfs_node_put(old_node);
1231
1232 if (new_node)
1233 vfs_node_put(new_node);
1234
1235 free(old);
1236 free(new);
1237 async_answer_0(rid, EOK);
1238}
1239
1240void vfs_dup(ipc_callid_t rid, ipc_call_t *request)
1241{
1242 int oldfd = IPC_GET_ARG1(*request);
1243 int newfd = IPC_GET_ARG2(*request);
1244
1245 /* If the file descriptors are the same, do nothing. */
1246 if (oldfd == newfd) {
1247 async_answer_1(rid, EOK, newfd);
1248 return;
1249 }
1250
1251 /* Lookup the file structure corresponding to oldfd. */
1252 vfs_file_t *oldfile = vfs_file_get(oldfd);
1253 if (!oldfile) {
1254 async_answer_0(rid, EBADF);
1255 return;
1256 }
1257
1258 /*
1259 * Lock the open file structure so that no other thread can manipulate
1260 * the same open file at a time.
1261 */
1262 fibril_mutex_lock(&oldfile->lock);
1263
1264 /* Make sure newfd is closed. */
1265 (void) vfs_fd_free(newfd);
1266
1267 /* Assign the old file to newfd. */
1268 int ret = vfs_fd_assign(oldfile, newfd);
1269 fibril_mutex_unlock(&oldfile->lock);
1270 vfs_file_put(oldfile);
1271
1272 if (ret != EOK)
1273 async_answer_0(rid, ret);
1274 else
1275 async_answer_1(rid, EOK, newfd);
1276}
1277
1278/**
1279 * @}
1280 */
/* Note: See TracBrowser for help on using the repository browser. */