source: mainline/uspace/srv/vfs/vfs_ops.c@ 54de4836

Last change on this file since 54de4836 was 286286c, checked in by Jakub Jermar <jakub@…>, 14 years ago

Cstyle fixes and cleanup.

  • Property mode set to 100644
File size: 32.3 KB
/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file vfs_ops.c
 * @brief Operations that VFS offers to its clients.
 */

#include "vfs.h"
#include <macros.h>
#include <stdint.h>
#include <async.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <str.h>
#include <bool.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <unistd.h>
#include <ctype.h>
#include <fcntl.h>
#include <assert.h>
#include <vfs/canonify.h>

/* Forward declarations of static functions. */
static int vfs_truncate_internal(fs_handle_t, service_id_t, fs_index_t,
    aoff64_t);

/**
 * This rwlock prevents the race between a triplet-to-VFS-node resolution and a
 * concurrent VFS operation which modifies the file system namespace.
 */
FIBRIL_RWLOCK_INITIALIZE(namespace_rwlock);

vfs_pair_t rootfs = {
	.fs_handle = 0,
	.service_id = 0
};

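/** Mount a file system on the given mount point.
 *
 * Helper for vfs_mount(): mounts the file system identified by @a fs_handle
 * and @a service_id either as the root file system (when @a mp is "/") or on
 * the mount point resolved from @a mp, and answers the client request @a rid.
 */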
static void vfs_mount_internal(ipc_callid_t rid, service_id_t service_id,
    fs_handle_t fs_handle, char *mp, char *opts)
{
	vfs_lookup_res_t mp_res;
	vfs_lookup_res_t mr_res;
	vfs_node_t *mp_node = NULL;
	vfs_node_t *mr_node;
	fs_index_t rindex;
	aoff64_t rsize;
	unsigned rlnkcnt;
	async_exch_t *exch;
	sysarg_t rc;
	aid_t msg;
	ipc_call_t answer;

	/* Resolve the path to the mountpoint. */
	fibril_rwlock_write_lock(&namespace_rwlock);
	if (rootfs.fs_handle) {
		/* We already have the root FS. */
		if (str_cmp(mp, "/") == 0) {
			/* Trying to mount root FS over root FS */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, EBUSY);
			return;
		}

		rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
		if (rc != EOK) {
			/* The lookup failed for some reason. */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return;
		}

		mp_node = vfs_node_get(&mp_res);
		if (!mp_node) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOMEM);
			return;
		}

		/*
		 * Now we hold a reference to mp_node.
		 * It will be dropped upon the corresponding VFS_IN_UNMOUNT.
		 * This prevents the mount point from being deleted.
		 */
	} else {
		/* We still don't have the root file system mounted. */
		if (str_cmp(mp, "/") == 0) {
			/*
			 * For this simple, but important case,
			 * we are almost done.
			 */

			/* Tell the mountee that it is being mounted. */
			exch = vfs_exchange_grab(fs_handle);
			msg = async_send_1(exch, VFS_OUT_MOUNTED,
			    (sysarg_t) service_id, &answer);
			/* Send the mount options */
			rc = async_data_write_start(exch, (void *) opts,
			    str_size(opts));
			vfs_exchange_release(exch);

			if (rc != EOK) {
				async_wait_for(msg, NULL);
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return;
			}
			async_wait_for(msg, &rc);

			if (rc != EOK) {
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return;
			}

			rindex = (fs_index_t) IPC_GET_ARG1(answer);
			rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
			rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

			mr_res.triplet.fs_handle = fs_handle;
			mr_res.triplet.service_id = service_id;
			mr_res.triplet.index = rindex;
			mr_res.size = rsize;
			mr_res.lnkcnt = rlnkcnt;
			mr_res.type = VFS_NODE_DIRECTORY;

			rootfs.fs_handle = fs_handle;
			rootfs.service_id = service_id;

			/* Add reference to the mounted root. */
			mr_node = vfs_node_get(&mr_res);
			assert(mr_node);

			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return;
		} else {
			/*
			 * We can't resolve this without the root filesystem
			 * being mounted first.
			 */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOENT);
			return;
		}
	}

	/*
	 * At this point, we have all necessary pieces: file system handle
	 * and service ID, and we know the mount point VFS node.
	 */

	async_exch_t *mountee_exch = vfs_exchange_grab(fs_handle);
	assert(mountee_exch);

	exch = vfs_exchange_grab(mp_res.triplet.fs_handle);
	msg = async_send_4(exch, VFS_OUT_MOUNT,
	    (sysarg_t) mp_res.triplet.service_id,
	    (sysarg_t) mp_res.triplet.index,
	    (sysarg_t) fs_handle,
	    (sysarg_t) service_id, &answer);

	/* Send connection */
	rc = async_exchange_clone(exch, mountee_exch);
	vfs_exchange_release(mountee_exch);

	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_wait_for(msg, NULL);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		async_answer_0(rid, rc);
		fibril_rwlock_write_unlock(&namespace_rwlock);
		return;
	}

	/* Send the mount options */
	rc = async_data_write_start(exch, (void *) opts, str_size(opts));
	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_wait_for(msg, NULL);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Wait for the answer before releasing the exchange to avoid deadlock
	 * in case the answer depends on further calls to the same file system.
	 * Think of a case when mounting a FS on a file_bd backed by a file on
	 * the same FS.
	 */
	async_wait_for(msg, &rc);
	vfs_exchange_release(exch);

	if (rc == EOK) {
		rindex = (fs_index_t) IPC_GET_ARG1(answer);
		rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
		    IPC_GET_ARG3(answer));
		rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

		mr_res.triplet.fs_handle = fs_handle;
		mr_res.triplet.service_id = service_id;
		mr_res.triplet.index = rindex;
		mr_res.size = rsize;
		mr_res.lnkcnt = rlnkcnt;
		mr_res.type = VFS_NODE_DIRECTORY;

		/* Add reference to the mounted root. */
		mr_node = vfs_node_get(&mr_res);
		assert(mr_node);
	} else {
		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);
	}

	async_answer_0(rid, rc);
	fibril_rwlock_write_unlock(&namespace_rwlock);
}

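/** Process the client's mount request.
 *
 * Receives the mount point, mount options and file system name from the
 * client, resolves the file system name to a handle (waiting for the file
 * system to register when IPC_FLAG_BLOCKING is set) and delegates the actual
 * mounting to vfs_mount_internal().
 */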
void vfs_mount(ipc_callid_t rid, ipc_call_t *request)
{
	service_id_t service_id;

	/*
	 * We expect the library to do the device-name to device-handle
	 * translation for us, thus the device handle will arrive as ARG1
	 * in the request.
	 */
	service_id = (service_id_t) IPC_GET_ARG1(*request);

	/*
	 * Mount flags are passed as ARG2.
	 */
	unsigned int flags = (unsigned int) IPC_GET_ARG2(*request);

	/*
	 * Instance number is passed as ARG3.
	 */
	unsigned int instance = IPC_GET_ARG3(*request);

	/* We want the client to send us the mount point. */
	char *mp;
	int rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
	    0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* Now we expect to receive the mount options. */
	char *opts;
	rc = async_data_write_accept((void **) &opts, true, 0, MAX_MNTOPTS_LEN,
	    0, NULL);
	if (rc != EOK) {
		free(mp);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Now, we expect the client to send us data with the name of the file
	 * system.
	 */
	char *fs_name;
	rc = async_data_write_accept((void **) &fs_name, true, 0,
	    FS_NAME_MAXLEN, 0, NULL);
	if (rc != EOK) {
		free(mp);
		free(opts);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Wait for VFS_IN_PING so that we can return an error if we don't know
	 * fs_name.
	 */
	ipc_call_t data;
	ipc_callid_t callid = async_get_call(&data);
	if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
		async_answer_0(callid, ENOTSUP);
		async_answer_0(rid, ENOTSUP);
		free(mp);
		free(opts);
		free(fs_name);
		return;
	}

	/*
	 * Check if we know a file system with the same name as is in fs_name.
	 * This will also give us its file system handle.
	 */
	fibril_mutex_lock(&fs_list_lock);
	fs_handle_t fs_handle;
recheck:
	fs_handle = fs_name_to_handle(instance, fs_name, false);
	if (!fs_handle) {
		if (flags & IPC_FLAG_BLOCKING) {
			fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
			goto recheck;
		}

		fibril_mutex_unlock(&fs_list_lock);
		async_answer_0(callid, ENOENT);
		async_answer_0(rid, ENOENT);
		free(mp);
		free(fs_name);
		free(opts);
		return;
	}
	fibril_mutex_unlock(&fs_list_lock);

	/* Acknowledge that we know fs_name. */
	async_answer_0(callid, EOK);

	/* Do the mount */
	vfs_mount_internal(rid, service_id, fs_handle, mp, opts);
	free(mp);
	free(fs_name);
	free(opts);
}

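/** Process the client's unmount request.
 *
 * Receives the mount point path and, provided that the mounted file system is
 * no longer in use, asks the file system servers involved to undo the mount
 * and forgets the unmounted root VFS node.
 */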
void vfs_unmount(ipc_callid_t rid, ipc_call_t *request)
{
	int rc;
	char *mp;
	vfs_lookup_res_t mp_res;
	vfs_lookup_res_t mr_res;
	vfs_node_t *mr_node;
	async_exch_t *exch;

	/*
	 * Receive the mount point path.
	 */
	rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
	    0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Taking the namespace lock will do two things for us. First, it will
	 * prevent races with other lookup operations. Second, it will stop new
	 * references to already existing VFS nodes and creation of new VFS
	 * nodes. This is because new references are added as a result of some
	 * lookup operation or at least of some operation which is protected by
	 * the namespace lock.
	 */
	fibril_rwlock_write_lock(&namespace_rwlock);

	/*
	 * Lookup the mounted root and instantiate it.
	 */
	rc = vfs_lookup_internal(mp, L_ROOT, &mr_res, NULL);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		free(mp);
		async_answer_0(rid, rc);
		return;
	}
	mr_node = vfs_node_get(&mr_res);
	if (!mr_node) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		free(mp);
		async_answer_0(rid, ENOMEM);
		return;
	}

	/*
	 * Count the total number of references for the mounted file system. We
	 * are expecting at least two. One which we got above and one which we
	 * got when the file system was mounted. If we find more, it means that
	 * the file system cannot be gracefully unmounted at the moment because
	 * someone is working with it.
	 */
	if (vfs_nodes_refcount_sum_get(mr_node->fs_handle,
	    mr_node->service_id) != 2) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(mr_node);
		free(mp);
		async_answer_0(rid, EBUSY);
		return;
	}

	if (str_cmp(mp, "/") == 0) {

		/*
		 * Unmounting the root file system.
		 *
		 * In this case, there is no mount point node and we send
		 * VFS_OUT_UNMOUNTED directly to the mounted file system.
		 */

		free(mp);

		exch = vfs_exchange_grab(mr_node->fs_handle);
		rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED,
		    mr_node->service_id);
		vfs_exchange_release(exch);

		if (rc != EOK) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			vfs_node_put(mr_node);
			async_answer_0(rid, rc);
			return;
		}

		rootfs.fs_handle = 0;
		rootfs.service_id = 0;
	} else {

		/*
		 * Unmounting a non-root file system.
		 *
		 * We have a regular mount point node representing the parent
		 * file system, so we delegate the operation to it.
		 */

		rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
		free(mp);
		if (rc != EOK) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			vfs_node_put(mr_node);
			async_answer_0(rid, rc);
			return;
		}

		vfs_node_t *mp_node = vfs_node_get(&mp_res);
		if (!mp_node) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			vfs_node_put(mr_node);
			async_answer_0(rid, ENOMEM);
			return;
		}

		exch = vfs_exchange_grab(mp_node->fs_handle);
		rc = async_req_2_0(exch, VFS_OUT_UNMOUNT,
		    mp_node->service_id, mp_node->index);
		vfs_exchange_release(exch);

		if (rc != EOK) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			vfs_node_put(mp_node);
			vfs_node_put(mr_node);
			async_answer_0(rid, rc);
			return;
		}

		/* Drop the reference we got above. */
		vfs_node_put(mp_node);
		/* Drop the reference from when the file system was mounted. */
		vfs_node_put(mp_node);
	}

	/*
	 * All went well, the mounted file system was successfully unmounted.
	 * The only thing left is to forget the unmounted root VFS node.
	 */
	vfs_node_forget(mr_node);

	fibril_rwlock_write_unlock(&namespace_rwlock);
	async_answer_0(rid, EOK);
}

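/** Process the VFS_IN_OPEN request.
 *
 * Receives the path from the client, looks it up (creating the node when
 * O_CREAT is set), optionally truncates it and allocates a new file
 * descriptor backed by the resulting VFS node.
 */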
void vfs_open(ipc_callid_t rid, ipc_call_t *request)
{
	/*
	 * The POSIX interface is open(path, oflag, mode).
	 * We can receive oflags and mode along with the VFS_IN_OPEN call;
	 * the path will need to arrive in another call.
	 *
	 * We also receive one private, non-POSIX set of flags called lflag
	 * used to pass information to vfs_lookup_internal().
	 */
	int lflag = IPC_GET_ARG1(*request);
	int oflag = IPC_GET_ARG2(*request);
	int mode = IPC_GET_ARG3(*request);

	/* Ignore mode for now. */
	(void) mode;

	/*
	 * Make sure that we are called with exactly one of L_FILE and
	 * L_DIRECTORY. Make sure that the user does not pass L_OPEN,
	 * L_ROOT or L_MP.
	 */
	if (((lflag & (L_FILE | L_DIRECTORY)) == 0) ||
	    ((lflag & (L_FILE | L_DIRECTORY)) == (L_FILE | L_DIRECTORY)) ||
	    (lflag & (L_OPEN | L_ROOT | L_MP))) {
		async_answer_0(rid, EINVAL);
		return;
	}

	if (oflag & O_CREAT)
		lflag |= L_CREATE;
	if (oflag & O_EXCL)
		lflag |= L_EXCLUSIVE;

	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Avoid the race condition in which the file can be deleted before we
	 * find/create-and-lock the VFS node corresponding to the looked-up
	 * triplet.
	 */
	if (lflag & L_CREATE)
		fibril_rwlock_write_lock(&namespace_rwlock);
	else
		fibril_rwlock_read_lock(&namespace_rwlock);

	/* The path is now populated and we can call vfs_lookup_internal(). */
	vfs_lookup_res_t lr;
	rc = vfs_lookup_internal(path, lflag | L_OPEN, &lr, NULL);
	if (rc != EOK) {
		if (lflag & L_CREATE)
			fibril_rwlock_write_unlock(&namespace_rwlock);
		else
			fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		free(path);
		return;
	}

	/* Path is no longer needed. */
	free(path);

	vfs_node_t *node = vfs_node_get(&lr);
	if (lflag & L_CREATE)
		fibril_rwlock_write_unlock(&namespace_rwlock);
	else
		fibril_rwlock_read_unlock(&namespace_rwlock);

	/* Truncate the file if requested and if necessary. */
	if (oflag & O_TRUNC) {
		fibril_rwlock_write_lock(&node->contents_rwlock);
		if (node->size) {
			rc = vfs_truncate_internal(node->fs_handle,
			    node->service_id, node->index, 0);
			if (rc) {
				fibril_rwlock_write_unlock(&node->contents_rwlock);
				vfs_node_put(node);
				async_answer_0(rid, rc);
				return;
			}
			node->size = 0;
		}
		fibril_rwlock_write_unlock(&node->contents_rwlock);
	}

	/*
	 * Get ourselves a file descriptor and the corresponding vfs_file_t
	 * structure.
	 */
	int fd = vfs_fd_alloc((oflag & O_DESC) != 0);
	if (fd < 0) {
		vfs_node_put(node);
		async_answer_0(rid, fd);
		return;
	}
	vfs_file_t *file = vfs_file_get(fd);
	assert(file);
	file->node = node;
	if (oflag & O_APPEND)
		file->append = true;

	/*
	 * The following increase in reference count is for the fact that the
	 * file is being opened and that a file structure is pointing to it.
	 * It is necessary so that the file will not disappear when
	 * vfs_node_put() is called. The reference will be dropped by the
	 * respective VFS_IN_CLOSE.
	 */
	vfs_node_addref(node);
	vfs_node_put(node);
	vfs_file_put(file);

	/* Success! Return the new file descriptor to the client. */
	async_answer_1(rid, EOK, fd);
}

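/** Process the client's sync request by asking the backing file system server
 * to flush the node of the given open file.
 */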
void vfs_sync(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&file->lock);
	async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

	/* Make a VFS_OUT_SYNC request at the destination FS server. */
	aid_t msg;
	ipc_call_t answer;
	msg = async_send_2(fs_exch, VFS_OUT_SYNC, file->node->service_id,
	    file->node->index, &answer);

	vfs_exchange_release(fs_exch);

	/* Wait for reply from the FS server. */
	sysarg_t rc;
	async_wait_for(msg, &rc);

	fibril_mutex_unlock(&file->lock);

	vfs_file_put(file);
	async_answer_0(rid, rc);
}

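/** Process the client's close request by freeing its file descriptor. */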
void vfs_close(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);
	int ret = vfs_fd_free(fd);
	async_answer_0(rid, ret);
}

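/** Common implementation of the read and write requests.
 *
 * Forwards the client's data transfer to the backing file system server as
 * VFS_OUT_READ or VFS_OUT_WRITE, updates the cached node size after writes
 * that change it and advances the file position by the number of bytes
 * transferred.
 */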
static void vfs_rdwr(ipc_callid_t rid, ipc_call_t *request, bool read)
{
	/*
	 * The following code strongly depends on the fact that the files data
	 * structure can only be accessed by a single fibril and that all file
	 * operations are serialized (i.e. the reads and writes cannot
	 * interleave and a file cannot be closed while it is being read).
	 *
	 * Additional synchronization needs to be added once the table of
	 * open files supports parallel access!
	 */

	int fd = IPC_GET_ARG1(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&file->lock);

	vfs_info_t *fs_info = fs_handle_to_info(file->node->fs_handle);
	assert(fs_info);

	/*
	 * Lock the file's node so that no other client can read/write to it at
	 * the same time unless the FS supports concurrent reads/writes and its
	 * write implementation does not modify the file size.
	 */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_lock(&file->node->contents_rwlock);
	else
		fibril_rwlock_write_lock(&file->node->contents_rwlock);

	if (file->node->type == VFS_NODE_DIRECTORY) {
		/*
		 * Make sure that no one is modifying the namespace
		 * while we are in readdir().
		 */
		assert(read);
		fibril_rwlock_read_lock(&namespace_rwlock);
	}

	async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

	/*
	 * Make a VFS_READ/VFS_WRITE request at the destination FS server
	 * and forward the IPC_M_DATA_READ/IPC_M_DATA_WRITE request to the
	 * destination FS server. The call will be routed as if sent by
	 * ourselves. Note that call arguments are immutable in this case so we
	 * don't have to bother.
	 */
	sysarg_t rc;
	ipc_call_t answer;
	if (read) {
		rc = async_data_read_forward_4_1(fs_exch, VFS_OUT_READ,
		    file->node->service_id, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	} else {
		if (file->append)
			file->pos = file->node->size;

		rc = async_data_write_forward_4_1(fs_exch, VFS_OUT_WRITE,
		    file->node->service_id, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	}

	vfs_exchange_release(fs_exch);

	size_t bytes = IPC_GET_ARG1(answer);

	if (file->node->type == VFS_NODE_DIRECTORY)
		fibril_rwlock_read_unlock(&namespace_rwlock);

	/* Unlock the VFS node. */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_unlock(&file->node->contents_rwlock);
	else {
		/* Update the cached version of node's size. */
		if (rc == EOK)
			file->node->size = MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
		fibril_rwlock_write_unlock(&file->node->contents_rwlock);
	}

	/* Update the position pointer and unlock the open file. */
	if (rc == EOK)
		file->pos += bytes;
	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);

	/*
	 * The FS server's reply is the final result of the whole operation we
	 * return to the client.
	 */
	async_answer_1(rid, rc, bytes);
}

void vfs_read(ipc_callid_t rid, ipc_call_t *request)
{
	vfs_rdwr(rid, request, true);
}

void vfs_write(ipc_callid_t rid, ipc_call_t *request)
{
	vfs_rdwr(rid, request, false);
}

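/** Process the client's seek request.
 *
 * Repositions the open file according to SEEK_SET, SEEK_CUR or SEEK_END,
 * guarding against position overflow, and returns the new position.
 */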
void vfs_seek(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = (int) IPC_GET_ARG1(*request);
	off64_t off = (off64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
	    IPC_GET_ARG3(*request));
	int whence = (int) IPC_GET_ARG4(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	fibril_mutex_lock(&file->lock);

	off64_t newoff;
	switch (whence) {
	case SEEK_SET:
		if (off >= 0) {
			file->pos = (aoff64_t) off;
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_1(rid, EOK, off);
			return;
		}
		break;
	case SEEK_CUR:
		if ((off >= 0) && (file->pos + off < file->pos)) {
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		if ((off < 0) && (file->pos < (aoff64_t) -off)) {
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		file->pos += off;
		newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;

		fibril_mutex_unlock(&file->lock);
		vfs_file_put(file);
		async_answer_2(rid, EOK, LOWER32(newoff),
		    UPPER32(newoff));
		return;
	case SEEK_END:
		fibril_rwlock_read_lock(&file->node->contents_rwlock);
		aoff64_t size = file->node->size;

		if ((off >= 0) && (size + off < size)) {
			fibril_rwlock_read_unlock(&file->node->contents_rwlock);
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		if ((off < 0) && (size < (aoff64_t) -off)) {
			fibril_rwlock_read_unlock(&file->node->contents_rwlock);
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		file->pos = size + off;
		newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;

		fibril_rwlock_read_unlock(&file->node->contents_rwlock);
		fibril_mutex_unlock(&file->lock);
		vfs_file_put(file);
		async_answer_2(rid, EOK, LOWER32(newoff), UPPER32(newoff));
		return;
	}

	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);
	async_answer_0(rid, EINVAL);
}

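/** Ask the responsible file system server to truncate a node to @a size. */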
int vfs_truncate_internal(fs_handle_t fs_handle, service_id_t service_id,
    fs_index_t index, aoff64_t size)
{
	async_exch_t *exch = vfs_exchange_grab(fs_handle);
	sysarg_t rc = async_req_4_0(exch, VFS_OUT_TRUNCATE,
	    (sysarg_t) service_id, (sysarg_t) index, LOWER32(size),
	    UPPER32(size));
	vfs_exchange_release(exch);

	return (int) rc;
}

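/** Process the client's truncate request.
 *
 * Truncates the open file identified by the client's file descriptor to the
 * requested size and updates the cached node size on success.
 */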
void vfs_truncate(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);
	aoff64_t size = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
	    IPC_GET_ARG3(*request));
	int rc;

	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}
	fibril_mutex_lock(&file->lock);

	fibril_rwlock_write_lock(&file->node->contents_rwlock);
	rc = vfs_truncate_internal(file->node->fs_handle,
	    file->node->service_id, file->node->index, size);
	if (rc == EOK)
		file->node->size = size;
	fibril_rwlock_write_unlock(&file->node->contents_rwlock);

	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);
	async_answer_0(rid, (sysarg_t) rc);
}

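/** Process the client's fstat request.
 *
 * Forwards the client's stat buffer to the file system server backing the
 * open file so that it can fill in the node's attributes.
 */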
void vfs_fstat(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);
	sysarg_t rc;

	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	ipc_callid_t callid;
	if (!async_data_read_receive(&callid, NULL)) {
		vfs_file_put(file);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	fibril_mutex_lock(&file->lock);

	async_exch_t *exch = vfs_exchange_grab(file->node->fs_handle);

	aid_t msg;
	msg = async_send_3(exch, VFS_OUT_STAT, file->node->service_id,
	    file->node->index, true, NULL);
	async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

	vfs_exchange_release(exch);

	async_wait_for(msg, &rc);

	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);
	async_answer_0(rid, rc);
}

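/** Process the client's stat request.
 *
 * Looks up the client-supplied path and forwards the client's stat buffer to
 * the responsible file system server.
 */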
void vfs_stat(ipc_callid_t rid, ipc_call_t *request)
{
	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	ipc_callid_t callid;
	if (!async_data_read_receive(&callid, NULL)) {
		free(path);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	vfs_lookup_res_t lr;
	fibril_rwlock_read_lock(&namespace_rwlock);
	rc = vfs_lookup_internal(path, L_NONE, &lr, NULL);
	free(path);
	if (rc != EOK) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(callid, rc);
		async_answer_0(rid, rc);
		return;
	}
	vfs_node_t *node = vfs_node_get(&lr);
	if (!node) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(callid, ENOMEM);
		async_answer_0(rid, ENOMEM);
		return;
	}

	fibril_rwlock_read_unlock(&namespace_rwlock);

	async_exch_t *exch = vfs_exchange_grab(node->fs_handle);

	aid_t msg;
	msg = async_send_3(exch, VFS_OUT_STAT, node->service_id,
	    node->index, false, NULL);
	async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

	vfs_exchange_release(exch);

	sysarg_t rv;
	async_wait_for(msg, &rv);

	async_answer_0(rid, rv);

	vfs_node_put(node);
}

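/** Process the client's mkdir request by creating a new directory at the
 * client-supplied path; the mode argument is currently ignored.
 */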
void vfs_mkdir(ipc_callid_t rid, ipc_call_t *request)
{
	int mode = IPC_GET_ARG1(*request);

	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* Ignore mode for now. */
	(void) mode;

	fibril_rwlock_write_lock(&namespace_rwlock);
	int lflag = L_DIRECTORY | L_CREATE | L_EXCLUSIVE;
	rc = vfs_lookup_internal(path, lflag, NULL, NULL);
	fibril_rwlock_write_unlock(&namespace_rwlock);
	free(path);
	async_answer_0(rid, rc);
}

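/** Process the client's unlink request.
 *
 * Unlinks the name at the client-supplied path and decreases the link count
 * of the underlying node so that it can be destroyed once the last reference
 * to it is dropped.
 */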
void vfs_unlink(ipc_callid_t rid, ipc_call_t *request)
{
	int lflag = IPC_GET_ARG1(*request);

	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	fibril_rwlock_write_lock(&namespace_rwlock);
	lflag &= L_DIRECTORY;	/* sanitize lflag */
	vfs_lookup_res_t lr;
	rc = vfs_lookup_internal(path, lflag | L_UNLINK, &lr, NULL);
	free(path);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * The name has already been unlinked by vfs_lookup_internal().
	 * We have to get and put the VFS node to ensure that it is
	 * VFS_OUT_DESTROY'ed after the last reference to it is dropped.
	 */
	vfs_node_t *node = vfs_node_get(&lr);
	fibril_mutex_lock(&nodes_mutex);
	node->lnkcnt--;
	fibril_mutex_unlock(&nodes_mutex);
	fibril_rwlock_write_unlock(&namespace_rwlock);
	vfs_node_put(node);
	async_answer_0(rid, EOK);
}

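/** Process the client's rename request.
 *
 * Receives the old and new paths, rejects renames in which the old path is a
 * prefix of the new one and carries out the rename as a sequence of link and
 * unlink operations within a single file system instance.
 */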
void vfs_rename(ipc_callid_t rid, ipc_call_t *request)
{
	/* Retrieve the old path. */
	char *old;
	int rc = async_data_write_accept((void **) &old, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* Retrieve the new path. */
	char *new;
	rc = async_data_write_accept((void **) &new, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		free(old);
		async_answer_0(rid, rc);
		return;
	}

	size_t olen;
	size_t nlen;
	char *oldc = canonify(old, &olen);
	char *newc = canonify(new, &nlen);

	if ((!oldc) || (!newc)) {
		async_answer_0(rid, EINVAL);
		free(old);
		free(new);
		return;
	}

	oldc[olen] = '\0';
	newc[nlen] = '\0';

	if ((!str_lcmp(newc, oldc, str_length(oldc))) &&
	    ((newc[str_length(oldc)] == '/') ||
	    (str_length(oldc) == 1) ||
	    (str_length(oldc) == str_length(newc)))) {
		/*
		 * oldc is a prefix of newc and either
		 * - newc continues with a / where oldc ends, or
		 * - oldc was / itself, or
		 * - oldc and newc are equal.
		 */
		async_answer_0(rid, EINVAL);
		free(old);
		free(new);
		return;
	}

	vfs_lookup_res_t old_lr;
	vfs_lookup_res_t new_lr;
	vfs_lookup_res_t new_par_lr;
	fibril_rwlock_write_lock(&namespace_rwlock);

	/* Lookup the node belonging to the old file name. */
	rc = vfs_lookup_internal(oldc, L_NONE, &old_lr, NULL);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		free(old);
		free(new);
		return;
	}

	vfs_node_t *old_node = vfs_node_get(&old_lr);
	if (!old_node) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, ENOMEM);
		free(old);
		free(new);
		return;
	}

	/* Determine the path to the parent of the node with the new name. */
	char *parentc = str_dup(newc);
	if (!parentc) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		async_answer_0(rid, rc);
		free(old);
		free(new);
		return;
	}

	char *lastsl = str_rchr(parentc + 1, '/');
	if (lastsl)
		*lastsl = '\0';
	else
		parentc[1] = '\0';

	/* Lookup parent of the new file name. */
	rc = vfs_lookup_internal(parentc, L_NONE, &new_par_lr, NULL);
	free(parentc);	/* not needed anymore */
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		async_answer_0(rid, rc);
		free(old);
		free(new);
		return;
	}

	/* Check whether linking to the same file system instance. */
	if ((old_node->fs_handle != new_par_lr.triplet.fs_handle) ||
	    (old_node->service_id != new_par_lr.triplet.service_id)) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		async_answer_0(rid, EXDEV);	/* different file systems */
		free(old);
		free(new);
		return;
	}

	/* Destroy the old link for the new name. */
	vfs_node_t *new_node = NULL;
	rc = vfs_lookup_internal(newc, L_UNLINK, &new_lr, NULL);

	switch (rc) {
	case ENOENT:
		/* simply not in our way */
		break;
	case EOK:
		new_node = vfs_node_get(&new_lr);
		if (!new_node) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			vfs_node_put(old_node);
			async_answer_0(rid, ENOMEM);
			free(old);
			free(new);
			return;
		}
		fibril_mutex_lock(&nodes_mutex);
		new_node->lnkcnt--;
		fibril_mutex_unlock(&nodes_mutex);
		break;
	default:
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		async_answer_0(rid, ENOTEMPTY);
		free(old);
		free(new);
		return;
	}

	/* Create the new link for the new name. */
	rc = vfs_lookup_internal(newc, L_LINK, NULL, NULL, old_node->index);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		if (new_node)
			vfs_node_put(new_node);
		async_answer_0(rid, rc);
		free(old);
		free(new);
		return;
	}

	fibril_mutex_lock(&nodes_mutex);
	old_node->lnkcnt++;
	fibril_mutex_unlock(&nodes_mutex);

	/* Destroy the link for the old name. */
	rc = vfs_lookup_internal(oldc, L_UNLINK, NULL, NULL);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		if (new_node)
			vfs_node_put(new_node);
		async_answer_0(rid, rc);
		free(old);
		free(new);
		return;
	}

	fibril_mutex_lock(&nodes_mutex);
	old_node->lnkcnt--;
	fibril_mutex_unlock(&nodes_mutex);
	fibril_rwlock_write_unlock(&namespace_rwlock);
	vfs_node_put(old_node);

	if (new_node)
		vfs_node_put(new_node);

	free(old);
	free(new);
	async_answer_0(rid, EOK);
}

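/** Process the client's dup request.
 *
 * Closes the target file descriptor, if open, and makes it refer to the same
 * open file as the source descriptor.
 */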
void vfs_dup(ipc_callid_t rid, ipc_call_t *request)
{
	int oldfd = IPC_GET_ARG1(*request);
	int newfd = IPC_GET_ARG2(*request);

	/* If the file descriptors are the same, do nothing. */
	if (oldfd == newfd) {
		async_answer_1(rid, EOK, newfd);
		return;
	}

	/* Lookup the file structure corresponding to oldfd. */
	vfs_file_t *oldfile = vfs_file_get(oldfd);
	if (!oldfile) {
		async_answer_0(rid, EBADF);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&oldfile->lock);

	/* Make sure newfd is closed. */
	(void) vfs_fd_free(newfd);

	/* Assign the old file to newfd. */
	int ret = vfs_fd_assign(oldfile, newfd);
	fibril_mutex_unlock(&oldfile->lock);
	vfs_file_put(oldfile);

	if (ret != EOK)
		async_answer_0(rid, ret);
	else
		async_answer_1(rid, EOK, newfd);
}

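/** Process the client's wait-for-handle request by returning the file
 * descriptor obtained from vfs_wait_handle_internal().
 */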
void vfs_wait_handle(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = vfs_wait_handle_internal();
	async_answer_1(rid, EOK, fd);
}

/**
 * @}
 */