source: mainline/uspace/srv/vfs/vfs_ops.c@4979403

Last change on this file since 4979403 was 4979403, checked in by Jakub Jermar <jakub@…>, 14 years ago

Allow more instances of the same FS to be used.
(Thanks to Maurizio Lombardi.)

/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file vfs_ops.c
 * @brief Operations that VFS offers to its clients.
 */

#include "vfs.h"
#include <macros.h>
#include <stdint.h>
#include <async.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <str.h>
#include <bool.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <unistd.h>
#include <ctype.h>
#include <fcntl.h>
#include <assert.h>
#include <vfs/canonify.h>

/* Forward declarations of static functions. */
static int vfs_truncate_internal(fs_handle_t, service_id_t, fs_index_t,
    aoff64_t);

/**
 * This rwlock prevents the race between a triplet-to-VFS-node resolution and a
 * concurrent VFS operation which modifies the file system namespace.
 */
FIBRIL_RWLOCK_INITIALIZE(namespace_rwlock);

vfs_pair_t rootfs = {
	.fs_handle = 0,
	.service_id = 0
};
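
/** Mount a file system instance on a mount point (helper for vfs_mount()).
 *
 * Resolve the mount point @a mp, notify the mountee file system with
 * VFS_OUT_MOUNTED (when mounting the root) or have the underlying file
 * system take over the mount point with VFS_OUT_MOUNT, pass the mount
 * options along and answer the original request.
 *
 * @param rid        Call ID of the mount request being served.
 * @param service_id Service ID of the device being mounted.
 * @param fs_handle  Handle of the mountee file system.
 * @param mp         Path to the mount point.
 * @param opts       Mount options for the mountee file system.
 */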
static void vfs_mount_internal(ipc_callid_t rid, service_id_t service_id,
    fs_handle_t fs_handle, char *mp, char *opts)
{
	vfs_lookup_res_t mp_res;
	vfs_lookup_res_t mr_res;
	vfs_node_t *mp_node = NULL;
	vfs_node_t *mr_node;
	fs_index_t rindex;
	aoff64_t rsize;
	unsigned rlnkcnt;
	async_exch_t *exch;
	sysarg_t rc;
	aid_t msg;
	ipc_call_t answer;

	/* Resolve the path to the mountpoint. */
	fibril_rwlock_write_lock(&namespace_rwlock);
	if (rootfs.fs_handle) {
		/* We already have the root FS. */
		if (str_cmp(mp, "/") == 0) {
			/* Trying to mount root FS over root FS */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, EBUSY);
			return;
		}

		rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
		if (rc != EOK) {
			/* The lookup failed for some reason. */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return;
		}

		mp_node = vfs_node_get(&mp_res);
		if (!mp_node) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOMEM);
			return;
		}

		/*
		 * Now we hold a reference to mp_node.
		 * It will be dropped upon the corresponding VFS_IN_UNMOUNT.
		 * This prevents the mount point from being deleted.
		 */
	} else {
		/* We still don't have the root file system mounted. */
		if (str_cmp(mp, "/") == 0) {
			/*
			 * For this simple, but important case,
			 * we are almost done.
			 */

			/* Tell the mountee that it is being mounted. */
			exch = vfs_exchange_grab(fs_handle);
			msg = async_send_1(exch, VFS_OUT_MOUNTED,
			    (sysarg_t) service_id, &answer);
			/* Send the mount options */
			rc = async_data_write_start(exch, (void *) opts,
			    str_size(opts));
			vfs_exchange_release(exch);

			if (rc != EOK) {
				async_wait_for(msg, NULL);
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return;
			}
			async_wait_for(msg, &rc);

			if (rc != EOK) {
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return;
			}

			rindex = (fs_index_t) IPC_GET_ARG1(answer);
			rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
			rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

			mr_res.triplet.fs_handle = fs_handle;
			mr_res.triplet.service_id = service_id;
			mr_res.triplet.index = rindex;
			mr_res.size = rsize;
			mr_res.lnkcnt = rlnkcnt;
			mr_res.type = VFS_NODE_DIRECTORY;

			rootfs.fs_handle = fs_handle;
			rootfs.service_id = service_id;

			/* Add reference to the mounted root. */
			mr_node = vfs_node_get(&mr_res);
			assert(mr_node);

			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return;
		} else {
			/*
			 * We can't resolve this without the root filesystem
			 * being mounted first.
			 */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOENT);
			return;
		}
	}

	/*
	 * At this point, we have all necessary pieces: file system handle
	 * and service ID, and we know the mount point VFS node.
	 */

	async_exch_t *mountee_exch = vfs_exchange_grab(fs_handle);
	assert(mountee_exch);

	exch = vfs_exchange_grab(mp_res.triplet.fs_handle);
	msg = async_send_4(exch, VFS_OUT_MOUNT,
	    (sysarg_t) mp_res.triplet.service_id,
	    (sysarg_t) mp_res.triplet.index,
	    (sysarg_t) fs_handle,
	    (sysarg_t) service_id, &answer);

	/* Send connection */
	rc = async_exchange_clone(exch, mountee_exch);
	vfs_exchange_release(mountee_exch);

	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_wait_for(msg, NULL);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		async_answer_0(rid, rc);
		fibril_rwlock_write_unlock(&namespace_rwlock);
		return;
	}

	/* Send the mount options. */
	rc = async_data_write_start(exch, (void *) opts, str_size(opts));
	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_wait_for(msg, NULL);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Wait for the answer before releasing the exchange to avoid deadlock
	 * in case the answer depends on further calls to the same file system.
	 * Think of a case when mounting a FS on a file_bd backed by a file on
	 * the same FS.
	 */
	async_wait_for(msg, &rc);
	vfs_exchange_release(exch);

	if (rc == EOK) {
		rindex = (fs_index_t) IPC_GET_ARG1(answer);
		rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
		    IPC_GET_ARG3(answer));
		rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

		mr_res.triplet.fs_handle = fs_handle;
		mr_res.triplet.service_id = service_id;
		mr_res.triplet.index = rindex;
		mr_res.size = rsize;
		mr_res.lnkcnt = rlnkcnt;
		mr_res.type = VFS_NODE_DIRECTORY;

		/* Add reference to the mounted root. */
		mr_node = vfs_node_get(&mr_res);
		assert(mr_node);
	} else {
		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);
	}

	async_answer_0(rid, rc);
	fibril_rwlock_write_unlock(&namespace_rwlock);
}
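
/** Handle a client's mount request.
 *
 * The service ID, mount flags and file system instance number arrive in
 * the request arguments; the mount point, mount options and file system
 * name follow as IPC data writes. Once the named file system is known to
 * VFS, the actual work is done by vfs_mount_internal().
 */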
void vfs_mount(ipc_callid_t rid, ipc_call_t *request)
{
	service_id_t service_id;

	/*
	 * We expect the library to do the device-name to device-handle
	 * translation for us, thus the device handle will arrive as ARG1
	 * in the request.
	 */
	service_id = (service_id_t) IPC_GET_ARG1(*request);

	/*
	 * Mount flags are passed as ARG2.
	 */
	unsigned int flags = (unsigned int) IPC_GET_ARG2(*request);

	/*
	 * Instance number is passed as ARG3.
	 */
	unsigned int instance = IPC_GET_ARG3(*request);

	/*
	 * Mount options do not fit into the request arguments;
	 * they are received below via a separate IPC data write.
	 */

	/* We want the client to send us the mount point. */
	char *mp;
	int rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
	    0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* Now we expect to receive the mount options. */
	char *opts;
	rc = async_data_write_accept((void **) &opts, true, 0, MAX_MNTOPTS_LEN,
	    0, NULL);
	if (rc != EOK) {
		free(mp);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Now, we expect the client to send us data with the name of the file
	 * system.
	 */
	char *fs_name;
	rc = async_data_write_accept((void **) &fs_name, true, 0,
	    FS_NAME_MAXLEN, 0, NULL);
	if (rc != EOK) {
		free(mp);
		free(opts);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Wait for VFS_IN_PING so that we can return an error if we don't know
	 * fs_name.
	 */
	ipc_call_t data;
	ipc_callid_t callid = async_get_call(&data);
	if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
		async_answer_0(callid, ENOTSUP);
		async_answer_0(rid, ENOTSUP);
		free(mp);
		free(opts);
		free(fs_name);
		return;
	}

	/*
	 * Check if we know a file system with the same name as is in fs_name.
	 * This will also give us its file system handle.
	 */
	fibril_mutex_lock(&fs_list_lock);
	fs_handle_t fs_handle;
recheck:
	fs_handle = fs_name_to_handle(instance, fs_name, false);
	if (!fs_handle) {
		if (flags & IPC_FLAG_BLOCKING) {
			fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
			goto recheck;
		}

		fibril_mutex_unlock(&fs_list_lock);
		async_answer_0(callid, ENOENT);
		async_answer_0(rid, ENOENT);
		free(mp);
		free(fs_name);
		free(opts);
		return;
	}
	fibril_mutex_unlock(&fs_list_lock);

	/* Acknowledge that we know fs_name. */
	async_answer_0(callid, EOK);

	/* Do the mount */
	vfs_mount_internal(rid, service_id, fs_handle, mp, opts);
	free(mp);
	free(fs_name);
	free(opts);
}
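
/** Handle a client's unmount request.
 *
 * Receive the mount point path, verify that only the two expected
 * references to the mounted file system remain, notify the mounted file
 * system (VFS_OUT_UNMOUNTED for the root, VFS_OUT_UNMOUNT via the
 * underlying file system otherwise) and release the references taken at
 * mount time.
 */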
void vfs_unmount(ipc_callid_t rid, ipc_call_t *request)
{
	int rc;
	char *mp;
	vfs_lookup_res_t mp_res;
	vfs_lookup_res_t mr_res;
	vfs_node_t *mr_node;
	async_exch_t *exch;

	/*
	 * Receive the mount point path.
	 */
	rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
	    0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Taking the namespace lock will do two things for us. First, it will
	 * prevent races with other lookup operations. Second, it will stop new
	 * references to already existing VFS nodes and creation of new VFS
	 * nodes. This is because new references are added as a result of some
	 * lookup operation or at least of some operation which is protected by
	 * the namespace lock.
	 */
	fibril_rwlock_write_lock(&namespace_rwlock);

	/*
	 * Lookup the mounted root and instantiate it.
	 */
	rc = vfs_lookup_internal(mp, L_ROOT, &mr_res, NULL);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		free(mp);
		async_answer_0(rid, rc);
		return;
	}
	mr_node = vfs_node_get(&mr_res);
	if (!mr_node) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		free(mp);
		async_answer_0(rid, ENOMEM);
		return;
	}

	/*
	 * Count the total number of references for the mounted file system. We
	 * are expecting at least two. One which we got above and one which we
	 * got when the file system was mounted. If we find more, it means that
	 * the file system cannot be gracefully unmounted at the moment because
	 * someone is working with it.
	 */
	if (vfs_nodes_refcount_sum_get(mr_node->fs_handle,
	    mr_node->service_id) != 2) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(mr_node);
		free(mp);
		async_answer_0(rid, EBUSY);
		return;
	}

	if (str_cmp(mp, "/") == 0) {

		/*
		 * Unmounting the root file system.
		 *
		 * In this case, there is no mount point node and we send
		 * VFS_OUT_UNMOUNTED directly to the mounted file system.
		 */

		free(mp);

		exch = vfs_exchange_grab(mr_node->fs_handle);
		rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED,
		    mr_node->service_id);
		vfs_exchange_release(exch);

		if (rc != EOK) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			vfs_node_put(mr_node);
			async_answer_0(rid, rc);
			return;
		}

		rootfs.fs_handle = 0;
		rootfs.service_id = 0;
	} else {

		/*
		 * Unmounting a non-root file system.
		 *
		 * We have a regular mount point node representing the parent
		 * file system, so we delegate the operation to it.
		 */

		rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
		free(mp);
		if (rc != EOK) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			vfs_node_put(mr_node);
			async_answer_0(rid, rc);
			return;
		}

		vfs_node_t *mp_node = vfs_node_get(&mp_res);
		if (!mp_node) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			vfs_node_put(mr_node);
			async_answer_0(rid, ENOMEM);
			return;
		}

		exch = vfs_exchange_grab(mp_node->fs_handle);
		rc = async_req_2_0(exch, VFS_OUT_UNMOUNT,
		    mp_node->service_id, mp_node->index);
		vfs_exchange_release(exch);

		if (rc != EOK) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			vfs_node_put(mp_node);
			vfs_node_put(mr_node);
			async_answer_0(rid, rc);
			return;
		}

		/* Drop the reference we got above. */
		vfs_node_put(mp_node);
		/* Drop the reference from when the file system was mounted. */
		vfs_node_put(mp_node);
	}

	/*
	 * All went well, the mounted file system was successfully unmounted.
	 * The only thing left is to forget the unmounted root VFS node.
	 */
	vfs_node_forget(mr_node);

	fibril_rwlock_write_unlock(&namespace_rwlock);
	async_answer_0(rid, EOK);
}
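
/** Handle the VFS_IN_OPEN request.
 *
 * The lookup flags, open flags and mode come in the request arguments and
 * the path arrives as an IPC data write. The path is looked up (and
 * possibly created), the file is truncated if O_TRUNC was given, and a
 * newly allocated file descriptor is returned to the client.
 */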
void vfs_open(ipc_callid_t rid, ipc_call_t *request)
{
	/*
	 * The POSIX interface is open(path, oflag, mode).
	 * We can receive oflags and mode along with the VFS_IN_OPEN call;
	 * the path will need to arrive in another call.
	 *
	 * We also receive one private, non-POSIX set of flags called lflag
	 * used to pass information to vfs_lookup_internal().
	 */
	int lflag = IPC_GET_ARG1(*request);
	int oflag = IPC_GET_ARG2(*request);
	int mode = IPC_GET_ARG3(*request);

	/* Ignore mode for now. */
	(void) mode;

	/*
	 * Make sure that we are called with exactly one of L_FILE and
	 * L_DIRECTORY. Make sure that the user does not pass L_OPEN,
	 * L_ROOT or L_MP.
	 */
	if (((lflag & (L_FILE | L_DIRECTORY)) == 0) ||
	    ((lflag & (L_FILE | L_DIRECTORY)) == (L_FILE | L_DIRECTORY)) ||
	    (lflag & (L_OPEN | L_ROOT | L_MP))) {
		async_answer_0(rid, EINVAL);
		return;
	}

	if (oflag & O_CREAT)
		lflag |= L_CREATE;
	if (oflag & O_EXCL)
		lflag |= L_EXCLUSIVE;

	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Avoid the race condition in which the file can be deleted before we
	 * find/create-and-lock the VFS node corresponding to the looked-up
	 * triplet.
	 */
	if (lflag & L_CREATE)
		fibril_rwlock_write_lock(&namespace_rwlock);
	else
		fibril_rwlock_read_lock(&namespace_rwlock);

	/* The path is now populated and we can call vfs_lookup_internal(). */
	vfs_lookup_res_t lr;
	rc = vfs_lookup_internal(path, lflag | L_OPEN, &lr, NULL);
	if (rc != EOK) {
		if (lflag & L_CREATE)
			fibril_rwlock_write_unlock(&namespace_rwlock);
		else
			fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		free(path);
		return;
	}

	/* Path is no longer needed. */
	free(path);

	vfs_node_t *node = vfs_node_get(&lr);
	if (lflag & L_CREATE)
		fibril_rwlock_write_unlock(&namespace_rwlock);
	else
		fibril_rwlock_read_unlock(&namespace_rwlock);

	/* Truncate the file if requested and if necessary. */
	if (oflag & O_TRUNC) {
		fibril_rwlock_write_lock(&node->contents_rwlock);
		if (node->size) {
			rc = vfs_truncate_internal(node->fs_handle,
			    node->service_id, node->index, 0);
			if (rc) {
				fibril_rwlock_write_unlock(&node->contents_rwlock);
				vfs_node_put(node);
				async_answer_0(rid, rc);
				return;
			}
			node->size = 0;
		}
		fibril_rwlock_write_unlock(&node->contents_rwlock);
	}

	/*
	 * Get ourselves a file descriptor and the corresponding vfs_file_t
	 * structure.
	 */
	int fd = vfs_fd_alloc((oflag & O_DESC) != 0);
	if (fd < 0) {
		vfs_node_put(node);
		async_answer_0(rid, fd);
		return;
	}
	vfs_file_t *file = vfs_file_get(fd);
	assert(file);
	file->node = node;
	if (oflag & O_APPEND)
		file->append = true;

	/*
	 * The following increase in reference count is for the fact that the
	 * file is being opened and that a file structure is pointing to it.
	 * It is necessary so that the file will not disappear when
	 * vfs_node_put() is called. The reference will be dropped by the
	 * respective VFS_IN_CLOSE.
	 */
	vfs_node_addref(node);
	vfs_node_put(node);
	vfs_file_put(file);

	/* Success! Return the new file descriptor to the client. */
	async_answer_1(rid, EOK, fd);
}
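
/** Handle a client's sync request by forwarding it as VFS_OUT_SYNC to the
 * file system server backing the open file.
 */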
void vfs_sync(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&file->lock);
	async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

	/* Make a VFS_OUT_SYNC request at the destination FS server. */
	aid_t msg;
	ipc_call_t answer;
	msg = async_send_2(fs_exch, VFS_OUT_SYNC, file->node->service_id,
	    file->node->index, &answer);

	vfs_exchange_release(fs_exch);

	/* Wait for reply from the FS server. */
	sysarg_t rc;
	async_wait_for(msg, &rc);

	fibril_mutex_unlock(&file->lock);

	vfs_file_put(file);
	async_answer_0(rid, rc);
}

void vfs_close(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);
	int ret = vfs_fd_free(fd);
	async_answer_0(rid, ret);
}
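
/** Common implementation of the read and write requests (vfs_read() and
 * vfs_write()).
 *
 * Lock the open file and its node as appropriate (shared for reads and for
 * writes on file systems that support concurrent access without changing
 * the file size, exclusive otherwise), forward the data transfer to the
 * backing file system server as VFS_OUT_READ or VFS_OUT_WRITE, and update
 * the cached file size and position afterwards.
 */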
static void vfs_rdwr(ipc_callid_t rid, ipc_call_t *request, bool read)
{
	/*
	 * The following code strongly depends on the fact that the files data
	 * structure can only be accessed by a single fibril and all file
	 * operations are serialized (i.e. the reads and writes cannot
	 * interleave and a file cannot be closed while it is being read).
	 *
	 * Additional synchronization needs to be added once the table of
	 * open files supports parallel access!
	 */

	int fd = IPC_GET_ARG1(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&file->lock);

	vfs_info_t *fs_info = fs_handle_to_info(file->node->fs_handle);
	assert(fs_info);

	/*
	 * Lock the file's node so that no other client can read/write to it at
	 * the same time unless the FS supports concurrent reads/writes and its
	 * write implementation does not modify the file size.
	 */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_lock(&file->node->contents_rwlock);
	else
		fibril_rwlock_write_lock(&file->node->contents_rwlock);

	if (file->node->type == VFS_NODE_DIRECTORY) {
		/*
		 * Make sure that no one is modifying the namespace
		 * while we are in readdir().
		 */
		assert(read);
		fibril_rwlock_read_lock(&namespace_rwlock);
	}

	async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

	/*
	 * Make a VFS_READ/VFS_WRITE request at the destination FS server
	 * and forward the IPC_M_DATA_READ/IPC_M_DATA_WRITE request to the
	 * destination FS server. The call will be routed as if sent by
	 * ourselves. Note that call arguments are immutable in this case so we
	 * don't have to bother.
	 */
	sysarg_t rc;
	ipc_call_t answer;
	if (read) {
		rc = async_data_read_forward_4_1(fs_exch, VFS_OUT_READ,
		    file->node->service_id, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	} else {
		if (file->append)
			file->pos = file->node->size;

		rc = async_data_write_forward_4_1(fs_exch, VFS_OUT_WRITE,
		    file->node->service_id, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	}

	vfs_exchange_release(fs_exch);

	size_t bytes = IPC_GET_ARG1(answer);

	if (file->node->type == VFS_NODE_DIRECTORY)
		fibril_rwlock_read_unlock(&namespace_rwlock);

	/* Unlock the VFS node. */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_unlock(&file->node->contents_rwlock);
	else {
		/* Update the cached version of node's size. */
		if (rc == EOK)
			file->node->size = MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
		fibril_rwlock_write_unlock(&file->node->contents_rwlock);
	}

	/* Update the position pointer and unlock the open file. */
	if (rc == EOK)
		file->pos += bytes;
	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);

	/*
	 * FS server's reply is the final result of the whole operation we
	 * return to the client.
	 */
	async_answer_1(rid, rc, bytes);
}

void vfs_read(ipc_callid_t rid, ipc_call_t *request)
{
	vfs_rdwr(rid, request, true);
}

void vfs_write(ipc_callid_t rid, ipc_call_t *request)
{
	vfs_rdwr(rid, request, false);
}
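
/** Handle a client's seek request.
 *
 * Adjust the position of an open file according to SEEK_SET, SEEK_CUR or
 * SEEK_END, guarding against 64-bit overflow, and answer the new offset.
 */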
void vfs_seek(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = (int) IPC_GET_ARG1(*request);
	off64_t off = (off64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
	    IPC_GET_ARG3(*request));
	int whence = (int) IPC_GET_ARG4(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	fibril_mutex_lock(&file->lock);

	off64_t newoff;
	switch (whence) {
	case SEEK_SET:
		if (off >= 0) {
			file->pos = (aoff64_t) off;
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_1(rid, EOK, off);
			return;
		}
		break;
	case SEEK_CUR:
		if ((off >= 0) && (file->pos + off < file->pos)) {
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		if ((off < 0) && (file->pos < (aoff64_t) -off)) {
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		file->pos += off;
		newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;

		fibril_mutex_unlock(&file->lock);
		vfs_file_put(file);
		async_answer_2(rid, EOK, LOWER32(newoff),
		    UPPER32(newoff));
		return;
	case SEEK_END:
		fibril_rwlock_read_lock(&file->node->contents_rwlock);
		aoff64_t size = file->node->size;

		if ((off >= 0) && (size + off < size)) {
			fibril_rwlock_read_unlock(&file->node->contents_rwlock);
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		if ((off < 0) && (size < (aoff64_t) -off)) {
			fibril_rwlock_read_unlock(&file->node->contents_rwlock);
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		file->pos = size + off;
		newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;

		fibril_rwlock_read_unlock(&file->node->contents_rwlock);
		fibril_mutex_unlock(&file->lock);
		vfs_file_put(file);
		async_answer_2(rid, EOK, LOWER32(newoff), UPPER32(newoff));
		return;
	}

	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);
	async_answer_0(rid, EINVAL);
}
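
/** Ask the file system server to truncate the file identified by
 * (@a fs_handle, @a service_id, @a index) to @a size via VFS_OUT_TRUNCATE.
 */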
int vfs_truncate_internal(fs_handle_t fs_handle, service_id_t service_id,
    fs_index_t index, aoff64_t size)
{
	async_exch_t *exch = vfs_exchange_grab(fs_handle);
	sysarg_t rc = async_req_4_0(exch, VFS_OUT_TRUNCATE,
	    (sysarg_t) service_id, (sysarg_t) index, LOWER32(size),
	    UPPER32(size));
	vfs_exchange_release(exch);

	return (int) rc;
}

void vfs_truncate(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);
	aoff64_t size = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
	    IPC_GET_ARG3(*request));
	int rc;

	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}
	fibril_mutex_lock(&file->lock);

	fibril_rwlock_write_lock(&file->node->contents_rwlock);
	rc = vfs_truncate_internal(file->node->fs_handle,
	    file->node->service_id, file->node->index, size);
	if (rc == EOK)
		file->node->size = size;
	fibril_rwlock_write_unlock(&file->node->contents_rwlock);

	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);
	async_answer_0(rid, (sysarg_t) rc);
}
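
/** Handle a client's fstat request for an open file.
 *
 * The client's IPC_M_DATA_READ for the stat structure is forwarded to the
 * backing file system server together with a VFS_OUT_STAT request.
 */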
void vfs_fstat(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = IPC_GET_ARG1(*request);
	sysarg_t rc;

	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	ipc_callid_t callid;
	if (!async_data_read_receive(&callid, NULL)) {
		vfs_file_put(file);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	fibril_mutex_lock(&file->lock);

	async_exch_t *exch = vfs_exchange_grab(file->node->fs_handle);

	aid_t msg;
	msg = async_send_3(exch, VFS_OUT_STAT, file->node->service_id,
	    file->node->index, true, NULL);
	async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

	vfs_exchange_release(exch);

	async_wait_for(msg, &rc);

	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);
	async_answer_0(rid, rc);
}

void vfs_stat(ipc_callid_t rid, ipc_call_t *request)
{
	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	ipc_callid_t callid;
	if (!async_data_read_receive(&callid, NULL)) {
		free(path);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	vfs_lookup_res_t lr;
	fibril_rwlock_read_lock(&namespace_rwlock);
	rc = vfs_lookup_internal(path, L_NONE, &lr, NULL);
	free(path);
	if (rc != EOK) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(callid, rc);
		async_answer_0(rid, rc);
		return;
	}
	vfs_node_t *node = vfs_node_get(&lr);
	if (!node) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(callid, ENOMEM);
		async_answer_0(rid, ENOMEM);
		return;
	}

	fibril_rwlock_read_unlock(&namespace_rwlock);

	async_exch_t *exch = vfs_exchange_grab(node->fs_handle);

	aid_t msg;
	msg = async_send_3(exch, VFS_OUT_STAT, node->service_id,
	    node->index, false, NULL);
	async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

	vfs_exchange_release(exch);

	sysarg_t rv;
	async_wait_for(msg, &rv);

	async_answer_0(rid, rv);

	vfs_node_put(node);
}

void vfs_mkdir(ipc_callid_t rid, ipc_call_t *request)
{
	int mode = IPC_GET_ARG1(*request);

	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* Ignore mode for now. */
	(void) mode;

	fibril_rwlock_write_lock(&namespace_rwlock);
	int lflag = L_DIRECTORY | L_CREATE | L_EXCLUSIVE;
	rc = vfs_lookup_internal(path, lflag, NULL, NULL);
	fibril_rwlock_write_unlock(&namespace_rwlock);
	free(path);
	async_answer_0(rid, rc);
}
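
/** Handle a client's unlink request.
 *
 * Depending on the L_DIRECTORY bit of the lookup flags this removes either
 * a file or a directory. The actual unlinking is done during the L_UNLINK
 * lookup; what remains is to decrement the link count and drop the node
 * reference so that the node gets destroyed once it is no longer used.
 */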
void vfs_unlink(ipc_callid_t rid, ipc_call_t *request)
{
	int lflag = IPC_GET_ARG1(*request);

	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	fibril_rwlock_write_lock(&namespace_rwlock);
	lflag &= L_DIRECTORY;	/* sanitize lflag */
	vfs_lookup_res_t lr;
	rc = vfs_lookup_internal(path, lflag | L_UNLINK, &lr, NULL);
	free(path);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * The name has already been unlinked by vfs_lookup_internal().
	 * We have to get and put the VFS node to ensure that it is
	 * VFS_OUT_DESTROY'ed after the last reference to it is dropped.
	 */
	vfs_node_t *node = vfs_node_get(&lr);
	fibril_mutex_lock(&nodes_mutex);
	node->lnkcnt--;
	fibril_mutex_unlock(&nodes_mutex);
	fibril_rwlock_write_unlock(&namespace_rwlock);
	vfs_node_put(node);
	async_answer_0(rid, EOK);
}
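
/** Handle a client's rename request.
 *
 * Receive the old and the new path, refuse attempts to move a path under
 * itself, and make sure both names live on the same file system instance.
 * The rename itself is carried out as a sequence of lookups: unlink any
 * existing target, link the node under the new name and finally unlink
 * the old name, adjusting the link counts along the way.
 */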
void vfs_rename(ipc_callid_t rid, ipc_call_t *request)
{
	/* Retrieve the old path. */
	char *old;
	int rc = async_data_write_accept((void **) &old, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* Retrieve the new path. */
	char *new;
	rc = async_data_write_accept((void **) &new, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		free(old);
		async_answer_0(rid, rc);
		return;
	}

	size_t olen;
	size_t nlen;
	char *oldc = canonify(old, &olen);
	char *newc = canonify(new, &nlen);

	if ((!oldc) || (!newc)) {
		async_answer_0(rid, EINVAL);
		free(old);
		free(new);
		return;
	}

	oldc[olen] = '\0';
	newc[nlen] = '\0';

	if ((!str_lcmp(newc, oldc, str_length(oldc))) &&
	    ((newc[str_length(oldc)] == '/') ||
	    (str_length(oldc) == 1) ||
	    (str_length(oldc) == str_length(newc)))) {
		/*
		 * oldc is a prefix of newc and either
		 * - newc continues with a / where oldc ends, or
		 * - oldc was / itself, or
		 * - oldc and newc are equal.
		 */
		async_answer_0(rid, EINVAL);
		free(old);
		free(new);
		return;
	}

	vfs_lookup_res_t old_lr;
	vfs_lookup_res_t new_lr;
	vfs_lookup_res_t new_par_lr;
	fibril_rwlock_write_lock(&namespace_rwlock);

	/* Lookup the node belonging to the old file name. */
	rc = vfs_lookup_internal(oldc, L_NONE, &old_lr, NULL);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		free(old);
		free(new);
		return;
	}

	vfs_node_t *old_node = vfs_node_get(&old_lr);
	if (!old_node) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, ENOMEM);
		free(old);
		free(new);
		return;
	}

	/* Determine the path to the parent of the node with the new name. */
	char *parentc = str_dup(newc);
	if (!parentc) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		async_answer_0(rid, ENOMEM);
		free(old);
		free(new);
		return;
	}

	char *lastsl = str_rchr(parentc + 1, '/');
	if (lastsl)
		*lastsl = '\0';
	else
		parentc[1] = '\0';

	/* Lookup parent of the new file name. */
	rc = vfs_lookup_internal(parentc, L_NONE, &new_par_lr, NULL);
	free(parentc);	/* not needed anymore */
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		async_answer_0(rid, rc);
		free(old);
		free(new);
		return;
	}

	/* Check whether linking to the same file system instance. */
	if ((old_node->fs_handle != new_par_lr.triplet.fs_handle) ||
	    (old_node->service_id != new_par_lr.triplet.service_id)) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		async_answer_0(rid, EXDEV);	/* different file systems */
		free(old);
		free(new);
		return;
	}

	/* Destroy the old link for the new name. */
	vfs_node_t *new_node = NULL;
	rc = vfs_lookup_internal(newc, L_UNLINK, &new_lr, NULL);

	switch (rc) {
	case ENOENT:
		/* simply not in our way */
		break;
	case EOK:
		new_node = vfs_node_get(&new_lr);
		if (!new_node) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			vfs_node_put(old_node);
			async_answer_0(rid, ENOMEM);
			free(old);
			free(new);
			return;
		}
		fibril_mutex_lock(&nodes_mutex);
		new_node->lnkcnt--;
		fibril_mutex_unlock(&nodes_mutex);
		break;
	default:
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		async_answer_0(rid, ENOTEMPTY);
		free(old);
		free(new);
		return;
	}

	/* Create the new link for the new name. */
	rc = vfs_lookup_internal(newc, L_LINK, NULL, NULL, old_node->index);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		if (new_node)
			vfs_node_put(new_node);
		async_answer_0(rid, rc);
		free(old);
		free(new);
		return;
	}

	fibril_mutex_lock(&nodes_mutex);
	old_node->lnkcnt++;
	fibril_mutex_unlock(&nodes_mutex);

	/* Destroy the link for the old name. */
	rc = vfs_lookup_internal(oldc, L_UNLINK, NULL, NULL);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&namespace_rwlock);
		vfs_node_put(old_node);
		if (new_node)
			vfs_node_put(new_node);
		async_answer_0(rid, rc);
		free(old);
		free(new);
		return;
	}

	fibril_mutex_lock(&nodes_mutex);
	old_node->lnkcnt--;
	fibril_mutex_unlock(&nodes_mutex);
	fibril_rwlock_write_unlock(&namespace_rwlock);
	vfs_node_put(old_node);

	if (new_node)
		vfs_node_put(new_node);

	free(old);
	free(new);
	async_answer_0(rid, EOK);
}
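
/** Handle a client's dup2-style request: close the new file descriptor if
 * needed and make it refer to the same open file as the old one.
 */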
void vfs_dup(ipc_callid_t rid, ipc_call_t *request)
{
	int oldfd = IPC_GET_ARG1(*request);
	int newfd = IPC_GET_ARG2(*request);

	/* If the file descriptors are the same, do nothing. */
	if (oldfd == newfd) {
		async_answer_1(rid, EOK, newfd);
		return;
	}

	/* Lookup the file structure corresponding to oldfd. */
	vfs_file_t *oldfile = vfs_file_get(oldfd);
	if (!oldfile) {
		async_answer_0(rid, EBADF);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&oldfile->lock);

	/* Make sure newfd is closed. */
	(void) vfs_fd_free(newfd);

	/* Assign the old file to newfd. */
	int ret = vfs_fd_assign(oldfile, newfd);
	fibril_mutex_unlock(&oldfile->lock);
	vfs_file_put(oldfile);

	if (ret != EOK)
		async_answer_0(rid, ret);
	else
		async_answer_1(rid, EOK, newfd);
}

void vfs_wait_handle(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = vfs_wait_handle_internal();
	async_answer_1(rid, EOK, fd);
}

/**
 * @}
 */