source: mainline/uspace/srv/vfs/vfs_ops.c@ 8d6a41c

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 8d6a41c was 8d6a41c, checked in by Maurizio Lombardi <m.lombardi85@…>, 14 years ago

Remove the mtab_list_ent structure.

  • Property mode set to 100644
File size: 34.8 KB
Line 
1/*
2 * Copyright (c) 2008 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup fs
30 * @{
31 */
32
33/**
34 * @file vfs_ops.c
35 * @brief Operations that VFS offers to its clients.
36 */
37
38#include "vfs.h"
39#include <macros.h>
40#include <stdint.h>
41#include <async.h>
42#include <errno.h>
43#include <stdio.h>
44#include <stdlib.h>
45#include <str.h>
46#include <bool.h>
47#include <fibril_synch.h>
48#include <adt/list.h>
49#include <unistd.h>
50#include <ctype.h>
51#include <fcntl.h>
52#include <assert.h>
53#include <vfs/canonify.h>
54#include <vfs/vfs_mtab.h>
55
/* Protects mtab_list and mtab_size below. */
FIBRIL_MUTEX_INITIALIZE(mtab_list_lock);
/* List of currently mounted file systems (mtab_ent_t entries). */
LIST_INITIALIZE(mtab_list);
/* Number of entries currently on mtab_list. */
static size_t mtab_size = 0;

/* Forward declarations of static functions. */
static int vfs_truncate_internal(fs_handle_t, service_id_t, fs_index_t,
    aoff64_t);

/**
 * This rwlock prevents the race between a triplet-to-VFS-node resolution and a
 * concurrent VFS operation which modifies the file system namespace.
 */
FIBRIL_RWLOCK_INITIALIZE(namespace_rwlock);

/* The root file system; both handles stay 0 until a root is mounted. */
vfs_pair_t rootfs = {
	.fs_handle = 0,
	.service_id = 0
};
74
/** Mount a file system instance on a mount point.
 *
 * Always answers @a rid with the outcome.  On success, a reference to the
 * mounted root node is kept so that the node outlives this call; for a
 * non-root mount, a reference to the mount point node is also retained
 * until the corresponding VFS_IN_UNMOUNT.
 *
 * @param rid        IPC call ID of the request to answer.
 * @param service_id Service ID of the device being mounted.
 * @param fs_handle  Handle of the file system implementation (the mountee).
 * @param mp         Mount point path ("/" selects the root mount case).
 * @param opts       Mount options string, passed verbatim to the mountee.
 */
static void vfs_mount_internal(ipc_callid_t rid, service_id_t service_id,
    fs_handle_t fs_handle, char *mp, char *opts)
{
	vfs_lookup_res_t mp_res;
	vfs_lookup_res_t mr_res;
	vfs_node_t *mp_node = NULL;
	vfs_node_t *mr_node;
	fs_index_t rindex;
	aoff64_t rsize;
	unsigned rlnkcnt;
	async_exch_t *exch;
	sysarg_t rc;
	aid_t msg;
	ipc_call_t answer;

	/* Resolve the path to the mountpoint. */
	fibril_rwlock_write_lock(&namespace_rwlock);
	if (rootfs.fs_handle) {
		/* We already have the root FS. */
		if (str_cmp(mp, "/") == 0) {
			/* Trying to mount root FS over root FS */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, EBUSY);
			return;
		}

		rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
		if (rc != EOK) {
			/* The lookup failed for some reason. */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return;
		}

		mp_node = vfs_node_get(&mp_res);
		if (!mp_node) {
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOMEM);
			return;
		}

		/*
		 * Now we hold a reference to mp_node.
		 * It will be dropped upon the corresponding VFS_IN_UNMOUNT.
		 * This prevents the mount point from being deleted.
		 */
	} else {
		/* We still don't have the root file system mounted. */
		if (str_cmp(mp, "/") == 0) {
			/*
			 * For this simple, but important case,
			 * we are almost done.
			 */

			/* Tell the mountee that it is being mounted. */
			exch = vfs_exchange_grab(fs_handle);
			msg = async_send_1(exch, VFS_OUT_MOUNTED,
			    (sysarg_t) service_id, &answer);
			/* Send the mount options */
			rc = async_data_write_start(exch, (void *)opts,
			    str_size(opts));
			vfs_exchange_release(exch);

			if (rc != EOK) {
				/* Collect the pending answer before bailing. */
				async_wait_for(msg, NULL);
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return;
			}
			async_wait_for(msg, &rc);

			if (rc != EOK) {
				fibril_rwlock_write_unlock(&namespace_rwlock);
				async_answer_0(rid, rc);
				return;
			}

			/* Decode the mounted root: index, size, link count. */
			rindex = (fs_index_t) IPC_GET_ARG1(answer);
			rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
			rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

			mr_res.triplet.fs_handle = fs_handle;
			mr_res.triplet.service_id = service_id;
			mr_res.triplet.index = rindex;
			mr_res.size = rsize;
			mr_res.lnkcnt = rlnkcnt;
			mr_res.type = VFS_NODE_DIRECTORY;

			rootfs.fs_handle = fs_handle;
			rootfs.service_id = service_id;

			/* Add reference to the mounted root. */
			mr_node = vfs_node_get(&mr_res);
			assert(mr_node);

			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, rc);
			return;
		} else {
			/*
			 * We can't resolve this without the root filesystem
			 * being mounted first.
			 */
			fibril_rwlock_write_unlock(&namespace_rwlock);
			async_answer_0(rid, ENOENT);
			return;
		}
	}

	/*
	 * At this point, we have all necessary pieces: file system handle
	 * and service ID, and we know the mount point VFS node.
	 */

	async_exch_t *mountee_exch = vfs_exchange_grab(fs_handle);
	assert(mountee_exch);

	/* Delegate the mount to the FS server that owns the mount point. */
	exch = vfs_exchange_grab(mp_res.triplet.fs_handle);
	msg = async_send_4(exch, VFS_OUT_MOUNT,
	    (sysarg_t) mp_res.triplet.service_id,
	    (sysarg_t) mp_res.triplet.index,
	    (sysarg_t) fs_handle,
	    (sysarg_t) service_id, &answer);

	/* Send connection */
	rc = async_exchange_clone(exch, mountee_exch);
	vfs_exchange_release(mountee_exch);

	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_wait_for(msg, NULL);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		async_answer_0(rid, rc);
		fibril_rwlock_write_unlock(&namespace_rwlock);
		return;
	}

	/* send the mount options */
	rc = async_data_write_start(exch, (void *) opts, str_size(opts));
	if (rc != EOK) {
		vfs_exchange_release(exch);
		async_wait_for(msg, NULL);

		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);

		fibril_rwlock_write_unlock(&namespace_rwlock);
		async_answer_0(rid, rc);
		return;
	}

	/*
	 * Wait for the answer before releasing the exchange to avoid deadlock
	 * in case the answer depends on further calls to the same file system.
	 * Think of a case when mounting a FS on a file_bd backed by a file on
	 * the same FS.
	 */
	async_wait_for(msg, &rc);
	vfs_exchange_release(exch);

	if (rc == EOK) {
		/* Decode the mounted root: index, size, link count. */
		rindex = (fs_index_t) IPC_GET_ARG1(answer);
		rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
		    IPC_GET_ARG3(answer));
		rlnkcnt = (unsigned) IPC_GET_ARG4(answer);

		mr_res.triplet.fs_handle = fs_handle;
		mr_res.triplet.service_id = service_id;
		mr_res.triplet.index = rindex;
		mr_res.size = rsize;
		mr_res.lnkcnt = rlnkcnt;
		mr_res.type = VFS_NODE_DIRECTORY;

		/* Add reference to the mounted root. */
		mr_node = vfs_node_get(&mr_res);
		assert(mr_node);
	} else {
		/* Mount failed, drop reference to mp_node. */
		if (mp_node)
			vfs_node_put(mp_node);
	}

	async_answer_0(rid, rc);
	fibril_rwlock_write_unlock(&namespace_rwlock);
}
266
267void vfs_mount(ipc_callid_t rid, ipc_call_t *request)
268{
269 service_id_t service_id;
270
271 /*
272 * We expect the library to do the device-name to device-handle
273 * translation for us, thus the device handle will arrive as ARG1
274 * in the request.
275 */
276 service_id = (service_id_t) IPC_GET_ARG1(*request);
277
278 /*
279 * Mount flags are passed as ARG2.
280 */
281 unsigned int flags = (unsigned int) IPC_GET_ARG2(*request);
282
283 /*
284 * Instance number is passed as ARG3.
285 */
286 unsigned int instance = IPC_GET_ARG3(*request);
287
288 /* We want the client to send us the mount point. */
289 char *mp;
290 int rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
291 0, NULL);
292 if (rc != EOK) {
293 async_answer_0(rid, rc);
294 return;
295 }
296
297 /* Now we expect to receive the mount options. */
298 char *opts;
299 rc = async_data_write_accept((void **) &opts, true, 0, MAX_MNTOPTS_LEN,
300 0, NULL);
301 if (rc != EOK) {
302 free(mp);
303 async_answer_0(rid, rc);
304 return;
305 }
306
307 /*
308 * Now, we expect the client to send us data with the name of the file
309 * system.
310 */
311 char *fs_name;
312 rc = async_data_write_accept((void **) &fs_name, true, 0,
313 FS_NAME_MAXLEN, 0, NULL);
314 if (rc != EOK) {
315 free(mp);
316 free(opts);
317 async_answer_0(rid, rc);
318 return;
319 }
320
321 /*
322 * Wait for VFS_IN_PING so that we can return an error if we don't know
323 * fs_name.
324 */
325 ipc_call_t data;
326 ipc_callid_t callid = async_get_call(&data);
327 if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
328 async_answer_0(callid, ENOTSUP);
329 async_answer_0(rid, ENOTSUP);
330 free(mp);
331 free(opts);
332 free(fs_name);
333 return;
334 }
335
336 /*
337 * Check if we know a file system with the same name as is in fs_name.
338 * This will also give us its file system handle.
339 */
340 fibril_mutex_lock(&fs_list_lock);
341 fs_handle_t fs_handle;
342recheck:
343 fs_handle = fs_name_to_handle(instance, fs_name, false);
344 if (!fs_handle) {
345 if (flags & IPC_FLAG_BLOCKING) {
346 fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
347 goto recheck;
348 }
349
350 fibril_mutex_unlock(&fs_list_lock);
351 async_answer_0(callid, ENOENT);
352 async_answer_0(rid, ENOENT);
353 free(mp);
354 free(fs_name);
355 free(opts);
356 return;
357 }
358 fibril_mutex_unlock(&fs_list_lock);
359
360 /* Do the mount */
361 vfs_mount_internal(rid, service_id, fs_handle, mp, opts);
362
363 /* Add the filesystem info to the list of mounted filesystems */
364 mtab_ent_t *mtab_ent = malloc(sizeof(mtab_ent_t));
365 if (!mtab_ent) {
366 async_answer_0(callid, ENOMEM);
367 async_answer_0(rid, ENOMEM);
368 free(mp);
369 free(fs_name);
370 free(opts);
371 return;
372 }
373
374 mtab_ent->fs_handle = fs_handle;
375 str_cpy(mtab_ent->mp, MAX_PATH_LEN, mp);
376 str_cpy(mtab_ent->fs_name, FS_NAME_MAXLEN, fs_name);
377 str_cpy(mtab_ent->opts, MAX_MNTOPTS_LEN, opts);
378 mtab_ent->flags = flags;
379 mtab_ent->instance = instance;
380
381 link_initialize(&mtab_ent->link);
382
383 fibril_mutex_lock(&mtab_list_lock);
384 list_append(&mtab_ent->link, &mtab_list);
385 mtab_size++;
386 fibril_mutex_unlock(&mtab_list_lock);
387
388 free(mp);
389
390 /* Acknowledge that we know fs_name. */
391 async_answer_0(callid, EOK);
392}
393
394void vfs_unmount(ipc_callid_t rid, ipc_call_t *request)
395{
396 int rc;
397 char *mp;
398 vfs_lookup_res_t mp_res;
399 vfs_lookup_res_t mr_res;
400 vfs_node_t *mr_node;
401 async_exch_t *exch;
402
403 /*
404 * Receive the mount point path.
405 */
406 rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
407 0, NULL);
408 if (rc != EOK)
409 async_answer_0(rid, rc);
410
411 /*
412 * Taking the namespace lock will do two things for us. First, it will
413 * prevent races with other lookup operations. Second, it will stop new
414 * references to already existing VFS nodes and creation of new VFS
415 * nodes. This is because new references are added as a result of some
416 * lookup operation or at least of some operation which is protected by
417 * the namespace lock.
418 */
419 fibril_rwlock_write_lock(&namespace_rwlock);
420
421 /*
422 * Lookup the mounted root and instantiate it.
423 */
424 rc = vfs_lookup_internal(mp, L_ROOT, &mr_res, NULL);
425 if (rc != EOK) {
426 fibril_rwlock_write_unlock(&namespace_rwlock);
427 free(mp);
428 async_answer_0(rid, rc);
429 return;
430 }
431 mr_node = vfs_node_get(&mr_res);
432 if (!mr_node) {
433 fibril_rwlock_write_unlock(&namespace_rwlock);
434 free(mp);
435 async_answer_0(rid, ENOMEM);
436 return;
437 }
438
439 /*
440 * Count the total number of references for the mounted file system. We
441 * are expecting at least two. One which we got above and one which we
442 * got when the file system was mounted. If we find more, it means that
443 * the file system cannot be gracefully unmounted at the moment because
444 * someone is working with it.
445 */
446 if (vfs_nodes_refcount_sum_get(mr_node->fs_handle,
447 mr_node->service_id) != 2) {
448 fibril_rwlock_write_unlock(&namespace_rwlock);
449 vfs_node_put(mr_node);
450 free(mp);
451 async_answer_0(rid, EBUSY);
452 return;
453 }
454
455 if (str_cmp(mp, "/") == 0) {
456
457 /*
458 * Unmounting the root file system.
459 *
460 * In this case, there is no mount point node and we send
461 * VFS_OUT_UNMOUNTED directly to the mounted file system.
462 */
463
464 exch = vfs_exchange_grab(mr_node->fs_handle);
465 rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED,
466 mr_node->service_id);
467 vfs_exchange_release(exch);
468
469 if (rc != EOK) {
470 fibril_rwlock_write_unlock(&namespace_rwlock);
471 free(mp);
472 vfs_node_put(mr_node);
473 async_answer_0(rid, rc);
474 return;
475 }
476
477 rootfs.fs_handle = 0;
478 rootfs.service_id = 0;
479 } else {
480
481 /*
482 * Unmounting a non-root file system.
483 *
484 * We have a regular mount point node representing the parent
485 * file system, so we delegate the operation to it.
486 */
487
488 rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
489 if (rc != EOK) {
490 fibril_rwlock_write_unlock(&namespace_rwlock);
491 free(mp);
492 vfs_node_put(mr_node);
493 async_answer_0(rid, rc);
494 return;
495 }
496
497 vfs_node_t *mp_node = vfs_node_get(&mp_res);
498 if (!mp_node) {
499 fibril_rwlock_write_unlock(&namespace_rwlock);
500 free(mp);
501 vfs_node_put(mr_node);
502 async_answer_0(rid, ENOMEM);
503 return;
504 }
505
506 exch = vfs_exchange_grab(mp_node->fs_handle);
507 rc = async_req_2_0(exch, VFS_OUT_UNMOUNT,
508 mp_node->service_id, mp_node->index);
509 vfs_exchange_release(exch);
510
511 if (rc != EOK) {
512 fibril_rwlock_write_unlock(&namespace_rwlock);
513 free(mp);
514 vfs_node_put(mp_node);
515 vfs_node_put(mr_node);
516 async_answer_0(rid, rc);
517 return;
518 }
519
520 /* Drop the reference we got above. */
521 vfs_node_put(mp_node);
522 /* Drop the reference from when the file system was mounted. */
523 vfs_node_put(mp_node);
524 }
525
526 /*
527 * All went well, the mounted file system was successfully unmounted.
528 * The only thing left is to forget the unmounted root VFS node.
529 */
530 vfs_node_forget(mr_node);
531 fibril_rwlock_write_unlock(&namespace_rwlock);
532
533 fibril_mutex_lock(&mtab_list_lock);
534
535 int found = 0;
536
537 list_foreach(mtab_list, cur) {
538 mtab_ent_t *mtab_ent = list_get_instance(cur, mtab_ent_t,
539 link);
540
541 if (str_cmp(mtab_ent->mp, mp) == 0) {
542 list_remove(&mtab_ent->link);
543 mtab_size--;
544 free(mtab_ent);
545 found = 1;
546 break;
547 }
548 }
549 assert(found);
550
551 free(mp);
552
553 fibril_mutex_unlock(&mtab_list_lock);
554 async_answer_0(rid, EOK);
555}
556
557void vfs_open(ipc_callid_t rid, ipc_call_t *request)
558{
559 /*
560 * The POSIX interface is open(path, oflag, mode).
561 * We can receive oflags and mode along with the VFS_IN_OPEN call;
562 * the path will need to arrive in another call.
563 *
564 * We also receive one private, non-POSIX set of flags called lflag
565 * used to pass information to vfs_lookup_internal().
566 */
567 int lflag = IPC_GET_ARG1(*request);
568 int oflag = IPC_GET_ARG2(*request);
569 int mode = IPC_GET_ARG3(*request);
570
571 /* Ignore mode for now. */
572 (void) mode;
573
574 /*
575 * Make sure that we are called with exactly one of L_FILE and
576 * L_DIRECTORY. Make sure that the user does not pass L_OPEN,
577 * L_ROOT or L_MP.
578 */
579 if (((lflag & (L_FILE | L_DIRECTORY)) == 0) ||
580 ((lflag & (L_FILE | L_DIRECTORY)) == (L_FILE | L_DIRECTORY)) ||
581 (lflag & (L_OPEN | L_ROOT | L_MP))) {
582 async_answer_0(rid, EINVAL);
583 return;
584 }
585
586 if (oflag & O_CREAT)
587 lflag |= L_CREATE;
588 if (oflag & O_EXCL)
589 lflag |= L_EXCLUSIVE;
590
591 char *path;
592 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
593 if (rc != EOK) {
594 async_answer_0(rid, rc);
595 return;
596 }
597
598 /*
599 * Avoid the race condition in which the file can be deleted before we
600 * find/create-and-lock the VFS node corresponding to the looked-up
601 * triplet.
602 */
603 if (lflag & L_CREATE)
604 fibril_rwlock_write_lock(&namespace_rwlock);
605 else
606 fibril_rwlock_read_lock(&namespace_rwlock);
607
608 /* The path is now populated and we can call vfs_lookup_internal(). */
609 vfs_lookup_res_t lr;
610 rc = vfs_lookup_internal(path, lflag | L_OPEN, &lr, NULL);
611 if (rc != EOK) {
612 if (lflag & L_CREATE)
613 fibril_rwlock_write_unlock(&namespace_rwlock);
614 else
615 fibril_rwlock_read_unlock(&namespace_rwlock);
616 async_answer_0(rid, rc);
617 free(path);
618 return;
619 }
620
621 /* Path is no longer needed. */
622 free(path);
623
624 vfs_node_t *node = vfs_node_get(&lr);
625 if (lflag & L_CREATE)
626 fibril_rwlock_write_unlock(&namespace_rwlock);
627 else
628 fibril_rwlock_read_unlock(&namespace_rwlock);
629
630 /* Truncate the file if requested and if necessary. */
631 if (oflag & O_TRUNC) {
632 fibril_rwlock_write_lock(&node->contents_rwlock);
633 if (node->size) {
634 rc = vfs_truncate_internal(node->fs_handle,
635 node->service_id, node->index, 0);
636 if (rc) {
637 fibril_rwlock_write_unlock(&node->contents_rwlock);
638 vfs_node_put(node);
639 async_answer_0(rid, rc);
640 return;
641 }
642 node->size = 0;
643 }
644 fibril_rwlock_write_unlock(&node->contents_rwlock);
645 }
646
647 /*
648 * Get ourselves a file descriptor and the corresponding vfs_file_t
649 * structure.
650 */
651 int fd = vfs_fd_alloc((oflag & O_DESC) != 0);
652 if (fd < 0) {
653 vfs_node_put(node);
654 async_answer_0(rid, fd);
655 return;
656 }
657 vfs_file_t *file = vfs_file_get(fd);
658 assert(file);
659 file->node = node;
660 if (oflag & O_APPEND)
661 file->append = true;
662
663 /*
664 * The following increase in reference count is for the fact that the
665 * file is being opened and that a file structure is pointing to it.
666 * It is necessary so that the file will not disappear when
667 * vfs_node_put() is called. The reference will be dropped by the
668 * respective VFS_IN_CLOSE.
669 */
670 vfs_node_addref(node);
671 vfs_node_put(node);
672 vfs_file_put(file);
673
674 /* Success! Return the new file descriptor to the client. */
675 async_answer_1(rid, EOK, fd);
676}
677
678void vfs_sync(ipc_callid_t rid, ipc_call_t *request)
679{
680 int fd = IPC_GET_ARG1(*request);
681
682 /* Lookup the file structure corresponding to the file descriptor. */
683 vfs_file_t *file = vfs_file_get(fd);
684 if (!file) {
685 async_answer_0(rid, ENOENT);
686 return;
687 }
688
689 /*
690 * Lock the open file structure so that no other thread can manipulate
691 * the same open file at a time.
692 */
693 fibril_mutex_lock(&file->lock);
694 async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);
695
696 /* Make a VFS_OUT_SYMC request at the destination FS server. */
697 aid_t msg;
698 ipc_call_t answer;
699 msg = async_send_2(fs_exch, VFS_OUT_SYNC, file->node->service_id,
700 file->node->index, &answer);
701
702 vfs_exchange_release(fs_exch);
703
704 /* Wait for reply from the FS server. */
705 sysarg_t rc;
706 async_wait_for(msg, &rc);
707
708 fibril_mutex_unlock(&file->lock);
709
710 vfs_file_put(file);
711 async_answer_0(rid, rc);
712}
713
714void vfs_close(ipc_callid_t rid, ipc_call_t *request)
715{
716 int fd = IPC_GET_ARG1(*request);
717 int ret = vfs_fd_free(fd);
718 async_answer_0(rid, ret);
719}
720
/** Common implementation of VFS_IN_READ and VFS_IN_WRITE.
 *
 * Forwards the data phase of the client's read/write to the backing FS
 * server, updates the cached node size (writes) and the file position, and
 * answers the client with the FS server's result and the byte count.
 *
 * @param rid     IPC call ID of the request; always answered.
 * @param request VFS_IN_READ/VFS_IN_WRITE call: ARG1 = file descriptor.
 * @param read    True for a read, false for a write.
 */
static void vfs_rdwr(ipc_callid_t rid, ipc_call_t *request, bool read)
{
	/*
	 * The following code strongly depends on the fact that the files data
	 * structure can be only accessed by a single fibril and all file
	 * operations are serialized (i.e. the reads and writes cannot
	 * interleave and a file cannot be closed while it is being read).
	 *
	 * Additional synchronization needs to be added once the table of
	 * open files supports parallel access!
	 */

	int fd = IPC_GET_ARG1(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	/*
	 * Lock the open file structure so that no other thread can manipulate
	 * the same open file at a time.
	 */
	fibril_mutex_lock(&file->lock);

	vfs_info_t *fs_info = fs_handle_to_info(file->node->fs_handle);
	assert(fs_info);

	/*
	 * Lock the file's node so that no other client can read/write to it at
	 * the same time unless the FS supports concurrent reads/writes and its
	 * write implementation does not modify the file size.
	 */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_lock(&file->node->contents_rwlock);
	else
		fibril_rwlock_write_lock(&file->node->contents_rwlock);

	if (file->node->type == VFS_NODE_DIRECTORY) {
		/*
		 * Make sure that no one is modifying the namespace
		 * while we are in readdir().
		 */
		assert(read);
		fibril_rwlock_read_lock(&namespace_rwlock);
	}

	async_exch_t *fs_exch = vfs_exchange_grab(file->node->fs_handle);

	/*
	 * Make a VFS_READ/VFS_WRITE request at the destination FS server
	 * and forward the IPC_M_DATA_READ/IPC_M_DATA_WRITE request to the
	 * destination FS server. The call will be routed as if sent by
	 * ourselves. Note that call arguments are immutable in this case so we
	 * don't have to bother.
	 */
	sysarg_t rc;
	ipc_call_t answer;
	if (read) {
		rc = async_data_read_forward_4_1(fs_exch, VFS_OUT_READ,
		    file->node->service_id, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	} else {
		/* O_APPEND: every write goes to the current end of file. */
		if (file->append)
			file->pos = file->node->size;

		rc = async_data_write_forward_4_1(fs_exch, VFS_OUT_WRITE,
		    file->node->service_id, file->node->index,
		    LOWER32(file->pos), UPPER32(file->pos), &answer);
	}

	vfs_exchange_release(fs_exch);

	/* Number of bytes transferred, as reported by the FS server. */
	size_t bytes = IPC_GET_ARG1(answer);

	if (file->node->type == VFS_NODE_DIRECTORY)
		fibril_rwlock_read_unlock(&namespace_rwlock);

	/* Unlock the VFS node. */
	if ((read) ||
	    ((fs_info->concurrent_read_write) && (fs_info->write_retains_size)))
		fibril_rwlock_read_unlock(&file->node->contents_rwlock);
	else {
		/* Update the cached version of node's size. */
		if (rc == EOK)
			file->node->size = MERGE_LOUP32(IPC_GET_ARG2(answer),
			    IPC_GET_ARG3(answer));
		fibril_rwlock_write_unlock(&file->node->contents_rwlock);
	}

	/* Update the position pointer and unlock the open file. */
	if (rc == EOK)
		file->pos += bytes;
	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);

	/*
	 * FS server's reply is the final result of the whole operation we
	 * return to the client.
	 */
	async_answer_1(rid, rc, bytes);
}
826
/** Handle VFS_IN_READ: read from the descriptor in ARG1 via vfs_rdwr(). */
void vfs_read(ipc_callid_t rid, ipc_call_t *request)
{
	vfs_rdwr(rid, request, true);
}
831
/** Handle VFS_IN_WRITE: write to the descriptor in ARG1 via vfs_rdwr(). */
void vfs_write(ipc_callid_t rid, ipc_call_t *request)
{
	vfs_rdwr(rid, request, false);
}
836
/** Handle the VFS_IN_SEEK request.
 *
 * Adjusts the position of an open file according to POSIX-style whence
 * semantics (SEEK_SET/SEEK_CUR/SEEK_END) with explicit overflow checks on
 * the unsigned 64-bit position.
 *
 * @param rid     IPC call ID of the request; always answered.
 * @param request VFS_IN_SEEK call: ARG1 = fd, ARG2|ARG3 = 64-bit offset,
 *                ARG4 = whence.
 */
void vfs_seek(ipc_callid_t rid, ipc_call_t *request)
{
	int fd = (int) IPC_GET_ARG1(*request);
	off64_t off = (off64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
	    IPC_GET_ARG3(*request));
	int whence = (int) IPC_GET_ARG4(*request);

	/* Lookup the file structure corresponding to the file descriptor. */
	vfs_file_t *file = vfs_file_get(fd);
	if (!file) {
		async_answer_0(rid, ENOENT);
		return;
	}

	fibril_mutex_lock(&file->lock);

	off64_t newoff;
	switch (whence) {
	case SEEK_SET:
		if (off >= 0) {
			file->pos = (aoff64_t) off;
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			/*
			 * NOTE(review): unlike the other branches, this
			 * answers only a single word; on 32-bit targets the
			 * upper half of the offset is not returned — verify
			 * against the client library's expectations.
			 */
			async_answer_1(rid, EOK, off);
			return;
		}
		break;
	case SEEK_CUR:
		/* Unsigned wrap check: pos + off must not overflow. */
		if ((off >= 0) && (file->pos + off < file->pos)) {
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		/* Negative offset must not move before position 0. */
		if ((off < 0) && (file->pos < (aoff64_t) -off)) {
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		file->pos += off;
		newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;

		fibril_mutex_unlock(&file->lock);
		vfs_file_put(file);
		async_answer_2(rid, EOK, LOWER32(newoff),
		    UPPER32(newoff));
		return;
	case SEEK_END:
		/* Read the cached node size under the contents lock. */
		fibril_rwlock_read_lock(&file->node->contents_rwlock);
		aoff64_t size = file->node->size;

		if ((off >= 0) && (size + off < size)) {
			fibril_rwlock_read_unlock(&file->node->contents_rwlock);
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		if ((off < 0) && (size < (aoff64_t) -off)) {
			fibril_rwlock_read_unlock(&file->node->contents_rwlock);
			fibril_mutex_unlock(&file->lock);
			vfs_file_put(file);
			async_answer_0(rid, EOVERFLOW);
			return;
		}

		file->pos = size + off;
		newoff = (file->pos > OFF64_MAX) ? OFF64_MAX : file->pos;

		fibril_rwlock_read_unlock(&file->node->contents_rwlock);
		fibril_mutex_unlock(&file->lock);
		vfs_file_put(file);
		async_answer_2(rid, EOK, LOWER32(newoff), UPPER32(newoff));
		return;
	}

	/* Unknown whence, or SEEK_SET with a negative offset. */
	fibril_mutex_unlock(&file->lock);
	vfs_file_put(file);
	async_answer_0(rid, EINVAL);
}
921
922int vfs_truncate_internal(fs_handle_t fs_handle, service_id_t service_id,
923 fs_index_t index, aoff64_t size)
924{
925 async_exch_t *exch = vfs_exchange_grab(fs_handle);
926 sysarg_t rc = async_req_4_0(exch, VFS_OUT_TRUNCATE,
927 (sysarg_t) service_id, (sysarg_t) index, LOWER32(size),
928 UPPER32(size));
929 vfs_exchange_release(exch);
930
931 return (int) rc;
932}
933
934void vfs_truncate(ipc_callid_t rid, ipc_call_t *request)
935{
936 int fd = IPC_GET_ARG1(*request);
937 aoff64_t size = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(*request),
938 IPC_GET_ARG3(*request));
939 int rc;
940
941 vfs_file_t *file = vfs_file_get(fd);
942 if (!file) {
943 async_answer_0(rid, ENOENT);
944 return;
945 }
946 fibril_mutex_lock(&file->lock);
947
948 fibril_rwlock_write_lock(&file->node->contents_rwlock);
949 rc = vfs_truncate_internal(file->node->fs_handle,
950 file->node->service_id, file->node->index, size);
951 if (rc == EOK)
952 file->node->size = size;
953 fibril_rwlock_write_unlock(&file->node->contents_rwlock);
954
955 fibril_mutex_unlock(&file->lock);
956 vfs_file_put(file);
957 async_answer_0(rid, (sysarg_t)rc);
958}
959
960void vfs_fstat(ipc_callid_t rid, ipc_call_t *request)
961{
962 int fd = IPC_GET_ARG1(*request);
963 sysarg_t rc;
964
965 vfs_file_t *file = vfs_file_get(fd);
966 if (!file) {
967 async_answer_0(rid, ENOENT);
968 return;
969 }
970
971 ipc_callid_t callid;
972 if (!async_data_read_receive(&callid, NULL)) {
973 vfs_file_put(file);
974 async_answer_0(callid, EINVAL);
975 async_answer_0(rid, EINVAL);
976 return;
977 }
978
979 fibril_mutex_lock(&file->lock);
980
981 async_exch_t *exch = vfs_exchange_grab(file->node->fs_handle);
982
983 aid_t msg;
984 msg = async_send_3(exch, VFS_OUT_STAT, file->node->service_id,
985 file->node->index, true, NULL);
986 async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);
987
988 vfs_exchange_release(exch);
989
990 async_wait_for(msg, &rc);
991
992 fibril_mutex_unlock(&file->lock);
993 vfs_file_put(file);
994 async_answer_0(rid, rc);
995}
996
/** Handle the VFS_IN_STAT request.
 *
 * Receives a path from the client, resolves it to a VFS node and routes
 * the client's pending IPC_M_DATA_READ through a VFS_OUT_STAT request to
 * the owning FS server.
 *
 * @param rid     IPC call ID of the request; always answered.
 * @param request The VFS_IN_STAT call (unused beyond the data phases).
 */
void vfs_stat(ipc_callid_t rid, ipc_call_t *request)
{
	char *path;
	int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
	if (rc != EOK) {
		async_answer_0(rid, rc);
		return;
	}

	/* The client must follow up with a data-read for the stat buffer. */
	ipc_callid_t callid;
	if (!async_data_read_receive(&callid, NULL)) {
		free(path);
		async_answer_0(callid, EINVAL);
		async_answer_0(rid, EINVAL);
		return;
	}

	/* Resolve the path under the namespace read lock. */
	vfs_lookup_res_t lr;
	fibril_rwlock_read_lock(&namespace_rwlock);
	rc = vfs_lookup_internal(path, L_NONE, &lr, NULL);
	free(path);
	if (rc != EOK) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(callid, rc);
		async_answer_0(rid, rc);
		return;
	}
	vfs_node_t *node = vfs_node_get(&lr);
	if (!node) {
		fibril_rwlock_read_unlock(&namespace_rwlock);
		async_answer_0(callid, ENOMEM);
		async_answer_0(rid, ENOMEM);
		return;
	}

	fibril_rwlock_read_unlock(&namespace_rwlock);

	async_exch_t *exch = vfs_exchange_grab(node->fs_handle);

	/* Send VFS_OUT_STAT and forward the data-read to the FS server. */
	aid_t msg;
	msg = async_send_3(exch, VFS_OUT_STAT, node->service_id,
	    node->index, false, NULL);
	async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

	vfs_exchange_release(exch);

	sysarg_t rv;
	async_wait_for(msg, &rv);

	async_answer_0(rid, rv);

	/* Drop the reference obtained by vfs_node_get() above. */
	vfs_node_put(node);
}
1050
1051void vfs_mkdir(ipc_callid_t rid, ipc_call_t *request)
1052{
1053 int mode = IPC_GET_ARG1(*request);
1054
1055 char *path;
1056 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
1057 if (rc != EOK) {
1058 async_answer_0(rid, rc);
1059 return;
1060 }
1061
1062 /* Ignore mode for now. */
1063 (void) mode;
1064
1065 fibril_rwlock_write_lock(&namespace_rwlock);
1066 int lflag = L_DIRECTORY | L_CREATE | L_EXCLUSIVE;
1067 rc = vfs_lookup_internal(path, lflag, NULL, NULL);
1068 fibril_rwlock_write_unlock(&namespace_rwlock);
1069 free(path);
1070 async_answer_0(rid, rc);
1071}
1072
1073void vfs_unlink(ipc_callid_t rid, ipc_call_t *request)
1074{
1075 int lflag = IPC_GET_ARG1(*request);
1076
1077 char *path;
1078 int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
1079 if (rc != EOK) {
1080 async_answer_0(rid, rc);
1081 return;
1082 }
1083
1084 fibril_rwlock_write_lock(&namespace_rwlock);
1085 lflag &= L_DIRECTORY; /* sanitize lflag */
1086 vfs_lookup_res_t lr;
1087 rc = vfs_lookup_internal(path, lflag | L_UNLINK, &lr, NULL);
1088 free(path);
1089 if (rc != EOK) {
1090 fibril_rwlock_write_unlock(&namespace_rwlock);
1091 async_answer_0(rid, rc);
1092 return;
1093 }
1094
1095 /*
1096 * The name has already been unlinked by vfs_lookup_internal().
1097 * We have to get and put the VFS node to ensure that it is
1098 * VFS_OUT_DESTROY'ed after the last reference to it is dropped.
1099 */
1100 vfs_node_t *node = vfs_node_get(&lr);
1101 fibril_mutex_lock(&nodes_mutex);
1102 node->lnkcnt--;
1103 fibril_mutex_unlock(&nodes_mutex);
1104 fibril_rwlock_write_unlock(&namespace_rwlock);
1105 vfs_node_put(node);
1106 async_answer_0(rid, EOK);
1107}
1108
1109void vfs_rename(ipc_callid_t rid, ipc_call_t *request)
1110{
1111 /* Retrieve the old path. */
1112 char *old;
1113 int rc = async_data_write_accept((void **) &old, true, 0, 0, 0, NULL);
1114 if (rc != EOK) {
1115 async_answer_0(rid, rc);
1116 return;
1117 }
1118
1119 /* Retrieve the new path. */
1120 char *new;
1121 rc = async_data_write_accept((void **) &new, true, 0, 0, 0, NULL);
1122 if (rc != EOK) {
1123 free(old);
1124 async_answer_0(rid, rc);
1125 return;
1126 }
1127
1128 size_t olen;
1129 size_t nlen;
1130 char *oldc = canonify(old, &olen);
1131 char *newc = canonify(new, &nlen);
1132
1133 if ((!oldc) || (!newc)) {
1134 async_answer_0(rid, EINVAL);
1135 free(old);
1136 free(new);
1137 return;
1138 }
1139
1140 oldc[olen] = '\0';
1141 newc[nlen] = '\0';
1142
1143 if ((!str_lcmp(newc, oldc, str_length(oldc))) &&
1144 ((newc[str_length(oldc)] == '/') ||
1145 (str_length(oldc) == 1) ||
1146 (str_length(oldc) == str_length(newc)))) {
1147 /*
1148 * oldc is a prefix of newc and either
1149 * - newc continues with a / where oldc ends, or
1150 * - oldc was / itself, or
1151 * - oldc and newc are equal.
1152 */
1153 async_answer_0(rid, EINVAL);
1154 free(old);
1155 free(new);
1156 return;
1157 }
1158
1159 vfs_lookup_res_t old_lr;
1160 vfs_lookup_res_t new_lr;
1161 vfs_lookup_res_t new_par_lr;
1162 fibril_rwlock_write_lock(&namespace_rwlock);
1163
1164 /* Lookup the node belonging to the old file name. */
1165 rc = vfs_lookup_internal(oldc, L_NONE, &old_lr, NULL);
1166 if (rc != EOK) {
1167 fibril_rwlock_write_unlock(&namespace_rwlock);
1168 async_answer_0(rid, rc);
1169 free(old);
1170 free(new);
1171 return;
1172 }
1173
1174 vfs_node_t *old_node = vfs_node_get(&old_lr);
1175 if (!old_node) {
1176 fibril_rwlock_write_unlock(&namespace_rwlock);
1177 async_answer_0(rid, ENOMEM);
1178 free(old);
1179 free(new);
1180 return;
1181 }
1182
1183 /* Determine the path to the parent of the node with the new name. */
1184 char *parentc = str_dup(newc);
1185 if (!parentc) {
1186 fibril_rwlock_write_unlock(&namespace_rwlock);
1187 vfs_node_put(old_node);
1188 async_answer_0(rid, rc);
1189 free(old);
1190 free(new);
1191 return;
1192 }
1193
1194 char *lastsl = str_rchr(parentc + 1, '/');
1195 if (lastsl)
1196 *lastsl = '\0';
1197 else
1198 parentc[1] = '\0';
1199
1200 /* Lookup parent of the new file name. */
1201 rc = vfs_lookup_internal(parentc, L_NONE, &new_par_lr, NULL);
1202 free(parentc); /* not needed anymore */
1203 if (rc != EOK) {
1204 fibril_rwlock_write_unlock(&namespace_rwlock);
1205 vfs_node_put(old_node);
1206 async_answer_0(rid, rc);
1207 free(old);
1208 free(new);
1209 return;
1210 }
1211
1212 /* Check whether linking to the same file system instance. */
1213 if ((old_node->fs_handle != new_par_lr.triplet.fs_handle) ||
1214 (old_node->service_id != new_par_lr.triplet.service_id)) {
1215 fibril_rwlock_write_unlock(&namespace_rwlock);
1216 vfs_node_put(old_node);
1217 async_answer_0(rid, EXDEV); /* different file systems */
1218 free(old);
1219 free(new);
1220 return;
1221 }
1222
1223 /* Destroy the old link for the new name. */
1224 vfs_node_t *new_node = NULL;
1225 rc = vfs_lookup_internal(newc, L_UNLINK, &new_lr, NULL);
1226
1227 switch (rc) {
1228 case ENOENT:
1229 /* simply not in our way */
1230 break;
1231 case EOK:
1232 new_node = vfs_node_get(&new_lr);
1233 if (!new_node) {
1234 fibril_rwlock_write_unlock(&namespace_rwlock);
1235 vfs_node_put(old_node);
1236 async_answer_0(rid, ENOMEM);
1237 free(old);
1238 free(new);
1239 return;
1240 }
1241 fibril_mutex_lock(&nodes_mutex);
1242 new_node->lnkcnt--;
1243 fibril_mutex_unlock(&nodes_mutex);
1244 break;
1245 default:
1246 fibril_rwlock_write_unlock(&namespace_rwlock);
1247 vfs_node_put(old_node);
1248 async_answer_0(rid, ENOTEMPTY);
1249 free(old);
1250 free(new);
1251 return;
1252 }
1253
1254 /* Create the new link for the new name. */
1255 rc = vfs_lookup_internal(newc, L_LINK, NULL, NULL, old_node->index);
1256 if (rc != EOK) {
1257 fibril_rwlock_write_unlock(&namespace_rwlock);
1258 vfs_node_put(old_node);
1259 if (new_node)
1260 vfs_node_put(new_node);
1261 async_answer_0(rid, rc);
1262 free(old);
1263 free(new);
1264 return;
1265 }
1266
1267 fibril_mutex_lock(&nodes_mutex);
1268 old_node->lnkcnt++;
1269 fibril_mutex_unlock(&nodes_mutex);
1270
1271 /* Destroy the link for the old name. */
1272 rc = vfs_lookup_internal(oldc, L_UNLINK, NULL, NULL);
1273 if (rc != EOK) {
1274 fibril_rwlock_write_unlock(&namespace_rwlock);
1275 vfs_node_put(old_node);
1276 if (new_node)
1277 vfs_node_put(new_node);
1278 async_answer_0(rid, rc);
1279 free(old);
1280 free(new);
1281 return;
1282 }
1283
1284 fibril_mutex_lock(&nodes_mutex);
1285 old_node->lnkcnt--;
1286 fibril_mutex_unlock(&nodes_mutex);
1287 fibril_rwlock_write_unlock(&namespace_rwlock);
1288 vfs_node_put(old_node);
1289
1290 if (new_node)
1291 vfs_node_put(new_node);
1292
1293 free(old);
1294 free(new);
1295 async_answer_0(rid, EOK);
1296}
1297
1298void vfs_dup(ipc_callid_t rid, ipc_call_t *request)
1299{
1300 int oldfd = IPC_GET_ARG1(*request);
1301 int newfd = IPC_GET_ARG2(*request);
1302
1303 /* If the file descriptors are the same, do nothing. */
1304 if (oldfd == newfd) {
1305 async_answer_1(rid, EOK, newfd);
1306 return;
1307 }
1308
1309 /* Lookup the file structure corresponding to oldfd. */
1310 vfs_file_t *oldfile = vfs_file_get(oldfd);
1311 if (!oldfile) {
1312 async_answer_0(rid, EBADF);
1313 return;
1314 }
1315
1316 /*
1317 * Lock the open file structure so that no other thread can manipulate
1318 * the same open file at a time.
1319 */
1320 fibril_mutex_lock(&oldfile->lock);
1321
1322 /* Make sure newfd is closed. */
1323 (void) vfs_fd_free(newfd);
1324
1325 /* Assign the old file to newfd. */
1326 int ret = vfs_fd_assign(oldfile, newfd);
1327 fibril_mutex_unlock(&oldfile->lock);
1328 vfs_file_put(oldfile);
1329
1330 if (ret != EOK)
1331 async_answer_0(rid, ret);
1332 else
1333 async_answer_1(rid, EOK, newfd);
1334}
1335
1336void vfs_wait_handle(ipc_callid_t rid, ipc_call_t *request)
1337{
1338 int fd = vfs_wait_handle_internal();
1339 async_answer_1(rid, EOK, fd);
1340}
1341
/** Handle VFS_IN_GET_MTAB: stream the mount table to the client.
 *
 * Protocol (all under mtab_list_lock, so the table cannot change mid-stream):
 *  1. Client sends VFS_IN_PING; we answer with the number of mounted
 *     file systems (mtab_size).
 *  2. For every mount table entry, the client issues three data reads
 *     (mount point, mount options, fs name) followed by three VFS_IN_PING
 *     calls answered with flags, instance, and fs handle, respectively.
 * Any deviation from this sequence aborts the transfer with ENOTSUP.
 *
 * @param rid		Hash of the IPC request to answer at the end.
 * @param request	The IPC request (unused).
 */
void vfs_get_mtab(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_callid_t callid;
	ipc_call_t data;
	sysarg_t rc = EOK;
	size_t len;

	fibril_mutex_lock(&mtab_list_lock);

	/* Send to the caller the number of mounted filesystems */
	callid = async_get_call(&data);
	if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
		rc = ENOTSUP;
		async_answer_1(callid, rc, 0);
		goto exit;
	}
	async_answer_1(callid, EOK, mtab_size);

	list_foreach(mtab_list, cur) {
		mtab_ent_t *mtab_ent = list_get_instance(cur, mtab_ent_t,
		    link);

		/* Assume protocol violation until the entry completes. */
		rc = ENOTSUP;

		/* String #1: the mount point path. */
		if (!async_data_read_receive(&callid, &len))
			goto exit;

		(void) async_data_read_finalize(callid, mtab_ent->mp,
		    str_size(mtab_ent->mp));

		/* String #2: the mount options. */
		if (!async_data_read_receive(&callid, &len))
			goto exit;

		(void) async_data_read_finalize(callid, mtab_ent->opts,
		    str_size(mtab_ent->opts));

		/* String #3: the file system name. */
		if (!async_data_read_receive(&callid, &len))
			goto exit;

		(void) async_data_read_finalize(callid, mtab_ent->fs_name,
		    str_size(mtab_ent->fs_name));

		/* Scalar fields, delivered one per VFS_IN_PING below. */
		sysarg_t p[3];

		p[0] = mtab_ent->flags;
		p[1] = mtab_ent->instance;
		p[2] = mtab_ent->fs_handle;

		int i;
		for (i = 0; i < 3; ++i) {
			callid = async_get_call(&data);
			if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
				rc = ENOTSUP;
				async_answer_1(callid, rc, 0);
				goto exit;
			}
			async_answer_1(callid, EOK, p[i]);
		}

		/* Entry transferred in full. */
		rc = EOK;
	}

exit:
	fibril_mutex_unlock(&mtab_list_lock);
	async_answer_0(rid, rc);
}
1408
1409/**
1410 * @}
1411 */
Note: See TracBrowser for help on using the repository browser.