Changeset b75e929 in mainline
- Timestamp: 2011-01-26T00:24:57Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: a28ab12
- Parents: 23882034
- Location: uspace/srv/vfs
- Files: 4 edited
Legend:
- The diffs below show the change from r23882034 to rb75e929.
- Unmodified lines have no prefix.
- Added lines are prefixed with +.
- Removed lines are prefixed with -.
- Elided context is marked with ….
uspace/srv/vfs/vfs.c
     ipc_answer_0(iid, EOK);
 
-    /*
-     * Here we enter the main connection fibril loop.
-     * The logic behind this loop and the protocol is that we'd like to keep
-     * each connection open until the client hangs up. When the client hangs
-     * up, we will free its VFS state. The act of hanging up the connection
-     * by the client is equivalent to client termination because we cannot
-     * distinguish one from the other. On the other hand, the client can
-     * hang up arbitrarily if it has no open files and reestablish the
-     * connection later.
-     */
     while (keep_on_going) {
         ipc_call_t call;
…
         }
     }
-
-    vfs_files_done();
+
+    /*
+     * Open files for this client will be cleaned up when its last
+     * connection fibril terminates.
+     */
 }
…
 
     /*
+     * Set client data constructor and destructor.
+     */
+    async_set_client_data_constructor(vfs_client_data_create);
+    async_set_client_data_destructor(vfs_client_data_destroy);
+
+    /*
      * Set a connection handling function/fibril.
      */
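The last hunk above is the core of the change: instead of each connection fibril tearing down its own file table, the server registers a constructor/destructor pair with the async framework, which creates per-client state when a client task first connects and destroys it once the client's last connection fibril terminates. Below is a minimal sketch of that registration pattern; the my_state_t type, the my_client_data_* names and the shape of main() are illustrative assumptions, only the async_set_client_data_constructor()/async_set_client_data_destructor() calls and the vfs_client_data_create()/vfs_client_data_destroy() hook names come from the changeset.

/* Sketch only: server-side use of the per-client data hooks.
 * Headers and the connection handler are elided. */

typedef struct {
    fibril_mutex_t lock;    /* protects the lazily allocated resource below */
    void *table;            /* per-client resource, allocated on first use */
} my_state_t;

static void *my_client_data_create(void)
{
    /* Invoked by the async framework for each new client task. */
    my_state_t *state = malloc(sizeof(my_state_t));
    if (state) {
        fibril_mutex_initialize(&state->lock);
        state->table = NULL;
    }
    return state;
}

static void my_client_data_destroy(void *arg)
{
    /* Invoked after the client's last connection fibril has ended. */
    my_state_t *state = (my_state_t *) arg;
    free(state->table);
    free(state);
}

int main(int argc, char *argv[])
{
    /* Register the hooks before any client can connect. */
    async_set_client_data_constructor(my_client_data_create);
    async_set_client_data_destructor(my_client_data_destroy);

    /* ...set the connection handler and enter the async manager... */
    return 0;
}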
uspace/srv/vfs/vfs.h
 #define MAX_OPEN_FILES 128
 
-extern bool vfs_files_init(void);
-extern void vfs_files_done(void);
+extern void *vfs_client_data_create(void);
+extern void vfs_client_data_destroy(void *);
+
 extern vfs_file_t *vfs_file_get(int);
 extern int vfs_fd_assign(vfs_file_t *file, int fd);
 extern int vfs_fd_alloc(bool desc);
 extern int vfs_fd_free(int);
 
-extern void vfs_file_addref(vfs_file_t *);
-extern void vfs_file_delref(vfs_file_t *);
-
 extern void vfs_node_addref(vfs_node_t *);
uspace/srv/vfs/vfs_file.c
 #include "vfs.h"
 
-/**
- * This is a per-connection table of open files.
- * Our assumption is that each client opens only one connection and therefore
- * there is one table of open files per task. However, this may not be the case
- * and the client can open more connections to VFS. In that case, there will be
- * several tables and several file handle name spaces per task. Besides of this,
- * the functionality will stay unchanged. So unless the client knows what it is
- * doing, it should open one connection to VFS only.
- *
- * Allocation of the open files table is deferred until the client makes the
- * first VFS_OPEN operation.
- *
- * This resource being per-connection and, in the first place, per-fibril, we
- * don't need to protect it by a mutex.
- */
-fibril_local vfs_file_t **files = NULL;
+#define VFS_DATA ((vfs_client_data_t *) async_client_data_get())
+#define FILES (VFS_DATA->files)
+
+typedef struct {
+    fibril_mutex_t lock;
+    vfs_file_t **files;
+} vfs_client_data_t;
 
 /** Initialize the table of open files. */
-bool vfs_files_init(void)
-{
-    if (!files) {
-        files = malloc(MAX_OPEN_FILES * sizeof(vfs_file_t *));
-        if (!files)
+static bool vfs_files_init(void)
+{
+    fibril_mutex_lock(&VFS_DATA->lock);
+    if (!FILES) {
+        FILES = malloc(MAX_OPEN_FILES * sizeof(vfs_file_t *));
+        if (!FILES) {
+            fibril_mutex_unlock(&VFS_DATA->lock);
             return false;
-        memset(files, 0, MAX_OPEN_FILES * sizeof(vfs_file_t *));
-    }
+        }
+        memset(FILES, 0, MAX_OPEN_FILES * sizeof(vfs_file_t *));
+    }
+    fibril_mutex_unlock(&VFS_DATA->lock);
     return true;
 }
 
 /** Cleanup the table of open files. */
-void vfs_files_done(void)
+static void vfs_files_done(void)
 {
     int i;
 
-    if (!files)
+    if (!FILES)
         return;
 
     for (i = 0; i < MAX_OPEN_FILES; i++) {
-        if (files[i]) {
-            (void) vfs_close_internal(files[i]);
+        if (FILES[i]) {
+            (void) vfs_close_internal(FILES[i]);
             (void) vfs_fd_free(i);
         }
     }
 
-    free(files);
-}
+    free(FILES);
+}
+
+void *vfs_client_data_create(void)
+{
+    vfs_client_data_t *vfs_data;
+
+    vfs_data = malloc(sizeof(vfs_client_data_t));
+    if (vfs_data) {
+        fibril_mutex_initialize(&vfs_data->lock);
+        vfs_data->files = NULL;
+    }
+
+    return vfs_data;
+}
+
+void vfs_client_data_destroy(void *data)
+{
+    vfs_client_data_t *vfs_data = (vfs_client_data_t *) data;
+
+    vfs_files_done();
+    free(vfs_data);
+}
+
+/** Increment reference count of VFS file structure.
+ *
+ * @param file File structure that will have reference count
+ *             incremented.
+ */
+static void vfs_file_addref(vfs_file_t *file)
+{
+    assert(fibril_mutex_is_locked(&VFS_DATA->lock));
+
+    file->refcnt++;
+}
+
+/** Decrement reference count of VFS file structure.
+ *
+ * @param file File structure that will have reference count
+ *             decremented.
+ */
+static void vfs_file_delref(vfs_file_t *file)
+{
+    assert(fibril_mutex_is_locked(&VFS_DATA->lock));
+
+    if (file->refcnt-- == 1) {
+        /*
+         * Lost the last reference to a file, need to drop our reference
+         * to the underlying VFS node.
+         */
+        vfs_node_delref(file->node);
+        free(file);
+    }
+}
 
 /** Allocate a file descriptor.
…
     i = 0;
 
+    fibril_mutex_lock(&VFS_DATA->lock);
     while (true) {
-        if (!files[i]) {
-            files[i] = (vfs_file_t *) malloc(sizeof(vfs_file_t));
-            if (!files[i])
+        if (!FILES[i]) {
+            FILES[i] = (vfs_file_t *) malloc(sizeof(vfs_file_t));
+            if (!FILES[i]) {
+                fibril_mutex_unlock(&VFS_DATA->lock);
                 return ENOMEM;
+            }
 
-            memset(files[i], 0, sizeof(vfs_file_t));
-            fibril_mutex_initialize(&files[i]->lock);
-            vfs_file_addref(files[i]);
+            memset(FILES[i], 0, sizeof(vfs_file_t));
+            fibril_mutex_initialize(&FILES[i]->lock);
+            vfs_file_addref(FILES[i]);
+            fibril_mutex_unlock(&VFS_DATA->lock);
             return (int) i;
         }
…
         }
     }
+    fibril_mutex_unlock(&VFS_DATA->lock);
 
     return EMFILE;
…
     if (!vfs_files_init())
         return ENOMEM;
 
-    if ((fd < 0) || (fd >= MAX_OPEN_FILES) || (files[fd] == NULL))
+    fibril_mutex_lock(&VFS_DATA->lock);
+    if ((fd < 0) || (fd >= MAX_OPEN_FILES) || (FILES[fd] == NULL)) {
+        fibril_mutex_unlock(&VFS_DATA->lock);
         return EBADF;
+    }
 
-    vfs_file_delref(files[fd]);
-    files[fd] = NULL;
+    vfs_file_delref(FILES[fd]);
+    FILES[fd] = NULL;
+    fibril_mutex_unlock(&VFS_DATA->lock);
 
     return EOK;
…
     if (!vfs_files_init())
         return ENOMEM;
 
-    if ((fd < 0) || (fd >= MAX_OPEN_FILES) || (files[fd] != NULL))
+    fibril_mutex_lock(&VFS_DATA->lock);
+    if ((fd < 0) || (fd >= MAX_OPEN_FILES) || (FILES[fd] != NULL)) {
+        fibril_mutex_unlock(&VFS_DATA->lock);
         return EINVAL;
+    }
 
-    files[fd] = file;
-    vfs_file_addref(files[fd]);
+    FILES[fd] = file;
+    vfs_file_addref(FILES[fd]);
+    fibril_mutex_unlock(&VFS_DATA->lock);
 
     return EOK;
 }
 
-/** Increment reference count of VFS file structure.
- *
- * @param file File structure that will have reference count
- *             incremented.
- */
-void vfs_file_addref(vfs_file_t *file)
-{
-    /*
-     * File structures are per-connection, so no-one, except the current
-     * fibril, should have a reference to them. This is the reason we don't
-     * do any synchronization here.
-     */
-    file->refcnt++;
-}
-
-/** Decrement reference count of VFS file structure.
- *
- * @param file File structure that will have reference count
- *             decremented.
- */
-void vfs_file_delref(vfs_file_t *file)
-{
-    if (file->refcnt-- == 1) {
-        /*
-         * Lost the last reference to a file, need to drop our reference
-         * to the underlying VFS node.
-         */
-        vfs_node_delref(file->node);
-        free(file);
-    }
-}
-
 /** Find VFS file structure for a given file descriptor.
…
         return NULL;
 
-    if ((fd >= 0) && (fd < MAX_OPEN_FILES))
-        return files[fd];
+    fibril_mutex_lock(&VFS_DATA->lock);
+    if ((fd >= 0) && (fd < MAX_OPEN_FILES)) {
+        vfs_file_t *file = FILES[fd];
+        fibril_mutex_unlock(&VFS_DATA->lock);
+        return file;
+    }
+    fibril_mutex_unlock(&VFS_DATA->lock);
 
     return NULL;
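Taken together, the vfs_file.c hunks replace the old fibril-local files array with a vfs_client_data_t shared by all connection fibrils of one client, so every access to the FILES table is now bracketed by the per-client fibril mutex, while vfs_file_addref()/vfs_file_delref() merely assert that the caller already holds it. The helper below illustrates the same access pattern; vfs_fd_is_used() is not part of the changeset and is purely hypothetical, but it uses only the macros and functions introduced above.

/* Hypothetical helper, not in the changeset: reports whether a file
 * descriptor is currently in use, following the locking discipline that
 * vfs_fd_free() and vfs_fd_assign() adopt above. */
static bool vfs_fd_is_used(int fd)
{
    bool used = false;

    /* Lazily allocate the per-client table, exactly like the real callers. */
    if (!vfs_files_init())
        return false;

    fibril_mutex_lock(&VFS_DATA->lock);
    if ((fd >= 0) && (fd < MAX_OPEN_FILES))
        used = (FILES[fd] != NULL);
    fibril_mutex_unlock(&VFS_DATA->lock);

    return used;
}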
uspace/srv/vfs/vfs_ops.c
 void vfs_open(ipc_callid_t rid, ipc_call_t *request)
 {
-    if (!vfs_files_init()) {
-        ipc_answer_0(rid, ENOMEM);
-        return;
-    }
-
     /*
      * The POSIX interface is open(path, oflag, mode).
…
     // FIXME: check for sanity of the supplied fs, dev and index
 
-    if (!vfs_files_init()) {
-        ipc_answer_0(rid, ENOMEM);
-        return;
-    }
-
     /*
      * The interface is open_node(fs, dev, index, oflag).
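Both removals are possible because table initialization now lives entirely inside vfs_file.c: the descriptor-layer functions call the now-static vfs_files_init() on demand (vfs_fd_free() and vfs_fd_assign() visibly do so in the hunks above). A hypothetical operation handler relying on that lazy initialization might look like the sketch below; the handler itself and its answer protocol are assumptions, only vfs_fd_alloc() and the IPC answer calls appear in the changeset.

/* Hypothetical handler, not in the changeset: it never initializes the
 * open-files table explicitly, relying on the fd layer to do so on first
 * use. Errors are passed back to the client as-is, mirroring the style of
 * the surrounding VFS code. */
static void example_op(ipc_callid_t rid, ipc_call_t *request)
{
    int fd = vfs_fd_alloc(false);
    if (fd < 0) {
        /* ENOMEM or EMFILE from the fd layer. */
        ipc_answer_0(rid, fd);
        return;
    }

    /* ...resolve the node and attach it to the new descriptor... */

    ipc_answer_1(rid, EOK, fd);
}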