Changeset bd5f3b7 in mainline for kernel/generic/src
- Timestamp:
- 2011-08-21T13:07:35Z (14 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 00aece0, f1a9e87
- Parents:
- 86a34d3e (diff), a6480d5 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- kernel/generic/src
- Files:
- 16 edited
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/generic/src/console/console.c
r86a34d3e → rbd5f3b7

 }

-void klog_update(void)
+void klog_update(void *event)
 {
     if (!atomic_get(&klog_inited))
…
     /* Force notification on newline */
     if (ch == '\n')
-        klog_update();
+        klog_update(NULL);
 }

…
         free(data);
     } else
-        klog_update();
+        klog_update(NULL);

     return size;
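Side note (illustrative, not part of the changeset): the extra void * parameter makes klog_update match the event_callback_t shape used in event.c below, so it can presumably be registered directly as the unmask callback of the kernel log event. A minimal, hypothetical sketch of such a registration, assuming the EVENT_KLOG event type and the event_set_unmask_callback() prototype shown further down:

#include <console/console.h>
#include <ipc/event.h>

/* Hypothetical sketch: with the new signature, klog_update can serve
 * directly as the unmask callback, so a consumer that unmasks the kernel
 * log event immediately receives a fresh notification. */
static void klog_register_unmask_callback(void)
{
    event_set_unmask_callback(EVENT_KLOG, klog_update);
}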
kernel/generic/src/ddi/ddi.c
r86a34d3e → rbd5f3b7


 #include <ddi/ddi.h>
-#include <ddi/ddi_arg.h>
 #include <proc/task.h>
 #include <security/cap.h>
kernel/generic/src/ipc/event.c
r86a34d3e → rbd5f3b7


 #include <ipc/event.h>
-#include <ipc/event_types.h>
 #include <mm/slab.h>
 #include <typedefs.h>
 #include <synch/spinlock.h>
 #include <console/console.h>
+#include <proc/task.h>
 #include <memstr.h>
 #include <errno.h>
…
 static event_t events[EVENT_END];

+static void event_initialize(event_t *event)
+{
+    spinlock_initialize(&event->lock, "event.lock");
+    event->answerbox = NULL;
+    event->counter = 0;
+    event->imethod = 0;
+    event->masked = false;
+    event->unmask_callback = NULL;
+}
+
+static event_t *evno2event(int evno, task_t *t)
+{
+    ASSERT(evno < EVENT_TASK_END);
+
+    event_t *event;
+
+    if (evno < EVENT_END)
+        event = &events[(event_type_t) evno];
+    else
+        event = &t->events[(event_task_type_t) evno - EVENT_END];
+
+    return event;
+}
+
 /** Initialize kernel events.
  *
…
 void event_init(void)
 {
-    for (unsigned int i = 0; i < EVENT_END; i++) {
-        spinlock_initialize(&events[i].lock, "event.lock");
-        events[i].answerbox = NULL;
-        events[i].counter = 0;
-        events[i].imethod = 0;
-        events[i].masked = false;
-        events[i].unmask_callback = NULL;
-    }
-}
+    for (unsigned int i = 0; i < EVENT_END; i++)
+        event_initialize(evno2event(i, NULL));
+}
+
+void event_task_init(task_t *task)
+{
+    for (unsigned int i = EVENT_END; i < EVENT_TASK_END; i++)
+        event_initialize(evno2event(i, task));
+}
+

 /** Unsubscribe kernel events associated with an answerbox
…
 }

+static void _event_set_unmask_callback(event_t *event, event_callback_t callback)
+{
+    spinlock_lock(&event->lock);
+    event->unmask_callback = callback;
+    spinlock_unlock(&event->lock);
+}
+
 /** Define a callback function for the event unmask event.
  *
…
     ASSERT(evno < EVENT_END);

-    spinlock_lock(&events[evno].lock);
-    events[evno].unmask_callback = callback;
-    spinlock_unlock(&events[evno].lock);
+    _event_set_unmask_callback(evno2event(evno, NULL), callback);
+}
+
+void event_task_set_unmask_callback(task_t *task, event_task_type_t evno,
+    event_callback_t callback)
+{
+    ASSERT(evno >= (int) EVENT_END);
+    ASSERT(evno < EVENT_TASK_END);
+
+    _event_set_unmask_callback(evno2event(evno, task), callback);
+}
+
+static int event_enqueue(event_t *event, bool mask, sysarg_t a1, sysarg_t a2,
+    sysarg_t a3, sysarg_t a4, sysarg_t a5)
+{
+    int res;
+
+    spinlock_lock(&event->lock);
+
+    if (event->answerbox != NULL) {
+        if (!event->masked) {
+            call_t *call = ipc_call_alloc(FRAME_ATOMIC);
+
+            if (call) {
+                call->flags |= IPC_CALL_NOTIF;
+                call->priv = ++event->counter;
+
+                IPC_SET_IMETHOD(call->data, event->imethod);
+                IPC_SET_ARG1(call->data, a1);
+                IPC_SET_ARG2(call->data, a2);
+                IPC_SET_ARG3(call->data, a3);
+                IPC_SET_ARG4(call->data, a4);
+                IPC_SET_ARG5(call->data, a5);
+
+                call->data.task_id = TASK ? TASK->taskid : 0;
+
+                irq_spinlock_lock(&event->answerbox->irq_lock, true);
+                list_append(&call->link, &event->answerbox->irq_notifs);
+                irq_spinlock_unlock(&event->answerbox->irq_lock, true);
+
+                waitq_wakeup(&event->answerbox->wq, WAKEUP_FIRST);
+
+                if (mask)
+                    event->masked = true;
+
+                res = EOK;
+            } else
+                res = ENOMEM;
+        } else
+            res = EBUSY;
+    } else
+        res = ENOENT;
+
+    spinlock_unlock(&event->lock);
+    return res;
 }

…
     ASSERT(evno < EVENT_END);

-    spinlock_lock(&events[evno].lock);
-
-    int ret;
-
-    if (events[evno].answerbox != NULL) {
-        if (!events[evno].masked) {
-            call_t *call = ipc_call_alloc(FRAME_ATOMIC);
-
-            if (call) {
-                call->flags |= IPC_CALL_NOTIF;
-                call->priv = ++events[evno].counter;
-
-                IPC_SET_IMETHOD(call->data, events[evno].imethod);
-                IPC_SET_ARG1(call->data, a1);
-                IPC_SET_ARG2(call->data, a2);
-                IPC_SET_ARG3(call->data, a3);
-                IPC_SET_ARG4(call->data, a4);
-                IPC_SET_ARG5(call->data, a5);
-
-                irq_spinlock_lock(&events[evno].answerbox->irq_lock, true);
-                list_append(&call->link, &events[evno].answerbox->irq_notifs);
-                irq_spinlock_unlock(&events[evno].answerbox->irq_lock, true);
-
-                waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST);
-
-                if (mask)
-                    events[evno].masked = true;
-
-                ret = EOK;
-            } else
-                ret = ENOMEM;
-        } else
-            ret = EBUSY;
-    } else
-        ret = ENOENT;
-
-    spinlock_unlock(&events[evno].lock);
-
-    return ret;
+    return event_enqueue(evno2event(evno, NULL), mask, a1, a2, a3, a4, a5);
+}
+
+/** Send per-task kernel notification event
+ *
+ * @param task Destination task.
+ * @param evno Event type.
+ * @param mask Mask further notifications after a successful
+ *             sending.
+ * @param a1   First argument.
+ * @param a2   Second argument.
+ * @param a3   Third argument.
+ * @param a4   Fourth argument.
+ * @param a5   Fifth argument.
+ *
+ * @return EOK if notification was successfully sent.
+ * @return ENOMEM if the notification IPC message failed to allocate.
+ * @return EBUSY if the notifications of the given type are
+ *         currently masked.
+ * @return ENOENT if the notifications of the given type are
+ *         currently not subscribed.
+ *
+ */
+int event_task_notify(task_t *task, event_task_type_t evno, bool mask,
+    sysarg_t a1, sysarg_t a2, sysarg_t a3, sysarg_t a4, sysarg_t a5)
+{
+    ASSERT(evno >= (int) EVENT_END);
+    ASSERT(evno < EVENT_TASK_END);
+
+    return event_enqueue(evno2event(evno, task), mask, a1, a2, a3, a4, a5);
 }

…
  *
  */
-static int event_subscribe(event_type_t evno, sysarg_t imethod,
+static int event_subscribe(event_t *event, sysarg_t imethod,
     answerbox_t *answerbox)
 {
-    ASSERT(evno < EVENT_END);
-
-    spinlock_lock(&events[evno].lock);
-
     int res;
-
-    if (events[evno].answerbox == NULL) {
-        events[evno].answerbox = answerbox;
-        events[evno].imethod = imethod;
-        events[evno].counter = 0;
-        events[evno].masked = false;
+
+    spinlock_lock(&event->lock);
+
+    if (event->answerbox == NULL) {
+        event->answerbox = answerbox;
+        event->imethod = imethod;
+        event->counter = 0;
+        event->masked = false;
         res = EOK;
     } else
         res = EEXISTS;

-    spinlock_unlock(&events[evno].lock);
+    spinlock_unlock(&event->lock);

     return res;
…
  *
  */
-static void event_unmask(event_type_t evno)
-{
-    ASSERT(evno < EVENT_END);
-
-    spinlock_lock(&events[evno].lock);
-    events[evno].masked = false;
-    event_callback_t callback = events[evno].unmask_callback;
-    spinlock_unlock(&events[evno].lock);
+static void event_unmask(event_t *event)
+{
+    spinlock_lock(&event->lock);
+    event->masked = false;
+    event_callback_t callback = event->unmask_callback;
+    spinlock_unlock(&event->lock);

     /*
…
      */
     if (callback != NULL)
-        callback();
+        callback(event);
 }

…
 sysarg_t sys_event_subscribe(sysarg_t evno, sysarg_t imethod)
 {
-    if (evno >= EVENT_END)
+    if (evno >= EVENT_TASK_END)
         return ELIMIT;

-    return (sysarg_t) event_subscribe((event_type_t) evno, (sysarg_t)
-        imethod, &TASK->answerbox);
+    return (sysarg_t) event_subscribe(evno2event(evno, TASK),
+        (sysarg_t) imethod, &TASK->answerbox);
 }

…
 sysarg_t sys_event_unmask(sysarg_t evno)
 {
-    if (evno >= EVENT_END)
+    if (evno >= EVENT_TASK_END)
         return ELIMIT;

-    event_unmask((event_type_t) evno);
+    event_unmask(evno2event(evno, TASK));
+
     return EOK;
 }
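Illustrative sketch (not part of the changeset): the new per-task interface mirrors the global one, keyed by a task_t * and an event_task_type_t in the range [EVENT_END, EVENT_TASK_END). A hypothetical kernel-side producer using the event_task_notify() prototype added above; the helper name and arguments are made up, while EVENT_TASK_STATE_CHANGE is the type used by the sysipc.c hunks below:

#include <ipc/event.h>
#include <proc/task.h>

/* Hypothetical helper: deliver a per-task notification to 'task' if it has
 * subscribed via sys_event_subscribe(). event_task_notify() returns ENOENT
 * when nothing is subscribed and EBUSY when the event is currently masked,
 * so a best-effort producer can simply ignore the result. */
static void notify_task_example(task_t *task, sysarg_t a1, sysarg_t a2)
{
    (void) event_task_notify(task, EVENT_TASK_STATE_CHANGE, false,
        a1, a2, 0, 0, 0);
}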
kernel/generic/src/ipc/ipc.c
r86a34d3e → rbd5f3b7

 */

-#include <synch/synch.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
 #include <synch/waitq.h>
-#include <synch/synch.h>
 #include <ipc/ipc.h>
-#include <ipc/ipc_methods.h>
+#include <abi/ipc/methods.h>
 #include <ipc/kbox.h>
 #include <ipc/event.h>
…
         }
     }
+
+    call->data.task_id = TASK->taskid;

     if (do_lock)
…
     atomic_inc(&phone->active_calls);
     call->data.phone = phone;
-    call->data.task = TASK;
+    call->data.task_id = TASK->taskid;

 }
…
     call->caller_phone = call->data.phone;
     call->data.phone = newphone;
-    call->data.task = TASK;
+    call->data.task_id = TASK->taskid;
 }

kernel/generic/src/ipc/kbox.c
r86a34d3e → rbd5f3b7

 */

-#include <synch/synch.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
 #include <ipc/ipc.h>
-#include <ipc/ipc_methods.h>
+#include <abi/ipc/methods.h>
 #include <ipc/ipcrsc.h>
 #include <arch.h>
kernel/generic/src/ipc/sysipc.c
r86a34d3e → rbd5f3b7

 #include <debug.h>
 #include <ipc/ipc.h>
-#include <ipc/ipc_methods.h>
+#include <abi/ipc/methods.h>
 #include <ipc/sysipc.h>
 #include <ipc/irq.h>
 #include <ipc/ipcrsc.h>
+#include <ipc/event.h>
 #include <ipc/kbox.h>
 #include <synch/waitq.h>
…
 #include <mm/as.h>
 #include <print.h>
+#include <macros.h>

 /**
…
     case IPC_M_DATA_WRITE:
     case IPC_M_DATA_READ:
+    case IPC_M_STATE_CHANGE_AUTHORIZE:
         return true;
     default:
…
     case IPC_M_DATA_WRITE:
     case IPC_M_DATA_READ:
+    case IPC_M_STATE_CHANGE_AUTHORIZE:
         return true;
     default:
…
         /* The connection was accepted */
         phone_connect(phoneid, &answer->sender->answerbox);
-        /* Set 'task hash' as arg4 of response */
-        IPC_SET_ARG4(answer->data, (sysarg_t) TASK);
         /* Set 'phone hash' as arg5 of response */
         IPC_SET_ARG5(answer->data,
…
         free(answer->buffer);
         answer->buffer = NULL;
+    } else if (IPC_GET_IMETHOD(*olddata) == IPC_M_STATE_CHANGE_AUTHORIZE) {
+        if (!IPC_GET_RETVAL(answer->data)) {
+            /* The recipient authorized the change of state. */
+            phone_t *recipient_phone;
+            task_t *other_task_s;
+            task_t *other_task_r;
+            int rc;
+
+            rc = phone_get(IPC_GET_ARG1(answer->data),
+                &recipient_phone);
+            if (rc != EOK) {
+                IPC_SET_RETVAL(answer->data, ENOENT);
+                return ENOENT;
+            }
+
+            mutex_lock(&recipient_phone->lock);
+            if (recipient_phone->state != IPC_PHONE_CONNECTED) {
+                mutex_unlock(&recipient_phone->lock);
+                IPC_SET_RETVAL(answer->data, EINVAL);
+                return EINVAL;
+            }
+
+            other_task_r = recipient_phone->callee->task;
+            other_task_s = (task_t *) IPC_GET_ARG5(*olddata);
+
+            /*
+             * See if both the sender and the recipient meant the
+             * same third party task.
+             */
+            if (other_task_r != other_task_s) {
+                IPC_SET_RETVAL(answer->data, EINVAL);
+                rc = EINVAL;
+            } else {
+                rc = event_task_notify_5(other_task_r,
+                    EVENT_TASK_STATE_CHANGE, false,
+                    IPC_GET_ARG1(*olddata),
+                    IPC_GET_ARG2(*olddata),
+                    IPC_GET_ARG3(*olddata),
+                    LOWER32(olddata->task_id),
+                    UPPER32(olddata->task_id));
+                IPC_SET_RETVAL(answer->data, rc);
+            }
+
+            mutex_unlock(&recipient_phone->lock);
+            return rc;
+        }
     }

…
     case IPC_M_DATA_READ: {
         size_t size = IPC_GET_ARG2(call->data);
-        if (size <= 0)
-            return ELIMIT;
         if (size > DATA_XFER_LIMIT) {
             int flags = IPC_GET_ARG3(call->data);
…
         }

+        break;
+    }
+    case IPC_M_STATE_CHANGE_AUTHORIZE: {
+        phone_t *sender_phone;
+        task_t *other_task_s;
+
+        if (phone_get(IPC_GET_ARG5(call->data), &sender_phone) != EOK)
+            return ENOENT;
+
+        mutex_lock(&sender_phone->lock);
+        if (sender_phone->state != IPC_PHONE_CONNECTED) {
+            mutex_unlock(&sender_phone->lock);
+            return EINVAL;
+        }
+
+        other_task_s = sender_phone->callee->task;
+
+        mutex_unlock(&sender_phone->lock);
+
+        /* Remember the third party task hash. */
+        IPC_SET_ARG5(call->data, (sysarg_t) other_task_s);
         break;
     }
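Orientation note (illustrative, not part of the changeset): on a successful IPC_M_STATE_CHANGE_AUTHORIZE answer, the kernel forwards the original ARG1-ARG3 to the third-party task and splits the requesting task's 64-bit ID across ARG4/ARG5 using LOWER32/UPPER32. A subscriber of EVENT_TASK_STATE_CHANGE would therefore reassemble the sender ID roughly as follows; the function name and parameters are hypothetical:

#include <stdint.h>

/* Hypothetical receiver-side sketch: rebuild the 64-bit sender task ID from
 * the two 32-bit halves delivered as ARG4 (low word) and ARG5 (high word)
 * of the EVENT_TASK_STATE_CHANGE notification. */
static uint64_t state_change_sender_id(uint32_t arg4_low, uint32_t arg5_high)
{
    return (((uint64_t) arg5_high) << 32) | arg4_low;
}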
kernel/generic/src/lib/elf.c
r86a34d3e → rbd5f3b7

 #include <macros.h>
 #include <arch.h>
+
+#include <lib/elf_load.h>

 static const char *error_codes[] = {
kernel/generic/src/proc/program.c
r86a34d3e → rbd5f3b7

 #include <proc/thread.h>
 #include <proc/task.h>
-#include <proc/uarg.h>
 #include <mm/as.h>
 #include <mm/slab.h>
…
 #include <ipc/ipcrsc.h>
 #include <security/cap.h>
-#include <lib/elf.h>
+#include <lib/elf_load.h>
 #include <errno.h>
 #include <print.h>
kernel/generic/src/proc/task.c
r86a34d3e → rbd5f3b7

 #include <ipc/ipc.h>
 #include <ipc/ipcrsc.h>
+#include <ipc/event.h>
 #include <print.h>
 #include <errno.h>
…
 #include <syscall/copy.h>
 #include <macros.h>
-#include <ipc/event.h>

 /** Spinlock protecting the tasks_tree AVL tree. */
…
     task->ipc_info.irq_notif_received = 0;
     task->ipc_info.forwarded = 0;
+
+    event_task_init(task);

 #ifdef CONFIG_UDEBUG
kernel/generic/src/proc/thread.c
r86a34d3e → rbd5f3b7

 #include <proc/thread.h>
 #include <proc/task.h>
-#include <proc/uarg.h>
 #include <mm/frame.h>
 #include <mm/page.h>
…
 #include <arch/cycle.h>
 #include <arch.h>
-#include <synch/synch.h>
 #include <synch/spinlock.h>
 #include <synch/waitq.h>
kernel/generic/src/synch/condvar.c
r86a34d3e → rbd5f3b7

 #include <synch/mutex.h>
 #include <synch/waitq.h>
-#include <synch/synch.h>
 #include <arch.h>
kernel/generic/src/synch/futex.c
r86a34d3e → rbd5f3b7

 #include <synch/mutex.h>
 #include <synch/spinlock.h>
-#include <synch/synch.h>
 #include <mm/frame.h>
 #include <mm/page.h>
kernel/generic/src/synch/mutex.c
r86a34d3e → rbd5f3b7

 #include <synch/mutex.h>
 #include <synch/semaphore.h>
-#include <synch/synch.h>
 #include <debug.h>
 #include <arch.h>
kernel/generic/src/synch/semaphore.c
r86a34d3e → rbd5f3b7

 #include <synch/waitq.h>
 #include <synch/spinlock.h>
-#include <synch/synch.h>
 #include <arch/asm.h>
 #include <arch.h>
kernel/generic/src/synch/waitq.c
r86a34d3e → rbd5f3b7


 #include <synch/waitq.h>
-#include <synch/synch.h>
 #include <synch/spinlock.h>
 #include <proc/thread.h>
kernel/generic/src/sysinfo/stats.c
r86a34d3e → rbd5f3b7


 #include <typedefs.h>
-#include <sysinfo/abi.h>
+#include <abi/sysinfo.h>
 #include <sysinfo/stats.h>
 #include <sysinfo/sysinfo.h>