Changeset b0f00a9 in mainline for kernel/generic/src/ipc
- Timestamp: 2011-11-06T22:21:05Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 898e847
- Parents: 2bdf8313 (diff), 7b5f4c9 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: kernel/generic/src/ipc
- Files: 6 edited
Legend:
- Lines prefixed with '+' were added.
- Lines prefixed with '-' were removed.
- Unprefixed lines are unmodified context; '…' marks a gap between hunks of the same file.
kernel/generic/src/ipc/event.c
(changes from parent r2bdf8313 to rb0f00a9)

 
 #include <ipc/event.h>
-#include <ipc/event_types.h>
 #include <mm/slab.h>
 #include <typedefs.h>
 #include <synch/spinlock.h>
 #include <console/console.h>
+#include <proc/task.h>
 #include <memstr.h>
 #include <errno.h>
…
 static event_t events[EVENT_END];
 
+static void event_initialize(event_t *event)
+{
+    spinlock_initialize(&event->lock, "event.lock");
+    event->answerbox = NULL;
+    event->counter = 0;
+    event->imethod = 0;
+    event->masked = false;
+    event->unmask_callback = NULL;
+}
+
+static event_t *evno2event(int evno, task_t *t)
+{
+    ASSERT(evno < EVENT_TASK_END);
+
+    event_t *event;
+
+    if (evno < EVENT_END)
+        event = &events[(event_type_t) evno];
+    else
+        event = &t->events[(event_task_type_t) evno - EVENT_END];
+
+    return event;
+}
+
 /** Initialize kernel events.
  *
…
 void event_init(void)
 {
-    for (unsigned int i = 0; i < EVENT_END; i++) {
-        spinlock_initialize(&events[i].lock, "event.lock");
-        events[i].answerbox = NULL;
-        events[i].counter = 0;
-        events[i].imethod = 0;
-        events[i].masked = false;
-        events[i].unmask_callback = NULL;
-    }
-}
+    for (unsigned int i = 0; i < EVENT_END; i++)
+        event_initialize(evno2event(i, NULL));
+}
+
+void event_task_init(task_t *task)
+{
+    for (unsigned int i = EVENT_END; i < EVENT_TASK_END; i++)
+        event_initialize(evno2event(i, task));
+}
+
 
 /** Unsubscribe kernel events associated with an answerbox
…
 }
 
+static void _event_set_unmask_callback(event_t *event, event_callback_t callback)
+{
+    spinlock_lock(&event->lock);
+    event->unmask_callback = callback;
+    spinlock_unlock(&event->lock);
+}
+
 /** Define a callback function for the event unmask event.
  *
…
     ASSERT(evno < EVENT_END);
 
-    spinlock_lock(&events[evno].lock);
-    events[evno].unmask_callback = callback;
-    spinlock_unlock(&events[evno].lock);
+    _event_set_unmask_callback(evno2event(evno, NULL), callback);
+}
+
+void event_task_set_unmask_callback(task_t *task, event_task_type_t evno,
+    event_callback_t callback)
+{
+    ASSERT(evno >= (int) EVENT_END);
+    ASSERT(evno < EVENT_TASK_END);
+
+    _event_set_unmask_callback(evno2event(evno, task), callback);
+}
+
+static int event_enqueue(event_t *event, bool mask, sysarg_t a1, sysarg_t a2,
+    sysarg_t a3, sysarg_t a4, sysarg_t a5)
+{
+    int res;
+
+    spinlock_lock(&event->lock);
+
+    if (event->answerbox != NULL) {
+        if (!event->masked) {
+            call_t *call = ipc_call_alloc(FRAME_ATOMIC);
+
+            if (call) {
+                call->flags |= IPC_CALL_NOTIF;
+                call->priv = ++event->counter;
+
+                IPC_SET_IMETHOD(call->data, event->imethod);
+                IPC_SET_ARG1(call->data, a1);
+                IPC_SET_ARG2(call->data, a2);
+                IPC_SET_ARG3(call->data, a3);
+                IPC_SET_ARG4(call->data, a4);
+                IPC_SET_ARG5(call->data, a5);
+
+                call->data.task_id = TASK ? TASK->taskid : 0;
+
+                irq_spinlock_lock(&event->answerbox->irq_lock, true);
+                list_append(&call->link, &event->answerbox->irq_notifs);
+                irq_spinlock_unlock(&event->answerbox->irq_lock, true);
+
+                waitq_wakeup(&event->answerbox->wq, WAKEUP_FIRST);
+
+                if (mask)
+                    event->masked = true;
+
+                res = EOK;
+            } else
+                res = ENOMEM;
+        } else
+            res = EBUSY;
+    } else
+        res = ENOENT;
+
+    spinlock_unlock(&event->lock);
+    return res;
 }
 
…
     ASSERT(evno < EVENT_END);
 
-    spinlock_lock(&events[evno].lock);
-
-    int ret;
-
-    if (events[evno].answerbox != NULL) {
-        if (!events[evno].masked) {
-            call_t *call = ipc_call_alloc(FRAME_ATOMIC);
-
-            if (call) {
-                call->flags |= IPC_CALL_NOTIF;
-                call->priv = ++events[evno].counter;
-
-                IPC_SET_IMETHOD(call->data, events[evno].imethod);
-                IPC_SET_ARG1(call->data, a1);
-                IPC_SET_ARG2(call->data, a2);
-                IPC_SET_ARG3(call->data, a3);
-                IPC_SET_ARG4(call->data, a4);
-                IPC_SET_ARG5(call->data, a5);
-
-                irq_spinlock_lock(&events[evno].answerbox->irq_lock, true);
-                list_append(&call->link, &events[evno].answerbox->irq_notifs);
-                irq_spinlock_unlock(&events[evno].answerbox->irq_lock, true);
-
-                waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST);
-
-                if (mask)
-                    events[evno].masked = true;
-
-                ret = EOK;
-            } else
-                ret = ENOMEM;
-        } else
-            ret = EBUSY;
-    } else
-        ret = ENOENT;
-
-    spinlock_unlock(&events[evno].lock);
-
-    return ret;
+    return event_enqueue(evno2event(evno, NULL), mask, a1, a2, a3, a4, a5);
+}
+
+/** Send per-task kernel notification event
+ *
+ * @param task Destination task.
+ * @param evno Event type.
+ * @param mask Mask further notifications after a successful
+ *             sending.
+ * @param a1   First argument.
+ * @param a2   Second argument.
+ * @param a3   Third argument.
+ * @param a4   Fourth argument.
+ * @param a5   Fifth argument.
+ *
+ * @return EOK if notification was successfully sent.
+ * @return ENOMEM if the notification IPC message failed to allocate.
+ * @return EBUSY if the notifications of the given type are
+ *         currently masked.
+ * @return ENOENT if the notifications of the given type are
+ *         currently not subscribed.
+ *
+ */
+int event_task_notify(task_t *task, event_task_type_t evno, bool mask,
+    sysarg_t a1, sysarg_t a2, sysarg_t a3, sysarg_t a4, sysarg_t a5)
+{
+    ASSERT(evno >= (int) EVENT_END);
+    ASSERT(evno < EVENT_TASK_END);
+
+    return event_enqueue(evno2event(evno, task), mask, a1, a2, a3, a4, a5);
 }
 
…
  *
  */
-static int event_subscribe(event_type_t evno, sysarg_t imethod,
+static int event_subscribe(event_t *event, sysarg_t imethod,
     answerbox_t *answerbox)
 {
-    ASSERT(evno < EVENT_END);
-
-    spinlock_lock(&events[evno].lock);
-
     int res;
-
-    if (events[evno].answerbox == NULL) {
-        events[evno].answerbox = answerbox;
-        events[evno].imethod = imethod;
-        events[evno].counter = 0;
-        events[evno].masked = false;
+
+    spinlock_lock(&event->lock);
+
+    if (event->answerbox == NULL) {
+        event->answerbox = answerbox;
+        event->imethod = imethod;
+        event->counter = 0;
+        event->masked = false;
         res = EOK;
     } else
         res = EEXISTS;
 
-    spinlock_unlock(&events[evno].lock);
+    spinlock_unlock(&event->lock);
 
     return res;
…
  *
  *
  */
-static void event_unmask(event_type_t evno)
-{
-    ASSERT(evno < EVENT_END);
-
-    spinlock_lock(&events[evno].lock);
-    events[evno].masked = false;
-    event_callback_t callback = events[evno].unmask_callback;
-    spinlock_unlock(&events[evno].lock);
+static void event_unmask(event_t *event)
+{
+    spinlock_lock(&event->lock);
+    event->masked = false;
+    event_callback_t callback = event->unmask_callback;
+    spinlock_unlock(&event->lock);
 
     /*
…
      */
     if (callback != NULL)
-        callback();
+        callback(event);
 
…
 sysarg_t sys_event_subscribe(sysarg_t evno, sysarg_t imethod)
 {
-    if (evno >= EVENT_END)
+    if (evno >= EVENT_TASK_END)
         return ELIMIT;
 
-    return (sysarg_t) event_subscribe((event_type_t) evno, (sysarg_t)
-        imethod, &TASK->answerbox);
+    return (sysarg_t) event_subscribe(evno2event(evno, TASK),
+        (sysarg_t) imethod, &TASK->answerbox);
 }
 
…
 sysarg_t sys_event_unmask(sysarg_t evno)
 {
-    if (evno >= EVENT_END)
+    if (evno >= EVENT_TASK_END)
         return ELIMIT;
 
-    event_unmask((event_type_t) evno);
+    event_unmask(evno2event(evno, TASK));
+
     return EOK;
 }
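
The event.c changes above split kernel events into the global events[EVENT_END] table and a per-task range resolved through the new evno2event() helper, with event_task_notify() delivering a notification to a single task's answerbox. As a purely illustrative sketch of how a kernel subsystem might use the new interface (the wrapper function and its argument are hypothetical; the event_task_notify() signature, the EVENT_TASK_STATE_CHANGE type and the return codes all come from this changeset):

#include <ipc/event.h>
#include <proc/task.h>
#include <errno.h>

/* Hypothetical helper: raise a single-argument per-task notification. */
static void notify_task_sketch(task_t *target, sysarg_t arg)
{
    /* false = do not mask further notifications of this type. */
    int rc = event_task_notify(target, EVENT_TASK_STATE_CHANGE, false,
        arg, 0, 0, 0, 0);

    /* Per the doc comment above: ENOENT means the target task has not
     * subscribed to this event type, EBUSY means it is currently masked. */
    (void) rc;
}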
kernel/generic/src/ipc/ipc.c
(changes from parent r2bdf8313 to rb0f00a9)

  */
 
-#include <synch/synch.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
 #include <synch/waitq.h>
-#include <synch/synch.h>
 #include <ipc/ipc.h>
+#include <abi/ipc/methods.h>
 #include <ipc/kbox.h>
 #include <ipc/event.h>
…
     list_initialize(&box->answers);
     list_initialize(&box->irq_notifs);
-    list_initialize(&box->irq_head);
+    list_initialize(&box->irq_list);
     box->task = task;
 }
…
      */
     irq_spinlock_lock(&TASK->lock, true);
-    list_append(&sync_box->sync_box_link, &TASK->sync_box_head);
+    list_append(&sync_box->sync_box_link, &TASK->sync_boxes);
     irq_spinlock_unlock(&TASK->lock, true);
 
…
         }
     }
+
+    call->data.task_id = TASK->taskid;
 
     if (do_lock)
…
     atomic_inc(&phone->active_calls);
     call->data.phone = phone;
-    call->data.task = TASK;
+    call->data.task_id = TASK->taskid;
 }
…
     call->caller_phone = call->data.phone;
     call->data.phone = newphone;
-    call->data.task = TASK;
+    call->data.task_id = TASK->taskid;
 }
…
         irq_spinlock_lock(&box->irq_lock, false);
 
-        request = list_get_instance(box->irq_notifs.next, call_t, link);
+        request = list_get_instance(list_first(&box->irq_notifs),
+            call_t, link);
         list_remove(&request->link);
 
…
 
         /* Handle asynchronous answers */
-        request = list_get_instance(box->answers.next, call_t, link);
+        request = list_get_instance(list_first(&box->answers),
+            call_t, link);
         list_remove(&request->link);
         atomic_dec(&request->data.phone->active_calls);
…
 
         /* Handle requests */
-        request = list_get_instance(box->calls.next, call_t, link);
+        request = list_get_instance(list_first(&box->calls),
+            call_t, link);
         list_remove(&request->link);
 
…
  *
  */
-void ipc_cleanup_call_list(link_t *lst)
+void ipc_cleanup_call_list(list_t *lst)
 {
     while (!list_empty(lst)) {
-        call_t *call = list_get_instance(lst->next, call_t, link);
+        call_t *call = list_get_instance(list_first(lst), call_t, link);
         if (call->buffer)
             free(call->buffer);
…
     irq_spinlock_lock(&box->lock, true);
     while (!list_empty(&box->connected_phones)) {
-        phone = list_get_instance(box->connected_phones.next,
+        phone = list_get_instance(list_first(&box->connected_phones),
             phone_t, link);
         if (SYNCH_FAILED(mutex_trylock(&phone->lock))) {
…
     /* Wait for all answers to interrupted synchronous calls to arrive */
     ipl_t ipl = interrupts_disable();
-    while (!list_empty(&TASK->sync_box_head)) {
-        answerbox_t *box = list_get_instance(TASK->sync_box_head.next,
-            answerbox_t, sync_box_link);
+    while (!list_empty(&TASK->sync_boxes)) {
+        answerbox_t *box = list_get_instance(
+            list_first(&TASK->sync_boxes), answerbox_t, sync_box_link);
 
         list_remove(&box->sync_box_link);
…
 #endif
 
-    link_t *cur;
-
     printf(" --- incomming calls ---\n");
-    for (cur = task->answerbox.calls.next; cur != &task->answerbox.calls;
-        cur = cur->next) {
+    list_foreach(task->answerbox.calls, cur) {
         call_t *call = list_get_instance(cur, call_t, link);
 
…
 
     printf(" --- dispatched calls ---\n");
-    for (cur = task->answerbox.dispatched_calls.next;
-        cur != &task->answerbox.dispatched_calls;
-        cur = cur->next) {
+    list_foreach(task->answerbox.dispatched_calls, cur) {
         call_t *call = list_get_instance(cur, call_t, link);
 
…
 
     printf(" --- incoming answers ---\n");
-    for (cur = task->answerbox.answers.next;
-        cur != &task->answerbox.answers;
-        cur = cur->next) {
+    list_foreach(task->answerbox.answers, cur) {
         call_t *call = list_get_instance(cur, call_t, link);
 
kernel/generic/src/ipc/ipcrsc.c
(changes from parent r2bdf8313 to rb0f00a9)

 call_t *get_call(sysarg_t callid)
 {
-    link_t *lst;
     call_t *result = NULL;
 
     irq_spinlock_lock(&TASK->answerbox.lock, true);
-    for (lst = TASK->answerbox.dispatched_calls.next;
-        lst != &TASK->answerbox.dispatched_calls; lst = lst->next) {
+
+    list_foreach(TASK->answerbox.dispatched_calls, lst) {
         call_t *call = list_get_instance(lst, call_t, link);
         if ((sysarg_t) call == callid) {
kernel/generic/src/ipc/irq.c
(changes from parent r2bdf8313 to rb0f00a9)

     irq->notif_cfg.code = code;
     irq->notif_cfg.counter = 0;
+    irq->driver_as = AS;
 
     /*
…
 
     hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
-    list_append(&irq->notif_cfg.link, &box->irq_head);
+    list_append(&irq->notif_cfg.link, &box->irq_list);
 
     irq_spinlock_unlock(&box->irq_lock, false);
…
     irq_spinlock_lock(&box->irq_lock, false);
 
-    while (box->irq_head.next != &box->irq_head) {
+    while (!list_empty(&box->irq_list)) {
         DEADLOCK_PROBE_INIT(p_irqlock);
 
-        irq_t *irq = list_get_instance(box->irq_head.next, irq_t,
+        irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
             notif_cfg.link);
 
…
         return IRQ_DECLINE;
 
+#define CMD_MEM_READ(target) \
+do { \
+    void *va = code->cmds[i].addr; \
+    if (AS != irq->driver_as) \
+        as_switch(AS, irq->driver_as); \
+    memcpy_from_uspace(&target, va, (sizeof(target))); \
+    if (dstarg) \
+        scratch[dstarg] = target; \
+} while(0)
+
+#define CMD_MEM_WRITE(val) \
+do { \
+    void *va = code->cmds[i].addr; \
+    if (AS != irq->driver_as) \
+        as_switch(AS, irq->driver_as); \
+    memcpy_to_uspace(va, &val, sizeof(val)); \
+} while (0)
+
+    as_t *current_as = AS;
     size_t i;
     for (i = 0; i < code->cmdcount; i++) {
…
             }
             break;
+        case CMD_MEM_READ_8: {
+            uint8_t val;
+            CMD_MEM_READ(val);
+            break;
+        }
+        case CMD_MEM_READ_16: {
+            uint16_t val;
+            CMD_MEM_READ(val);
+            break;
+        }
+        case CMD_MEM_READ_32: {
+            uint32_t val;
+            CMD_MEM_READ(val);
+            break;
+        }
+        case CMD_MEM_WRITE_8: {
+            uint8_t val = code->cmds[i].value;
+            CMD_MEM_WRITE(val);
+            break;
+        }
+        case CMD_MEM_WRITE_16: {
+            uint16_t val = code->cmds[i].value;
+            CMD_MEM_WRITE(val);
+            break;
+        }
+        case CMD_MEM_WRITE_32: {
+            uint32_t val = code->cmds[i].value;
+            CMD_MEM_WRITE(val);
+            break;
+        }
+        case CMD_MEM_WRITE_A_8:
+            if (srcarg) {
+                uint8_t val = scratch[srcarg];
+                CMD_MEM_WRITE(val);
+            }
+            break;
+        case CMD_MEM_WRITE_A_16:
+            if (srcarg) {
+                uint16_t val = scratch[srcarg];
+                CMD_MEM_WRITE(val);
+            }
+            break;
+        case CMD_MEM_WRITE_A_32:
+            if (srcarg) {
+                uint32_t val = scratch[srcarg];
+                CMD_MEM_WRITE(val);
+            }
+            break;
         case CMD_BTEST:
             if ((srcarg) && (dstarg)) {
…
             break;
         case CMD_ACCEPT:
+            if (AS != current_as)
+                as_switch(AS, current_as);
             return IRQ_ACCEPT;
         case CMD_DECLINE:
         default:
+            if (AS != current_as)
+                as_switch(AS, current_as);
             return IRQ_DECLINE;
         }
     }
+    if (AS != current_as)
+        as_switch(AS, current_as);
 
     return IRQ_DECLINE;
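
A note on the irq.c hunk above: the new CMD_MEM_READ_*/CMD_MEM_WRITE_* pseudocode commands access memory belonging to the driver that registered the IRQ code, while the top-half handler can run in whatever address space was active when the interrupt fired. The changeset therefore records the registering driver's address space in irq->driver_as and brackets each access with an address-space switch, restoring the interrupted space before the handler returns. A condensed sketch of that pattern (the wrapper function and its parameters are hypothetical; AS, as_switch() and memcpy_from_uspace() are used exactly as in the diff):

/* Read a 32-bit value from a driver-supplied virtual address while
 * servicing an interrupt, then restore the interrupted address space. */
static void read_driver_u32_sketch(irq_t *irq, void *va, uint32_t *out)
{
    as_t *saved = AS;                    /* address space active at IRQ time */

    if (AS != irq->driver_as)
        as_switch(AS, irq->driver_as);   /* make the driver's buffer addressable */

    memcpy_from_uspace(out, va, sizeof(*out));

    if (AS != saved)
        as_switch(AS, saved);            /* always switch back before returning */
}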
kernel/generic/src/ipc/kbox.c
(changes from parent r2bdf8313 to rb0f00a9)

  */
 
-#include <synch/synch.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
 #include <ipc/ipc.h>
+#include <abi/ipc/methods.h>
 #include <ipc/ipcrsc.h>
 #include <arch.h>
…
     switch (IPC_GET_IMETHOD(call->data)) {
 
-    case IPC_M_DEBUG_ALL:
+    case IPC_M_DEBUG:
         /* Handle debug call. */
         udebug_call_receive(call);
kernel/generic/src/ipc/sysipc.c
(changes from parent r2bdf8313 to rb0f00a9)

 #include <debug.h>
 #include <ipc/ipc.h>
+#include <abi/ipc/methods.h>
 #include <ipc/sysipc.h>
 #include <ipc/irq.h>
 #include <ipc/ipcrsc.h>
+#include <ipc/event.h>
 #include <ipc/kbox.h>
 #include <synch/waitq.h>
…
 #include <mm/as.h>
 #include <print.h>
+#include <macros.h>
 
 /**
…
     case IPC_M_DATA_WRITE:
     case IPC_M_DATA_READ:
+    case IPC_M_STATE_CHANGE_AUTHORIZE:
         return true;
     default:
…
     case IPC_M_DATA_WRITE:
     case IPC_M_DATA_READ:
+    case IPC_M_STATE_CHANGE_AUTHORIZE:
         return true;
     default:
…
         /* The connection was accepted */
         phone_connect(phoneid, &answer->sender->answerbox);
-        /* Set 'task hash' as arg4 of response */
-        IPC_SET_ARG4(answer->data, (sysarg_t) TASK);
         /* Set 'phone hash' as arg5 of response */
         IPC_SET_ARG5(answer->data,
…
         free(answer->buffer);
         answer->buffer = NULL;
+    } else if (IPC_GET_IMETHOD(*olddata) == IPC_M_STATE_CHANGE_AUTHORIZE) {
+        if (!IPC_GET_RETVAL(answer->data)) {
+            /* The recipient authorized the change of state. */
+            phone_t *recipient_phone;
+            task_t *other_task_s;
+            task_t *other_task_r;
+            int rc;
+
+            rc = phone_get(IPC_GET_ARG1(answer->data),
+                &recipient_phone);
+            if (rc != EOK) {
+                IPC_SET_RETVAL(answer->data, ENOENT);
+                return ENOENT;
+            }
+
+            mutex_lock(&recipient_phone->lock);
+            if (recipient_phone->state != IPC_PHONE_CONNECTED) {
+                mutex_unlock(&recipient_phone->lock);
+                IPC_SET_RETVAL(answer->data, EINVAL);
+                return EINVAL;
+            }
+
+            other_task_r = recipient_phone->callee->task;
+            other_task_s = (task_t *) IPC_GET_ARG5(*olddata);
+
+            /*
+             * See if both the sender and the recipient meant the
+             * same third party task.
+             */
+            if (other_task_r != other_task_s) {
+                IPC_SET_RETVAL(answer->data, EINVAL);
+                rc = EINVAL;
+            } else {
+                rc = event_task_notify_5(other_task_r,
+                    EVENT_TASK_STATE_CHANGE, false,
+                    IPC_GET_ARG1(*olddata),
+                    IPC_GET_ARG2(*olddata),
+                    IPC_GET_ARG3(*olddata),
+                    LOWER32(olddata->task_id),
+                    UPPER32(olddata->task_id));
+                IPC_SET_RETVAL(answer->data, rc);
+            }
+
+            mutex_unlock(&recipient_phone->lock);
+            return rc;
+        }
     }
 
…
     case IPC_M_DATA_READ: {
         size_t size = IPC_GET_ARG2(call->data);
-        if (size <= 0)
-            return ELIMIT;
         if (size > DATA_XFER_LIMIT) {
             int flags = IPC_GET_ARG3(call->data);
…
         break;
     }
+    case IPC_M_STATE_CHANGE_AUTHORIZE: {
+        phone_t *sender_phone;
+        task_t *other_task_s;
+
+        if (phone_get(IPC_GET_ARG5(call->data), &sender_phone) != EOK)
+            return ENOENT;
+
+        mutex_lock(&sender_phone->lock);
+        if (sender_phone->state != IPC_PHONE_CONNECTED) {
+            mutex_unlock(&sender_phone->lock);
+            return EINVAL;
+        }
+
+        other_task_s = sender_phone->callee->task;
+
+        mutex_unlock(&sender_phone->lock);
+
+        /* Remember the third party task hash. */
+        IPC_SET_ARG5(call->data, (sysarg_t) other_task_s);
+        break;
+    }
 #ifdef CONFIG_UDEBUG
-    case IPC_M_DEBUG_ALL:
+    case IPC_M_DEBUG:
         return udebug_request_preprocess(call, phone);
 #endif
…
     /*
      * This must be an affirmative answer to IPC_M_DATA_READ
-     * or IPC_M_DEBUG_ALL/UDEBUG_M_MEM_READ...
+     * or IPC_M_DEBUG/UDEBUG_M_MEM_READ...
      *
      */
…
 
     switch (IPC_GET_IMETHOD(call->data)) {
-    case IPC_M_DEBUG_ALL:
+    case IPC_M_DEBUG:
         return -1;
     default:
