Changeset 25a76ab8 in mainline for kernel/generic/src
- Timestamp:
- 2010-05-08T07:53:23Z (16 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 051bc69a
- Parents:
- 6c39a907 (diff), 1317380 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/generic/src
- Files:
-
- 18 edited
-
console/cmd.c (modified) (2 diffs)
-
console/kconsole.c (modified) (1 diff)
-
ddi/ddi.c (modified) (10 diffs)
-
ipc/ipc.c (modified) (11 diffs)
-
lib/func.c (modified) (3 diffs)
-
lib/str.c (modified) (2 diffs)
-
mm/as.c (modified) (5 diffs)
-
mm/frame.c (modified) (1 diff)
-
mm/slab.c (modified) (5 diffs)
-
printf/printf_core.c (modified) (7 diffs)
-
proc/scheduler.c (modified) (6 diffs)
-
proc/task.c (modified) (3 diffs)
-
proc/thread.c (modified) (3 diffs)
-
synch/mutex.c (modified) (2 diffs)
-
synch/spinlock.c (modified) (1 diff)
-
sysinfo/stats.c (modified) (11 diffs)
-
sysinfo/sysinfo.c (modified) (29 diffs)
-
time/clock.c (modified) (1 diff)
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/console/cmd.c
r6c39a907 r25a76ab8 837 837 bool pointer = false; 838 838 int rc; 839 840 if (((char *) argv->buffer)[0] == '*') {839 840 if (((char *) argv->buffer)[0] == '*') { 841 841 rc = symtab_addr_lookup((char *) argv->buffer + 1, &addr); 842 842 pointer = true; 843 } else if (((char *) argv->buffer)[0] >= '0' && 844 ((char *)argv->buffer)[0] <= '9') { 845 rc = EOK; 846 addr = atoi((char *)argv->buffer); 847 } else { 843 } else if (((char *) argv->buffer)[0] >= '0' && 844 ((char *) argv->buffer)[0] <= '9') { 845 uint64_t value; 846 rc = str_uint64((char *) argv->buffer, NULL, 0, true, &value); 847 if (rc == EOK) 848 addr = (uintptr_t) value; 849 } else 848 850 rc = symtab_addr_lookup((char *) argv->buffer, &addr); 849 } 850 851 851 852 if (rc == ENOENT) 852 853 printf("Symbol %s not found.\n", argv->buffer); 854 else if (rc == EINVAL) 855 printf("Invalid address.\n"); 853 856 else if (rc == EOVERFLOW) { 854 857 symtab_print_search((char *) argv->buffer); 855 printf("Duplicate symbol , be more specific.\n");858 printf("Duplicate symbol (be more specific) or address overflow.\n"); 856 859 } else if (rc == EOK) { 857 860 if (pointer) … … 859 862 printf("Writing %#" PRIx64 " -> %p\n", arg1, addr); 860 863 *(uint32_t *) addr = arg1; 861 } else {864 } else 862 865 printf("No symbol information available.\n"); 863 }864 866 865 867 return 1; -
kernel/generic/src/console/kconsole.c
r6c39a907 r25a76ab8 455 455 printf("No symbol information available.\n"); 456 456 return false; 457 } 458 459 if (isaddr) 460 *result = (unative_t) symaddr; 461 else if (isptr) 462 *result = **((unative_t **) symaddr); 463 else 464 *result = *((unative_t *) symaddr); 457 case EOK: 458 if (isaddr) 459 *result = (unative_t) symaddr; 460 else if (isptr) 461 *result = **((unative_t **) symaddr); 462 else 463 *result = *((unative_t *) symaddr); 464 break; 465 default: 466 printf("Unknown error.\n"); 467 return false; 468 } 465 469 } else { 466 470 /* It's a number - convert it */ 467 *result = atoi(text); 468 if (isptr) 469 *result = *((unative_t *) *result); 471 uint64_t value; 472 int rc = str_uint64(text, NULL, 0, true, &value); 473 switch (rc) { 474 case EINVAL: 475 printf("Invalid number.\n"); 476 return false; 477 case EOVERFLOW: 478 printf("Integer overflow.\n"); 479 return false; 480 case EOK: 481 *result = (unative_t) value; 482 if (isptr) 483 *result = *((unative_t *) *result); 484 break; 485 default: 486 printf("Unknown error.\n"); 487 return false; 488 } 470 489 } 471 490 -
kernel/generic/src/ddi/ddi.c
r6c39a907 r25a76ab8 46 46 #include <mm/frame.h> 47 47 #include <mm/as.h> 48 #include <synch/ spinlock.h>48 #include <synch/mutex.h> 49 49 #include <syscall/copy.h> 50 50 #include <adt/btree.h> … … 54 54 55 55 /** This lock protects the parea_btree. */ 56 SPINLOCK_INITIALIZE(parea_lock);56 static mutex_t parea_lock; 57 57 58 58 /** B+tree with enabled physical memory areas. */ … … 63 63 { 64 64 btree_create(&parea_btree); 65 mutex_initialize(&parea_lock, MUTEX_PASSIVE); 65 66 } 66 67 … … 72 73 void ddi_parea_register(parea_t *parea) 73 74 { 74 ipl_t ipl = interrupts_disable(); 75 spinlock_lock(&parea_lock); 75 mutex_lock(&parea_lock); 76 76 77 77 /* … … 80 80 btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL); 81 81 82 spinlock_unlock(&parea_lock); 83 interrupts_restore(ipl); 82 mutex_unlock(&parea_lock); 84 83 } 85 84 … … 141 140 spinlock_unlock(&zones.lock); 142 141 143 spinlock_lock(&parea_lock);142 mutex_lock(&parea_lock); 144 143 btree_node_t *nodep; 145 144 parea_t *parea = (parea_t *) btree_search(&parea_btree, … … 147 146 148 147 if ((!parea) || (parea->frames < pages)) { 149 spinlock_unlock(&parea_lock);148 mutex_unlock(&parea_lock); 150 149 goto err; 151 150 } 152 151 153 spinlock_unlock(&parea_lock);152 mutex_unlock(&parea_lock); 154 153 goto map; 155 154 } … … 161 160 162 161 map: 163 spinlock_lock(&TASK->lock);164 162 interrupts_restore(ipl); 163 165 164 if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, 166 165 AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) { … … 169 168 * We report it using ENOMEM. 170 169 */ 171 spinlock_unlock(&TASK->lock);172 interrupts_restore(ipl);173 170 return ENOMEM; 174 171 } … … 177 174 * Mapping is created on-demand during page fault. 178 175 */ 179 180 spinlock_unlock(&TASK->lock);181 interrupts_restore(ipl);182 176 return 0; 183 177 } -
kernel/generic/src/ipc/ipc.c
r6c39a907 r25a76ab8 218 218 answerbox_t *callerbox = call->callerbox; 219 219 bool do_lock = ((!selflocked) || callerbox != (&TASK->answerbox)); 220 ipl_t ipl; 220 221 221 222 /* Count sent answer */ 223 ipl = interrupts_disable(); 222 224 spinlock_lock(&TASK->lock); 223 225 TASK->ipc_info.answer_sent++; 224 226 spinlock_unlock(&TASK->lock); 227 interrupts_restore(ipl); 225 228 226 229 call->flags |= IPC_CALL_ANSWERED; … … 281 284 static void _ipc_call(phone_t *phone, answerbox_t *box, call_t *call) 282 285 { 286 ipl_t ipl; 287 283 288 /* Count sent ipc call */ 289 ipl = interrupts_disable(); 284 290 spinlock_lock(&TASK->lock); 285 291 TASK->ipc_info.call_sent++; 286 292 spinlock_unlock(&TASK->lock); 293 interrupts_restore(ipl); 287 294 288 295 if (!(call->flags & IPC_CALL_FORWARDED)) { … … 386 393 int ipc_forward(call_t *call, phone_t *newphone, answerbox_t *oldbox, int mode) 387 394 { 395 ipl_t ipl; 396 388 397 /* Count forwarded calls */ 398 ipl = interrupts_disable(); 389 399 spinlock_lock(&TASK->lock); 390 400 TASK->ipc_info.forwarded++; 391 401 spinlock_unlock(&TASK->lock); 402 interrupts_restore(ipl); 392 403 393 404 spinlock_lock(&oldbox->lock); … … 422 433 call_t *request; 423 434 ipl_t ipl; 435 uint64_t irq_cnt = 0; 436 uint64_t answer_cnt = 0; 437 uint64_t call_cnt = 0; 424 438 int rc; 425 439 … … 431 445 spinlock_lock(&box->lock); 432 446 if (!list_empty(&box->irq_notifs)) { 433 434 447 /* Count recieved IRQ notification */ 435 spinlock_lock(&TASK->lock); 436 TASK->ipc_info.irq_notif_recieved++; 437 spinlock_unlock(&TASK->lock); 448 irq_cnt++; 438 449 439 450 ipl = interrupts_disable(); … … 447 458 } else if (!list_empty(&box->answers)) { 448 459 /* Count recieved answer */ 449 spinlock_lock(&TASK->lock); 450 TASK->ipc_info.answer_recieved++; 451 spinlock_unlock(&TASK->lock); 460 answer_cnt++; 452 461 453 462 /* Handle asynchronous answers */ … … 457 466 } else if (!list_empty(&box->calls)) { 458 467 /* Count recieved call */ 459 
spinlock_lock(&TASK->lock); 460 TASK->ipc_info.call_recieved++; 461 spinlock_unlock(&TASK->lock); 468 call_cnt++; 462 469 463 470 /* Handle requests */ … … 472 479 } 473 480 spinlock_unlock(&box->lock); 481 482 ipl = interrupts_disable(); 483 spinlock_lock(&TASK->lock); 484 TASK->ipc_info.irq_notif_recieved += irq_cnt; 485 TASK->ipc_info.answer_recieved += answer_cnt; 486 TASK->ipc_info.call_recieved += call_cnt; 487 spinlock_unlock(&TASK->lock); 488 interrupts_restore(ipl); 489 474 490 return request; 475 491 } … … 675 691 call_t *call; 676 692 link_t *tmp; 693 ipl_t ipl; 677 694 695 ipl = interrupts_disable(); 678 696 spinlock_lock(&tasks_lock); 679 697 task = task_find_by_id(taskid); … … 681 699 spinlock_lock(&task->lock); 682 700 spinlock_unlock(&tasks_lock); 683 if (!task) 701 if (!task) { 702 interrupts_restore(ipl); 684 703 return; 704 } 685 705 686 706 /* Print opened phones & details */ … … 765 785 spinlock_unlock(&task->answerbox.lock); 766 786 spinlock_unlock(&task->lock); 787 interrupts_restore(ipl); 767 788 } 768 789 -
kernel/generic/src/lib/func.c
r6c39a907 r25a76ab8 27 27 */ 28 28 29 /** @addtogroup generic 29 /** @addtogroup generic 30 30 * @{ 31 31 */ … … 33 33 /** 34 34 * @file 35 * @brief Miscellaneous functions.35 * @brief Miscellaneous functions. 36 36 */ 37 37 … … 79 79 } 80 80 81 /** Convert ascii representation to unative_t82 *83 * Supports 0x for hexa & 0 for octal notation.84 * Does not check for overflows, does not support negative numbers85 *86 * @param text Textual representation of number87 * @return Converted number or 0 if no valid number ofund88 */89 unative_t atoi(const char *text)90 {91 int base = 10;92 unative_t result = 0;93 94 if (text[0] == '0' && text[1] == 'x') {95 base = 16;96 text += 2;97 } else if (text[0] == '0')98 base = 8;99 100 while (*text) {101 if (base != 16 && \102 ((*text >= 'A' && *text <= 'F' )103 || (*text >='a' && *text <='f')))104 break;105 if (base == 8 && *text >='8')106 break;107 108 if (*text >= '0' && *text <= '9') {109 result *= base;110 result += *text - '0';111 } else if (*text >= 'A' && *text <= 'F') {112 result *= base;113 result += *text - 'A' + 10;114 } else if (*text >= 'a' && *text <= 'f') {115 result *= base;116 result += *text - 'a' + 10;117 } else118 break;119 text++;120 }121 122 return result;123 }124 125 81 /** @} 126 82 */ -
kernel/generic/src/lib/str.c
r6c39a907 r25a76ab8 823 823 str++; 824 824 break; 825 default: 826 str--; 825 827 } 826 828 } … … 886 888 * @param base Zero or number between 2 and 36 inclusive. 887 889 * @param strict Do not allow any trailing characters. 888 * @ apram result Result of the conversion.890 * @param result Result of the conversion. 889 891 * 890 892 * @return EOK if conversion was successful. -
kernel/generic/src/mm/as.c
r6c39a907 r25a76ab8 1 1 /* 2 * Copyright (c) 20 01-2006Jakub Jermar2 * Copyright (c) 2010 Jakub Jermar 3 3 * All rights reserved. 4 4 * … … 152 152 * reference count never drops to zero. 153 153 */ 154 a tomic_set(&AS_KERNEL->refcount, 1);154 as_hold(AS_KERNEL); 155 155 } 156 156 … … 200 200 DEADLOCK_PROBE_INIT(p_asidlock); 201 201 202 ASSERT(as != AS); 202 203 ASSERT(atomic_get(&as->refcount) == 0); 203 204 204 205 /* 205 * Since there is no reference to this a rea,206 * it is safe not tolock its mutex.206 * Since there is no reference to this address space, it is safe not to 207 * lock its mutex. 207 208 */ 208 209 … … 225 226 preemption_enable(); /* Interrupts disabled, enable preemption */ 226 227 if (as->asid != ASID_INVALID && as != AS_KERNEL) { 227 if (as != AS && as->cpu_refcount == 0)228 if (as->cpu_refcount == 0) 228 229 list_remove(&as->inactive_as_with_asid_link); 229 230 asid_put(as->asid); … … 258 259 259 260 slab_free(as_slab, as); 261 } 262 263 /** Hold a reference to an address space. 264 * 265 * Holding a reference to an address space prevents destruction of that address 266 * space. 267 * 268 * @param a Address space to be held. 269 */ 270 void as_hold(as_t *as) 271 { 272 atomic_inc(&as->refcount); 273 } 274 275 /** Release a reference to an address space. 276 * 277 * The last one to release a reference to an address space destroys the address 278 * space. 279 * 280 * @param a Address space to be released. 281 */ 282 void as_release(as_t *as) 283 { 284 if (atomic_predec(&as->refcount) == 0) 285 as_destroy(as); 260 286 } 261 287 -
kernel/generic/src/mm/frame.c
r6c39a907 r25a76ab8 1033 1033 spinlock_unlock(&zones.lock); 1034 1034 interrupts_restore(ipl); 1035 1036 if (!THREAD) 1037 panic("Cannot wait for memory to become available."); 1035 1038 1036 1039 /* -
kernel/generic/src/mm/slab.c
r6c39a907 r25a76ab8 555 555 * Initialize mag_cache structure in slab cache 556 556 */ 557 static voidmake_magcache(slab_cache_t *cache)557 static bool make_magcache(slab_cache_t *cache) 558 558 { 559 559 unsigned int i; … … 562 562 563 563 cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 564 0); 564 FRAME_ATOMIC); 565 if (!cache->mag_cache) 566 return false; 567 565 568 for (i = 0; i < config.cpu_count; i++) { 566 569 memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0); … … 568 571 "slab_maglock_cpu"); 569 572 } 573 return true; 570 574 } 571 575 … … 597 601 spinlock_initialize(&cache->maglock, "slab_maglock"); 598 602 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 599 make_magcache(cache);603 (void) make_magcache(cache); 600 604 601 605 /* Compute slab sizes, object counts in slabs etc. */ … … 923 927 SLAB_CACHE_MAGDEFERRED) 924 928 continue; 925 make_magcache(s);929 (void) make_magcache(s); 926 930 s->flags &= ~SLAB_CACHE_MAGDEFERRED; 927 931 } -
kernel/generic/src/printf/printf_core.c
r6c39a907 r25a76ab8 261 261 if (str == NULL) 262 262 return printf_putstr(nullstr, ps); 263 263 264 264 /* Print leading spaces. */ 265 265 size_t strw = str_length(str); 266 266 if (precision == 0) 267 267 precision = strw; 268 268 269 269 /* Left padding */ 270 270 size_t counter = 0; … … 276 276 } 277 277 } 278 278 279 279 /* Part of @a str fitting into the alloted space. */ 280 280 int retval; … … 391 391 */ 392 392 if (flags & __PRINTF_FLAG_PREFIX) { 393 switch (base) {393 switch (base) { 394 394 case 2: 395 395 /* Binary formating is not standard, but usefull */ … … 455 455 /* Print prefix */ 456 456 if (flags & __PRINTF_FLAG_PREFIX) { 457 switch (base) {457 switch (base) { 458 458 case 2: 459 459 /* Binary formating is not standard, but usefull */ … … 570 570 * 571 571 * - P, p Print value of a pointer. Void * value is expected and it is 572 * printed in hexadecimal notation with prefix (as with \%#X / \%#x 573 * for 32-bit or \%#X / \%#x for 64-bit long pointers). 572 * printed in hexadecimal notation with prefix (as with 573 * \%#0.8X / \%#0.8x for 32-bit or \%#0.16lX / \%#0.16lx for 64-bit 574 * long pointers). 574 575 * 575 576 * - b Print value as unsigned binary number. Prefix is not printed by … … 784 785 case 'p': 785 786 flags |= __PRINTF_FLAG_PREFIX; 787 flags |= __PRINTF_FLAG_ZEROPADDED; 786 788 base = 16; 787 789 qualifier = PrintfQualifierPointer; … … 846 848 case PrintfQualifierPointer: 847 849 size = sizeof(void *); 848 number = (uint64_t) (unsigned long) va_arg(ap, void *); 850 precision = size << 1; 851 number = (uint64_t) (uintptr_t) va_arg(ap, void *); 849 852 break; 850 853 default: -
kernel/generic/src/proc/scheduler.c
r6c39a907 r25a76ab8 1 1 /* 2 * Copyright (c) 20 01-2007Jakub Jermar2 * Copyright (c) 2010 Jakub Jermar 3 3 * All rights reserved. 4 4 * … … 187 187 188 188 loop: 189 interrupts_enable();190 189 191 190 if (atomic_get(&CPU->nrdy) == 0) { … … 196 195 */ 197 196 198 /* 197 /* Mark CPU as it was idle this clock tick */ 198 spinlock_lock(&CPU->lock); 199 CPU->idle = true; 200 spinlock_unlock(&CPU->lock); 201 202 interrupts_enable(); 203 /* 199 204 * An interrupt might occur right now and wake up a thread. 200 205 * In such case, the CPU will continue to go to sleep 201 206 * even though there is a runnable thread. 202 207 */ 203 204 spinlock_lock(&CPU->lock);205 CPU->idle = true;206 spinlock_unlock(&CPU->lock);207 208 cpu_sleep(); 209 interrupts_disable(); 208 210 goto loop; 209 211 } 210 211 interrupts_disable();212 212 213 213 for (i = 0; i < RQ_COUNT; i++) { … … 382 382 int priority; 383 383 DEADLOCK_PROBE_INIT(p_joinwq); 384 task_t *old_task = TASK; 385 as_t *old_as = AS; 384 386 385 387 ASSERT(CPU != NULL); 386 388 389 /* 390 * Hold the current task and the address space to prevent their 391 * possible destruction should thread_destroy() be called on this or any 392 * other processor while the scheduler is still using them. 393 */ 394 if (old_task) 395 task_hold(old_task); 396 if (old_as) 397 as_hold(old_as); 398 387 399 if (THREAD) { 388 400 /* must be run after the switch to scheduler stack */ … … 476 488 */ 477 489 if (TASK != THREAD->task) { 478 as_t *as1 = NULL; 479 as_t *as2; 480 481 if (TASK) { 482 spinlock_lock(&TASK->lock); 483 as1 = TASK->as; 484 spinlock_unlock(&TASK->lock); 485 } 486 487 spinlock_lock(&THREAD->task->lock); 488 as2 = THREAD->task->as; 489 spinlock_unlock(&THREAD->task->lock); 490 as_t *new_as = THREAD->task->as; 490 491 491 492 /* … … 493 494 * space. 494 495 */ 495 if ( as1 != as2) {496 if (old_as != new_as) { 496 497 /* 497 498 * Both tasks and address spaces are different. 498 499 * Replace the old one with the new one. 
499 500 */ 500 as_switch( as1, as2);501 as_switch(old_as, new_as); 501 502 } 503 502 504 TASK = THREAD->task; 503 505 before_task_runs(); 504 506 } 505 507 508 if (old_task) 509 task_release(old_task); 510 if (old_as) 511 as_release(old_as); 512 506 513 spinlock_lock(&THREAD->lock); 507 514 THREAD->state = Running; -
kernel/generic/src/proc/task.c
r6c39a907 r25a76ab8 1 1 /* 2 * Copyright (c) 20 01-2004Jakub Jermar2 * Copyright (c) 2010 Jakub Jermar 3 3 * All rights reserved. 4 4 * … … 210 210 btree_create(&ta->futexes); 211 211 212 /* 213 * Get a reference to the address space. 214 */ 215 as_hold(ta->as); 216 212 217 ipl = interrupts_disable(); 213 atomic_inc(&as->refcount);214 218 spinlock_lock(&tasks_lock); 215 219 ta->taskid = ++task_counter; … … 250 254 * Drop our reference to the address space. 251 255 */ 252 if (atomic_predec(&t->as->refcount) == 0) 253 as_destroy(t->as); 256 as_release(t->as); 254 257 255 258 slab_free(task_slab, t); 256 TASK = NULL; 259 } 260 261 /** Hold a reference to a task. 262 * 263 * Holding a reference to a task prevents destruction of that task. 264 * 265 * @param t Task to be held. 266 */ 267 void task_hold(task_t *t) 268 { 269 atomic_inc(&t->refcount); 270 } 271 272 /** Release a reference to a task. 273 * 274 * The last one to release a reference to a task destroys the task. 275 * 276 * @param t Task to be released. 277 */ 278 void task_release(task_t *t) 279 { 280 if ((atomic_predec(&t->refcount)) == 0) 281 task_destroy(t); 257 282 } 258 283 -
kernel/generic/src/proc/thread.c
r6c39a907 r25a76ab8 1 1 /* 2 * Copyright (c) 20 01-2004Jakub Jermar2 * Copyright (c) 2010 Jakub Jermar 3 3 * All rights reserved. 4 4 * … … 409 409 410 410 /* 411 * t is guaranteed to be the very last thread of its task. 412 * It is safe to destroy the task. 411 * Drop the reference to the containing task. 413 412 */ 414 if (atomic_predec(&t->task->refcount) == 0) 415 task_destroy(t->task); 413 task_release(t->task); 416 414 417 415 slab_free(thread_slab, t); … … 436 434 spinlock_lock(&task->lock); 437 435 438 atomic_inc(&task->refcount); 436 /* Hold a reference to the task. */ 437 task_hold(task); 439 438 440 439 /* Must not count kbox thread into lifecount */ -
kernel/generic/src/synch/mutex.c
r6c39a907 r25a76ab8 40 40 #include <synch/synch.h> 41 41 #include <debug.h> 42 #include <arch.h> 42 43 43 44 /** Initialize mutex. … … 69 70 int rc; 70 71 71 if (mtx->type == MUTEX_PASSIVE ) {72 if (mtx->type == MUTEX_PASSIVE && THREAD) { 72 73 rc = _semaphore_down_timeout(&mtx->sem, usec, flags); 73 74 } else { 74 ASSERT(mtx->type == MUTEX_ACTIVE );75 ASSERT(mtx->type == MUTEX_ACTIVE || !THREAD); 75 76 ASSERT(usec == SYNCH_NO_TIMEOUT); 76 77 ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE)); -
kernel/generic/src/synch/spinlock.c
r6c39a907 r25a76ab8 120 120 } 121 121 122 /** Unlock spinlock 123 * 124 * Unlock spinlock. 125 * 126 * @param sl Pointer to spinlock_t structure. 127 */ 128 void spinlock_unlock_debug(spinlock_t *lock) 129 { 130 ASSERT(atomic_get(&lock->val) != 0); 131 132 /* 133 * Prevent critical section code from bleeding out this way down. 134 */ 135 CS_LEAVE_BARRIER(); 136 137 atomic_set(&lock->val, 0); 138 preemption_enable(); 139 } 140 122 141 #endif 123 142 -
kernel/generic/src/sysinfo/stats.c
r6c39a907 r25a76ab8 38 38 #include <sysinfo/stats.h> 39 39 #include <sysinfo/sysinfo.h> 40 #include <synch/spinlock.h> 41 #include <synch/mutex.h> 40 42 #include <time/clock.h> 41 43 #include <mm/frame.h> … … 68 70 static load_t avenrdy[LOAD_STEPS] = {0, 0, 0}; 69 71 70 /** Load calculation spinlock */71 SPINLOCK_STATIC_INITIALIZE_NAME(load_lock, "load_lock");72 /** Load calculation lock */ 73 static mutex_t load_lock; 72 74 73 75 /** Get system uptime … … 156 158 static size_t get_task_virtmem(as_t *as) 157 159 { 158 mutex_lock(&as->lock);159 160 160 size_t result = 0; 161 162 /* 163 * We are holding some spinlocks here and therefore are not allowed to 164 * block. Only attempt to lock the address space and address space area 165 * mutexes conditionally. If it is not possible to lock either object, 166 * allow the statistics to be inexact by skipping the respective object. 167 * 168 * Note that it may be infinitely better to let the address space 169 * management code compute these statistics as it proceeds instead of 170 * having them calculated here over and over again here. 
171 */ 172 173 if (SYNCH_FAILED(mutex_trylock(&as->lock))) 174 return result * PAGE_SIZE; 161 175 162 176 /* Walk the B+ tree and count pages */ … … 171 185 as_area_t *area = node->value[i]; 172 186 173 mutex_lock(&area->lock); 187 if (SYNCH_FAILED(mutex_trylock(&area->lock))) 188 continue; 174 189 result += area->pages; 175 190 mutex_unlock(&area->lock); … … 331 346 332 347 /* Interrupts are already disabled */ 333 spinlock_lock(& (thread->lock));348 spinlock_lock(&thread->lock); 334 349 335 350 /* Record the statistics and increment the iterator */ … … 337 352 (*iterator)++; 338 353 339 spinlock_unlock(& (thread->lock));354 spinlock_unlock(&thread->lock); 340 355 341 356 return true; … … 602 617 } 603 618 604 /* To always get consistent values acquire the spinlock */ 605 ipl_t ipl = interrupts_disable(); 606 spinlock_lock(&load_lock); 619 /* To always get consistent values acquire the mutex */ 620 mutex_lock(&load_lock); 607 621 608 622 unsigned int i; … … 610 624 stats_load[i] = avenrdy[i] << LOAD_FIXED_SHIFT; 611 625 612 spinlock_unlock(&load_lock); 613 interrupts_restore(ipl); 626 mutex_unlock(&load_lock); 614 627 615 628 return ((void *) stats_load); … … 642 655 643 656 /* Mutually exclude with get_stats_load() */ 644 ipl_t ipl = interrupts_disable(); 645 spinlock_lock(&load_lock); 657 mutex_lock(&load_lock); 646 658 647 659 unsigned int i; … … 649 661 avenrdy[i] = load_calc(avenrdy[i], load_exp[i], ready); 650 662 651 spinlock_unlock(&load_lock); 652 interrupts_restore(ipl); 663 mutex_unlock(&load_lock); 653 664 654 665 thread_sleep(LOAD_INTERVAL); … … 661 672 void stats_init(void) 662 673 { 674 mutex_initialize(&load_lock, MUTEX_PASSIVE); 675 663 676 sysinfo_set_item_fn_val("system.uptime", NULL, get_stats_uptime); 664 677 sysinfo_set_item_fn_data("system.cpus", NULL, get_stats_cpus); -
kernel/generic/src/sysinfo/sysinfo.c
r6c39a907 r25a76ab8 37 37 #include <print.h> 38 38 #include <syscall/copy.h> 39 #include <synch/ spinlock.h>39 #include <synch/mutex.h> 40 40 #include <arch/asm.h> 41 41 #include <errno.h> … … 52 52 static slab_cache_t *sysinfo_item_slab; 53 53 54 /** Sysinfo spinlock */55 SPINLOCK_STATIC_INITIALIZE_NAME(sysinfo_lock, "sysinfo_lock");54 /** Sysinfo lock */ 55 static mutex_t sysinfo_lock; 56 56 57 57 /** Sysinfo item constructor … … 98 98 sizeof(sysinfo_item_t), 0, sysinfo_item_constructor, 99 99 sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED); 100 101 mutex_initialize(&sysinfo_lock, MUTEX_ACTIVE); 100 102 } 101 103 102 104 /** Recursively find an item in sysinfo tree 103 105 * 104 * Should be called with interrupts disabled 105 * and sysinfo_lock held. 106 * Should be called with sysinfo_lock held. 106 107 * 107 108 * @param name Current sysinfo path suffix. … … 168 169 /** Recursively create items in sysinfo tree 169 170 * 170 * Should be called with interrupts disabled 171 * and sysinfo_lock held. 171 * Should be called with sysinfo_lock held. 172 172 * 173 173 * @param name Current sysinfo path suffix. 
… … 299 299 { 300 300 /* Protect sysinfo tree consistency */ 301 ipl_t ipl = interrupts_disable(); 302 spinlock_lock(&sysinfo_lock); 301 mutex_lock(&sysinfo_lock); 303 302 304 303 if (root == NULL) … … 311 310 } 312 311 313 spinlock_unlock(&sysinfo_lock); 314 interrupts_restore(ipl); 312 mutex_unlock(&sysinfo_lock); 315 313 } 316 314 … … 332 330 { 333 331 /* Protect sysinfo tree consistency */ 334 ipl_t ipl = interrupts_disable(); 335 spinlock_lock(&sysinfo_lock); 332 mutex_lock(&sysinfo_lock); 336 333 337 334 if (root == NULL) … … 345 342 } 346 343 347 spinlock_unlock(&sysinfo_lock); 348 interrupts_restore(ipl); 344 mutex_unlock(&sysinfo_lock); 349 345 } 350 346 … … 361 357 { 362 358 /* Protect sysinfo tree consistency */ 363 ipl_t ipl = interrupts_disable(); 364 spinlock_lock(&sysinfo_lock); 359 mutex_lock(&sysinfo_lock); 365 360 366 361 if (root == NULL) … … 373 368 } 374 369 375 spinlock_unlock(&sysinfo_lock); 376 interrupts_restore(ipl); 370 mutex_unlock(&sysinfo_lock); 377 371 } 378 372 … … 394 388 { 395 389 /* Protect sysinfo tree consistency */ 396 ipl_t ipl = interrupts_disable(); 397 spinlock_lock(&sysinfo_lock); 390 mutex_lock(&sysinfo_lock); 398 391 399 392 if (root == NULL) … … 406 399 } 407 400 408 spinlock_unlock(&sysinfo_lock); 409 interrupts_restore(ipl); 401 mutex_unlock(&sysinfo_lock); 410 402 } 411 403 … … 420 412 { 421 413 /* Protect sysinfo tree consistency */ 422 ipl_t ipl = interrupts_disable(); 423 spinlock_lock(&sysinfo_lock); 414 mutex_lock(&sysinfo_lock); 424 415 425 416 if (root == NULL) … … 430 421 item->val_type = SYSINFO_VAL_UNDEFINED; 431 422 432 spinlock_unlock(&sysinfo_lock); 433 interrupts_restore(ipl); 423 mutex_unlock(&sysinfo_lock); 434 424 } 435 425 … … 446 436 { 447 437 /* Protect sysinfo tree consistency */ 448 ipl_t ipl = interrupts_disable(); 449 spinlock_lock(&sysinfo_lock); 438 mutex_lock(&sysinfo_lock); 450 439 451 440 if (root == NULL) … … 461 450 } 462 451 463 spinlock_unlock(&sysinfo_lock); 464 
interrupts_restore(ipl); 452 mutex_unlock(&sysinfo_lock); 465 453 } 466 454 … … 479 467 /** Dump the structure of sysinfo tree 480 468 * 481 * Should be called with interrupts disabled 482 * and sysinfo_lock held. Because this routine 483 * might take a reasonable long time to proceed, 484 * having the spinlock held is not optimal, but 485 * there is no better simple solution. 469 * Should be called with sysinfo_lock held. 486 470 * 487 471 * @param root Root item of the current (sub)tree. … … 559 543 /* Avoid other functions to mess with sysinfo 560 544 while we are dumping it */ 561 ipl_t ipl = interrupts_disable(); 562 spinlock_lock(&sysinfo_lock); 545 mutex_lock(&sysinfo_lock); 563 546 564 547 if (root == NULL) … … 567 550 sysinfo_dump_internal(root, 0); 568 551 569 spinlock_unlock(&sysinfo_lock); 570 interrupts_restore(ipl); 552 mutex_unlock(&sysinfo_lock); 571 553 } 572 554 573 555 /** Return sysinfo item value determined by name 574 556 * 575 * Should be called with interrupts disabled 576 * and sysinfo_lock held. 557 * Should be called with sysinfo_lock held. 577 558 * 578 559 * @param name Sysinfo path. … … 632 613 /** Return sysinfo item determined by name from user space 633 614 * 634 * Should be called with interrupts disabled 635 * and sysinfo_lock held. The path string passed from 636 * the user space has to be properly null-terminated 615 * The path string passed from the user space has to be properly null-terminated 637 616 * (the last passed character must be null). 638 617 * … … 656 635 657 636 if ((copy_from_uspace(path, ptr, size + 1) == 0) 658 && (path[size] == 0)) 637 && (path[size] == 0)) { 638 /* 639 * Prevent other functions from messing with sysinfo while we 640 * are reading it. 
641 */ 642 mutex_lock(&sysinfo_lock); 659 643 ret = sysinfo_get_item(path, NULL, dry_run); 660 644 mutex_unlock(&sysinfo_lock); 645 } 661 646 free(path); 662 647 return ret; … … 677 662 unative_t sys_sysinfo_get_tag(void *path_ptr, size_t path_size) 678 663 { 679 /* Avoid other functions to mess with sysinfo 680 while we are reading it */ 681 ipl_t ipl = interrupts_disable(); 682 spinlock_lock(&sysinfo_lock); 683 684 /* Get the item. 685 686 N.B.: There is no need to free any potential generated 687 binary data since we request a dry run */ 664 /* 665 * Get the item. 666 * 667 * N.B.: There is no need to free any potential generated 668 * binary data since we request a dry run. 669 */ 688 670 sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, true); 689 671 690 /* Map generated value types to constant types 691 (user space does not care whether the 692 value is constant or generated) */ 672 /* 673 * Map generated value types to constant types (user space does not care 674 * whether the value is constant or generated). 675 */ 693 676 if (ret.tag == SYSINFO_VAL_FUNCTION_VAL) 694 677 ret.tag = SYSINFO_VAL_VAL; … … 696 679 ret.tag = SYSINFO_VAL_DATA; 697 680 698 spinlock_unlock(&sysinfo_lock);699 interrupts_restore(ipl);700 701 681 return (unative_t) ret.tag; 702 682 } … … 719 699 void *value_ptr) 720 700 { 721 /* Avoid other functions to mess with sysinfo 722 while we are reading it */ 723 ipl_t ipl = interrupts_disable(); 724 spinlock_lock(&sysinfo_lock); 725 726 /* Get the item. 727 728 N.B.: There is no need to free any potential generated 729 binary data since we request a dry run */ 701 int rc; 702 703 /* 704 * Get the item. 705 * 706 * N.B.: There is no need to free any potential generated binary data 707 * since we request a dry run. 
708 */ 730 709 sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, true); 731 int rc;732 710 733 711 /* Only constant or generated numerical value is returned */ … … 737 715 rc = EINVAL; 738 716 739 spinlock_unlock(&sysinfo_lock);740 interrupts_restore(ipl);741 742 717 return (unative_t) rc; 743 718 } … … 760 735 void *size_ptr) 761 736 { 762 /* Avoid other functions to mess with sysinfo 763 while we are reading it */ 764 ipl_t ipl = interrupts_disable(); 765 spinlock_lock(&sysinfo_lock); 766 767 /* Get the item. 768 769 N.B.: There is no need to free any potential generated 770 binary data since we request a dry run */ 737 int rc; 738 739 /* 740 * Get the item. 741 * 742 * N.B.: There is no need to free any potential generated binary data 743 * since we request a dry run. 744 */ 771 745 sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, true); 772 int rc;773 746 774 747 /* Only the size of constant or generated binary data is considered */ … … 779 752 rc = EINVAL; 780 753 781 spinlock_unlock(&sysinfo_lock);782 interrupts_restore(ipl);783 784 754 return (unative_t) rc; 785 755 } … … 807 777 void *buffer_ptr, size_t buffer_size) 808 778 { 809 /* Avoid other functions to mess with sysinfo 810 while we are reading it */ 811 ipl_t ipl = interrupts_disable(); 812 spinlock_lock(&sysinfo_lock); 779 int rc; 813 780 814 781 /* Get the item */ 815 782 sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, false); 816 int rc; 817 783 818 784 /* Only constant or generated binary data is considered */ 819 785 if ((ret.tag == SYSINFO_VAL_DATA) || (ret.tag == SYSINFO_VAL_FUNCTION_DATA)) { … … 831 797 free(ret.data.data); 832 798 833 spinlock_unlock(&sysinfo_lock);834 interrupts_restore(ipl);835 836 799 return (unative_t) rc; 837 800 } -
kernel/generic/src/time/clock.c
r6c39a907 r25a76ab8 140 140 /* Account lost ticks to CPU usage */ 141 141 if (CPU->idle) { 142 ASSERT(missed_clock_ticks == 0); 143 CPU->idle_ticks++; 142 CPU->idle_ticks += missed_clock_ticks + 1; 144 143 } else { 145 144 CPU->busy_ticks += missed_clock_ticks + 1;
Note:
See TracChangeset
for help on using the changeset viewer.
