Changeset df29f24 in mainline for kernel/generic/src
- Timestamp: 2011-06-01T09:04:08Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0a7627b, c9f0975
- Parents: e51a514 (diff), 5d1b3aa (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent. Each file below is shown as a diff from parent e51a514 to df29f24.
- Location: kernel/generic/src
- Files: 23 edited
kernel/generic/src/adt/list.c
     *
     */
    -bool list_member(const link_t *link, const link_t *head)
    +int list_member(const link_t *link, const link_t *head)
     {
         bool found = false;
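Callers treat the result as a truth value, so the bool/int switch does not change behaviour. A stand-alone sketch of such a membership test over a circular doubly linked list (illustrative code only, not the kernel's <adt/list.h>):

    #include <assert.h>
    #include <stddef.h>

    typedef struct link {
        struct link *prev;
        struct link *next;
    } link_t;

    /* Return nonzero iff 'link' is reachable from the list head 'head'. */
    static int list_member_sketch(const link_t *link, const link_t *head)
    {
        for (const link_t *cur = head->next; cur != head; cur = cur->next) {
            if (cur == link)
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        link_t head = { &head, &head };  /* empty circular list */
        link_t node = { &head, &head };  /* not linked in yet */

        assert(!list_member_sketch(&node, &head));

        /* Link 'node' right after the head. */
        node.next = head.next;
        node.prev = &head;
        head.next->prev = &node;
        head.next = &node;

        assert(list_member_sketch(&node, &head));
        return 0;
    }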
kernel/generic/src/console/console.c
    -#define KLOG_LATENCY  8
    ...
     /** Kernel log initialized */
    -static bool klog_inited = false;
    +static atomic_t klog_inited = {false};
    ...
     /** Kernel log spinlock */
    -SPINLOCK_STATIC_INITIALIZE_NAME(klog_lock, "*klog_lock");
    +SPINLOCK_STATIC_INITIALIZE_NAME(klog_lock, "klog_lock");
    ...
         event_set_unmask_callback(EVENT_KLOG, klog_update);
    -
    -    spinlock_lock(&klog_lock);
    -    klog_inited = true;
    -    spinlock_unlock(&klog_lock);
    +    atomic_set(&klog_inited, true);
     }
    ...
     void klog_update(void)
     {
    +    if (!atomic_get(&klog_inited))
    +        return;
    +
         spinlock_lock(&klog_lock);
     
    -    if ((klog_inited) && (klog_uspace > 0)) {
    +    if (klog_uspace > 0) {
             if (event_notify_3(EVENT_KLOG, true, klog_start, klog_len,
                 klog_uspace) == EOK)
    ...
     void putchar(const wchar_t ch)
     {
    +    bool ordy = ((stdout) && (stdout->op->write));
    +
         spinlock_lock(&klog_lock);
     
    -    if ((klog_stored > 0) && (stdout) && (stdout->op->write)) {
    -        /* Print characters stored in kernel log */
    -        size_t i;
    -        for (i = klog_len - klog_stored; i < klog_len; i++)
    -            stdout->op->write(stdout, klog[(klog_start + i) % KLOG_LENGTH], silent);
    -        klog_stored = 0;
    +    /* Print characters stored in kernel log */
    +    if (ordy) {
    +        while (klog_stored > 0) {
    +            wchar_t tmp = klog[(klog_start + klog_len - klog_stored) % KLOG_LENGTH];
    +            klog_stored--;
    +
    +            /*
    +             * We need to give up the spinlock for
    +             * the physical operation of writing out
    +             * the character.
    +             */
    +            spinlock_unlock(&klog_lock);
    +            stdout->op->write(stdout, tmp, silent);
    +            spinlock_lock(&klog_lock);
    +        }
         }
    ...
         klog_start = (klog_start + 1) % KLOG_LENGTH;
     
    -    if ((stdout) && (stdout->op->write))
    +    if (!ordy) {
    +        if (klog_stored < klog_len)
    +            klog_stored++;
    +    }
    +
    +    /* The character is stored for uspace */
    +    if (klog_uspace < klog_len)
    +        klog_uspace++;
    +
    +    spinlock_unlock(&klog_lock);
    +
    +    if (ordy) {
    +        /*
    +         * Output the character. In this case
    +         * it should be no longer buffered.
    +         */
             stdout->op->write(stdout, ch, silent);
    -    else {
    +    } else {
             /*
              * No standard output routine defined yet.
    ...
              * Note that the early_putc() function might be
              * a no-op on certain hardware configurations.
    -         *
              */
             early_putchar(ch);
    -
    -        if (klog_stored < klog_len)
    -            klog_stored++;
         }
    -
    -    /* The character is stored for uspace */
    -    if (klog_uspace < klog_len)
    -        klog_uspace++;
    -
    -    /* Check notify uspace to update */
    -    bool update;
    -    if ((klog_uspace > KLOG_LATENCY) || (ch == '\n'))
    -        update = true;
    -    else
    -        update = false;
    -
    -    spinlock_unlock(&klog_lock);
    -
    -    if (update)
    +
    +    /* Force notification on newline */
    +    if (ch == '\n')
             klog_update();
     }
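The putchar() rewrite above follows a common pattern: decide up front whether the output device is ready (ordy), and never hold the klog spinlock across the physical write, since that write may be slow. A minimal, self-contained sketch of that drain loop; all names are illustrative, a POSIX mutex stands in for the kernel spinlock and putwchar() for stdout->op->write():

    #include <pthread.h>
    #include <stddef.h>
    #include <wchar.h>

    #define BUF_LEN 64

    static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
    static wchar_t buf[BUF_LEN];
    static size_t buf_start;   /* index of the oldest unwritten character */
    static size_t buf_stored;  /* number of characters not yet written out */

    static void store(wchar_t ch)
    {
        pthread_mutex_lock(&buf_lock);
        if (buf_stored < BUF_LEN) {
            buf[(buf_start + buf_stored) % BUF_LEN] = ch;
            buf_stored++;
        }
        pthread_mutex_unlock(&buf_lock);
    }

    static void drain(void)
    {
        pthread_mutex_lock(&buf_lock);
        while (buf_stored > 0) {
            wchar_t tmp = buf[buf_start];
            buf_start = (buf_start + 1) % BUF_LEN;
            buf_stored--;

            /* Give up the lock for the (possibly slow) physical write. */
            pthread_mutex_unlock(&buf_lock);
            putwchar(tmp);
            pthread_mutex_lock(&buf_lock);
        }
        pthread_mutex_unlock(&buf_lock);
    }

    int main(void)
    {
        store(L'h');
        store(L'i');
        store(L'\n');
        drain();
        return 0;
    }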
kernel/generic/src/ddi/ddi.c
         task_t *task = task_find_by_id(id);
     
    -    if ((!task) || (!context_check(CONTEXT, task->context))) {
    +    if ((!task) || (!container_check(CONTAINER, task->container))) {
             /*
              * There is no task with the specified ID
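container_check() and the CONTAINER macro belong to the kernel's container support and are not part of this changeset; presumably the check is simply a comparison of container IDs, roughly along these lines (hypothetical sketch, the real definitions live in a kernel header):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t container_id_t;  /* assumed representation */

    static inline bool container_check(container_id_t c1, container_id_t c2)
    {
        /* Two tasks may interact only if they share a container. */
        return c1 == c2;
    }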
kernel/generic/src/debug/panic.c
         printf("\n");
     
    +    printf("THE=%p: ", THE);
    +    if (THE != NULL) {
    +        printf("pe=%" PRIun " thr=%p task=%p cpu=%p as=%p"
    +            " magic=%#" PRIx32 "\n", THE->preemption_disabled,
    +            THE->thread, THE->task, THE->cpu, THE->as, THE->magic);
    +    } else
    +        printf("invalid\n");
    +
         if (istate) {
             istate_decode(istate);
kernel/generic/src/interrupt/interrupt.c
          * stack.
          */
    -    return (istate_t *) ((uint8_t *) thread->kstack + THREAD_STACK_SIZE -
    -        sizeof(istate_t));
    +    return (istate_t *) ((uint8_t *)
    +        thread->kstack + STACK_SIZE - sizeof(istate_t));
     }
kernel/generic/src/ipc/event.c
             events[i].imethod = 0;
             events[i].masked = false;
    -        events[i].unmask_cb = NULL;
    +        events[i].unmask_callback = NULL;
         }
     }
    ...
     /** Define a callback function for the event unmask event.
      *
    - * @param evno Event type.
    - * @param cb   Callback function to be called when the event is unmasked.
    + * @param evno     Event type.
    + * @param callback Callback function to be called when
    + *                 the event is unmasked.
      *
      */
    -void event_set_unmask_callback(event_type_t evno, void (*cb)(void))
    +void event_set_unmask_callback(event_type_t evno, event_callback_t callback)
     {
         ASSERT(evno < EVENT_END);
     
         spinlock_lock(&events[evno].lock);
    -    events[evno].unmask_cb = cb;
    +    events[evno].unmask_callback = callback;
         spinlock_unlock(&events[evno].lock);
     }
    ...
     static void event_unmask(event_type_t evno)
     {
    -    void (*cb)(void);
         ASSERT(evno < EVENT_END);
     
         spinlock_lock(&events[evno].lock);
         events[evno].masked = false;
    -    cb = events[evno].unmask_cb;
    +    event_callback_t callback = events[evno].unmask_callback;
         spinlock_unlock(&events[evno].lock);
     
         /*
    -     * Check if there is an unmask callback function defined for this event.
    +     * Check if there is an unmask callback
    +     * function defined for this event.
          */
    -    if (cb)
    -        cb();
    +    if (callback != NULL)
    +        callback();
     }
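event_callback_t itself is not defined in this hunk; presumably it is a plain typedef such as `typedef void (*event_callback_t)(void);`. The code also keeps a useful discipline: snapshot the pointer under the lock, then invoke it only after the lock has been dropped. A small stand-alone sketch of that discipline (illustrative names; a POSIX mutex replaces the kernel spinlock):

    #include <pthread.h>
    #include <stdio.h>

    typedef void (*event_callback_t)(void);

    static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;
    static event_callback_t unmask_callback;  /* protected by cb_lock */

    static void set_unmask_callback(event_callback_t callback)
    {
        pthread_mutex_lock(&cb_lock);
        unmask_callback = callback;
        pthread_mutex_unlock(&cb_lock);
    }

    static void unmask(void)
    {
        /* Snapshot the pointer under the lock ... */
        pthread_mutex_lock(&cb_lock);
        event_callback_t callback = unmask_callback;
        pthread_mutex_unlock(&cb_lock);

        /* ... and call it outside, so it may take other locks itself. */
        if (callback != NULL)
            callback();
    }

    static void hello(void)
    {
        puts("unmasked");
    }

    int main(void)
    {
        set_unmask_callback(hello);
        unmask();
        return 0;
    }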
kernel/generic/src/lib/elf.c
    -    /* Inspect all section headers and proccess them. */
    +    /* Inspect all section headers and process them. */
         for (i = 0; i < header->e_shnum; i++) {
             elf_section_header_t *sechdr =
kernel/generic/src/lib/memfnc.c
     void *memset(void *dst, int val, size_t cnt)
     {
    -    size_t i;
    -    uint8_t *ptr = (uint8_t *) dst;
    +    uint8_t *dp = (uint8_t *) dst;
     
    -    for (i = 0; i < cnt; i++)
    -        ptr[i] = val;
    +    while (cnt-- != 0)
    +        *dp++ = val;
     
         return dst;
     }
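The rewritten memset() fills the same cnt bytes; the pointer-walking form drops the index variable and still handles cnt == 0, since the loop body then never executes. A small host-side check of that logic (hypothetical test code, not part of the kernel):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Same logic as the kernel version above, renamed to avoid the libc symbol. */
    static void *memset_sketch(void *dst, int val, size_t cnt)
    {
        uint8_t *dp = (uint8_t *) dst;

        while (cnt-- != 0)
            *dp++ = val;

        return dst;
    }

    int main(void)
    {
        uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

        memset_sketch(buf, 0xAB, 4);      /* fill the first four bytes */
        memset_sketch(buf + 4, 0xCD, 0);  /* cnt == 0: must be a no-op */

        assert(buf[0] == 0xAB && buf[3] == 0xAB);
        assert(buf[4] == 5 && buf[7] == 8);
        return 0;
    }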
kernel/generic/src/main/main.c
     #endif
     
    -#define CONFIG_STACK_SIZE  ((1 << STACK_FRAMES) * STACK_SIZE)
    -
     /** Main kernel routine for bootstrap CPU.
      *
    ...
         config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
             hardcoded_kdata_size, PAGE_SIZE);
    -    config.stack_size = CONFIG_STACK_SIZE;
    +    config.stack_size = STACK_SIZE;
    ...
         context_save(&ctx);
    -    context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
    -        THREAD_STACK_SIZE);
    +    context_set(&ctx, FADDR(main_bsp_separated_stack),
    +        config.stack_base, STACK_SIZE);
         context_restore(&ctx);
         /* not reached */
    ...
         context_save(&CPU->saved_context);
         context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
    -        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    +        (uintptr_t) CPU->stack, STACK_SIZE);
         context_restore(&CPU->saved_context);
         /* not reached */
kernel/generic/src/main/uinit.c
     /**
      * @file
    - * @brief
    + * @brief Userspace bootstrap thread.
      *
      * This file contains uinit kernel thread wich is used to start every
    (the remaining hunks in this file only drop stray blank lines and trailing whitespace)
kernel/generic/src/mm/as.c
    Most of this file's changes replace open-coded page-to-size arithmetic
    (expressions such as count << PAGE_WIDTH, area->pages << PAGE_WIDTH and
    FRAMES2SIZE(area->pages)) with the P2SZ() macro, pass the new third argument
    (false) to page_mapping_find(), re-wrap long lines, and extend the comments on
    software translation caches to mention the ppc32 PHT next to the sparc64 TSB.
    Representative hunks:

    -    if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))
    +    if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
    ...
         if ((area->base <= va) &&
    -        (va < area->base + (area->pages << PAGE_WIDTH)))
    +        (va <= area->base + (P2SZ(area->pages) - 1)))
             return area;
    ...
    -        pte_t *pte = page_mapping_find(as, ptr +
    -            (i << PAGE_WIDTH));
    +        pte_t *pte = page_mapping_find(as,
    +            ptr + P2SZ(i), false);
    ...
         /*
    -     * Invalidate potential software translation caches (e.g. TSB on
    -     * sparc64).
    +     * Invalidate potential software translation caches
    +     * (e.g. TSB on sparc64, PHT on ppc32).
          */
         as_invalidate_translation_cache(as, area->base, area->pages);
    ...
    -    size_t src_size = src_area->pages << PAGE_WIDTH;
    +    size_t src_size = P2SZ(src_area->pages);
    ...
    -        info[area_idx].size = FRAMES2SIZE(area->pages);
    +        info[area_idx].size = P2SZ(area->pages);

    The same P2SZ() substitution runs through the area resize, destroy, flag-change
    and used-space bookkeeping code, and a comment typo is fixed ("righ most"
    becomes "rightmost").
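P2SZ() itself is not defined anywhere in this diff; judging from the expressions it replaces, it presumably converts a page count into a byte size, along these lines (hypothetical definition for illustration only):

    /* Assumed definition; the real macro lives in a kernel header not shown here. */
    #define PAGE_WIDTH   12                       /* 4 KiB pages, for illustration */
    #define P2SZ(pages)  ((pages) << PAGE_WIDTH)  /* page count -> size in bytes */

    /*
     * So, for example, the earlier overlap test
     *     overlaps(addr, count << PAGE_WIDTH, area->base, area->pages << PAGE_WIDTH)
     * becomes
     *     overlaps(addr, P2SZ(count), area->base, P2SZ(area->pages))
     */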
kernel/generic/src/mm/backend_anon.c
     #include <typedefs.h>
     #include <align.h>
    +#include <memstr.h>
     #include <arch.h>
    ...
                     page_table_lock(area->as, false);
                     pte = page_mapping_find(area->as,
    -                    base + j * PAGE_SIZE);
    +                    base + P2SZ(j), false);
                     ASSERT(pte && PTE_VALID(pte) &&
                         PTE_PRESENT(pte));
                     btree_insert(&area->sh_info->pagemap,
    -                    (base + j * PAGE_SIZE) - area->base,
    +                    (base + P2SZ(j)) - area->base,
                         (void *) PTE_GET_FRAME(pte), NULL);
                     page_table_unlock(area->as, false);
kernel/generic/src/mm/backend_elf.c
             if (!(area->flags & AS_AREA_WRITE))
                 if (base >= entry->p_vaddr &&
    -                base + count * PAGE_SIZE <= start_anon)
    +                base + P2SZ(count) <= start_anon)
                     continue;
    ...
                 if (!(area->flags & AS_AREA_WRITE))
                     if (base >= entry->p_vaddr &&
    -                    base + (j + 1) * PAGE_SIZE <=
    -                    start_anon)
    +                    base + P2SZ(j + 1) <= start_anon)
                         continue;
     
                 page_table_lock(area->as, false);
                 pte = page_mapping_find(area->as,
    -                base + j * PAGE_SIZE);
    +                base + P2SZ(j), false);
                 ASSERT(pte && PTE_VALID(pte) &&
                     PTE_PRESENT(pte));
                 btree_insert(&area->sh_info->pagemap,
    -                (base + j * PAGE_SIZE) - area->base,
    +                (base + P2SZ(j)) - area->base,
                     (void *) PTE_GET_FRAME(pte), NULL);
                 page_table_unlock(area->as, false);
kernel/generic/src/mm/page.c
      * using flags. Allocate and setup any missing page tables.
      *
    - * @param as    Address space to wich page belongs.
    + * @param as    Address space to which page belongs.
      * @param page  Virtual address of the page to be mapped.
      * @param frame Physical address of memory frame to which the mapping is
    ...
      * this call visible.
      *
    - * @param as   Address space to wich page belongs.
    + * @param as   Address space to which page belongs.
      * @param page Virtual address of the page to be demapped.
      *
    ...
    -/** Find mapping for virtual page
    - *
    - * Find mapping for virtual page.
    - *
    - * @param as   Address space to wich page belongs.
    - * @param page Virtual page.
    +/** Find mapping for virtual page.
    + *
    + * @param as     Address space to which page belongs.
    + * @param page   Virtual page.
    + * @param nolock True if the page tables need not be locked.
      *
      * @return NULL if there is no such mapping; requested mapping
      *
      */
    -NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page)
    -{
    -    ASSERT(page_table_locked(as));
    +NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page, bool nolock)
    +{
    +    ASSERT(nolock || page_table_locked(as));
     
         ASSERT(page_mapping_operations);
         ASSERT(page_mapping_operations->mapping_find);
     
    -    return page_mapping_operations->mapping_find(as, page);
    +    return page_mapping_operations->mapping_find(as, page, nolock);
     }
    ...
         mutex_lock(&AS->lock);
     
    -    pte_t *pte = page_mapping_find(AS, virt_address);
    +    pte_t *pte = page_mapping_find(AS, virt_address, true);
         if (!PTE_VALID(pte) || !PTE_PRESENT(pte)) {
             mutex_unlock(&AS->lock);
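The new nolock argument relaxes the locking assertion for callers that cannot, or need not, hold the page-table lock; the caller shown in the last hunk passes true. A sketch of the two calling styles, with extern declarations standing in for the kernel API (the pte_is_present() helper is an assumption, not a real kernel function):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct as as_t;    /* opaque here */
    typedef struct pte pte_t;  /* opaque here */

    extern void page_table_lock(as_t *as, bool lock);
    extern void page_table_unlock(as_t *as, bool unlock);
    extern pte_t *page_mapping_find(as_t *as, uintptr_t page, bool nolock);
    extern bool pte_is_present(pte_t *pte);  /* stand-in for PTE_PRESENT() */

    /* Usual case: hold the page-table lock across the lookup and the use. */
    static bool page_is_mapped(as_t *as, uintptr_t va)
    {
        page_table_lock(as, true);
        pte_t *pte = page_mapping_find(as, va, false);
        bool mapped = (pte != NULL) && pte_is_present(pte);
        page_table_unlock(as, true);

        return mapped;
    }

    /*
     * Special case: the caller guarantees by other means that the mapping
     * cannot change underneath it, so it may pass nolock = true.
     */
    static pte_t *lookup_stable_mapping(as_t *as, uintptr_t va)
    {
        return page_mapping_find(as, va, true);
    }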
kernel/generic/src/printf/vprintf.c
     #include <typedefs.h>
     #include <str.h>
    -
    -IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(printf_lock, "*printf_lock");
     
     static int vprintf_str_write(const char *str, size_t size, void *data)
    ...
         };
     
    -    irq_spinlock_lock(&printf_lock, true);
    -    int ret = printf_core(fmt, &ps, ap);
    -    irq_spinlock_unlock(&printf_lock, true);
    -
    -    return ret;
    +    return printf_core(fmt, &ps, ap);
     }
kernel/generic/src/proc/program.c
     #include <proc/program.h>
     
    -#ifndef LOADED_PROG_STACK_PAGES_NO
    -#define LOADED_PROG_STACK_PAGES_NO  1
    -#endif
    -
     /**
      * Points to the binary image used as the program loader. All non-initial
    ...
         /*
    -     * Create the data address space area.
    +     * Create the stack address space area.
         */
         as_area_t *area = as_area_create(as,
             AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
    -        LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
    -        AS_AREA_ATTR_NONE, &anon_backend, NULL);
    +        STACK_SIZE, USTACK_ADDRESS, AS_AREA_ATTR_NONE,
    +        &anon_backend, NULL);
         if (!area)
             return ENOMEM;
kernel/generic/src/proc/scheduler.c
         context_save(&CPU->saved_context);
         context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
    -        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    +        (uintptr_t) CPU->stack, STACK_SIZE);
         context_restore(&CPU->saved_context);
kernel/generic/src/proc/task.c
         str_cpy(task->name, TASK_NAME_BUFLEN, name);
     
    -    task->context = CONTEXT;
    +    task->container = CONTAINER;
         task->capabilities = 0;
         task->ucycles = 0;
    ...
         if ((ipc_phone_0) &&
    -        (context_check(ipc_phone_0->task->context, task->context)))
    +        (container_check(ipc_phone_0->task->container, task->container)))
             ipc_phone_connect(&task->phones[0], ipc_phone_0);
    ...
         printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
             " %9" PRIu64 "%c %9" PRIu64 "%c\n", task->taskid,
    -        task->name, task->context, task, task->as,
    +        task->name, task->container, task, task->as,
             ucycles, usuffix, kcycles, ksuffix);
    ...
         printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
    -        task->taskid, task->name, task->context, task, task->as);
    +        task->taskid, task->name, task->container, task, task->as);
    ...
    -        printf("[id    ] [name        ] [ctx] [address ] [as      ]"
    +        printf("[id    ] [name        ] [ctn] [address ] [as      ]"
                 " [ucycles ] [kcycles ]\n");
    ...
    -        printf("[id    ] [name        ] [ctx] [address         ]"
    +        printf("[id    ] [name        ] [ctn] [address         ]"
                 " [as               ]\n");
kernel/generic/src/proc/the.c
         the->task = NULL;
         the->as = NULL;
    +    the->magic = MAGIC;
     }
    ...
     NO_TRACE void the_copy(the_t *src, the_t *dst)
     {
    +    ASSERT(src->magic == MAGIC);
         *dst = *src;
     }
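Stamping a structure with a known magic value when it is initialized and asserting it before the structure is copied is a cheap sanity check against using memory that was never set up, which is also what the panic.c hunk above prints. A generic stand-alone illustration (not the kernel's the_t; MAGIC's real value is defined elsewhere in the kernel):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define WIDGET_MAGIC  UINT32_C(0xfacefeed)  /* arbitrary sentinel value */

    typedef struct {
        int payload;
        uint32_t magic;
    } widget_t;

    static void widget_init(widget_t *w)
    {
        w->payload = 0;
        w->magic = WIDGET_MAGIC;  /* stamp on initialization */
    }

    static void widget_copy(const widget_t *src, widget_t *dst)
    {
        assert(src->magic == WIDGET_MAGIC);  /* catch bogus sources early */
        memcpy(dst, src, sizeof(*dst));
    }

    int main(void)
    {
        widget_t a, b;

        widget_init(&a);
        widget_copy(&a, &b);
        return 0;
    }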
kernel/generic/src/proc/thread.c
     #include <errno.h>
     
    -#ifndef LOADED_PROG_STACK_PAGES_NO
    -#define LOADED_PROG_STACK_PAGES_NO  1
    -#endif
    -
     /** Thread states */
     const char *thread_states[] = {
    ...
         /* Not needed, but good for debugging */
    -    memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
    +    memsetb(thread->kstack, STACK_SIZE, 0);
     
         irq_spinlock_lock(&tidlock, true);
    ...
         context_save(&thread->saved_context);
         context_set(&thread->saved_context, FADDR(cushion),
    -        (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
    +        (uintptr_t) thread->kstack, STACK_SIZE);
     
         the_initialize((the_t *) thread->kstack);
    ...
         printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
             thread->tid, name, thread, thread_states[thread->state],
    -        thread->task, thread->task->context);
    +        thread->task, thread->task->container);
    ...
         printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
             thread->tid, name, thread, thread_states[thread->state],
    -        thread->task, thread->task->context);
    +        thread->task, thread->task->container);
    ...
             printf("[id    ] [name        ] [address ] [state ] [task ]"
    -            " [ctx]\n");
    +            " [ctn]\n");
    ...
             printf("[id    ] [name        ] [address ] [state ]"
    -            " [task ] [ctx]\n");
    +            " [task ] [ctn]\n");
kernel/generic/src/security/cap.c
         task_t *task = task_find_by_id(taskid);
     
    -    if ((!task) || (!context_check(CONTEXT, task->context))) {
    +    if ((!task) || (!container_check(CONTAINER, task->container))) {
             irq_spinlock_unlock(&tasks_lock, true);
             return (sysarg_t) ENOENT;
    ...
         task_t *task = task_find_by_id(taskid);
    -    if ((!task) || (!context_check(CONTEXT, task->context))) {
    +    if ((!task) || (!container_check(CONTAINER, task->container))) {
             irq_spinlock_unlock(&tasks_lock, true);
             return (sysarg_t) ENOENT;
kernel/generic/src/synch/futex.c
         page_table_lock(AS, true);
    -    t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE));
    +    t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false);
         if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
             page_table_unlock(AS, true);
    ...
         page_table_lock(AS, true);
    -    t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE));
    +    t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false);
         if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
             page_table_unlock(AS, true);
kernel/generic/src/synch/spinlock.c
          * run in a simulator) that caused problems with both
          * printf_lock and the framebuffer lock.
    -     *
          */
         if (lock->name[0] == '*')