- Timestamp: 2024-02-23T17:57:23Z (2 years ago)
- Children: 192019f
- Parents: 86f862c (diff), 90ba06c (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- git-author: boba-buba <120932204+boba-buba@…> (2024-02-23 17:57:23)
- git-committer: GitHub <noreply@…> (2024-02-23 17:57:23)
- Location: kernel
- Files: 1 added, 1 deleted, 58 edited, 1 moved
- arch/amd64/_link.ld.in (modified) (1 diff)
- arch/amd64/include/arch/mm/page.h (modified) (4 diffs)
- arch/amd64/include/arch/mm/pat.h (moved) (moved from uspace/lib/c/arch/amd64/include/libarch/stackarg.h) (3 diffs)
- arch/amd64/src/amd64.c (modified) (2 diffs)
- arch/arm32/_link.ld.in (modified) (3 diffs)
- arch/arm64/_link.ld.in (modified) (2 diffs)
- arch/arm64/src/interrupt.c (modified) (1 diff)
- arch/ia32/_link.ld.in (modified) (1 diff)
- arch/ia32/include/arch/mm/page.h (modified) (2 diffs)
- arch/ia32/include/arch/mm/pat.h (added)
- arch/ia32/src/ia32.c (modified) (2 diffs)
- arch/ia64/_link.ld.in (modified) (2 diffs)
- arch/ia64/src/drivers/it.c (modified) (1 diff)
- arch/ia64/src/drivers/ski.c (modified) (1 diff)
- arch/mips32/_link.ld.in (modified) (2 diffs)
- arch/mips32/src/interrupt.c (modified) (1 diff)
- arch/ppc32/_link.ld.in (modified) (1 diff)
- arch/riscv64/_link.ld.in (modified) (1 diff)
- arch/sparc64/_link.ld.in (modified) (1 diff)
- arch/sparc64/src/drivers/niagara.c (modified) (1 diff)
- arch/sparc64/src/drivers/tick.c (modified) (1 diff)
- arch/sparc64/src/proc/sun4u/scheduler.c (modified) (1 diff)
- arch/sparc64/src/proc/sun4v/scheduler.c (modified) (1 diff)
- genarch/src/fb/fb.c (modified) (1 diff)
- genarch/src/kbrd/kbrd.c (modified) (1 diff)
- genarch/src/kbrd/kbrd_at.c (modified) (1 diff)
- genarch/src/srln/srln.c (modified) (1 diff)
- generic/include/atomic.h (modified) (1 diff)
- generic/include/context.h (modified) (3 diffs)
- generic/include/cpu.h (modified) (3 diffs)
- generic/include/lib/refcount.h (modified) (1 diff)
- generic/include/mm/mm.h (modified) (2 diffs)
- generic/include/proc/scheduler.h (modified) (2 diffs)
- generic/include/proc/task.h (modified) (2 diffs)
- generic/include/proc/thread.h (modified) (7 diffs)
- generic/meson.build (modified) (1 diff)
- generic/src/console/cmd.c (modified) (4 diffs)
- generic/src/cpu/cpu.c (modified) (2 diffs)
- generic/src/ddi/ddi.c (modified) (2 diffs)
- generic/src/interrupt/interrupt.c (modified) (2 diffs)
- generic/src/ipc/ipc.c (modified) (1 diff)
- generic/src/ipc/kbox.c (modified) (6 diffs)
- generic/src/main/kinit.c (modified) (5 diffs)
- generic/src/main/main.c (modified) (6 diffs)
- generic/src/preempt/preemption.c (deleted)
- generic/src/proc/program.c (modified) (4 diffs)
- generic/src/proc/scheduler.c (modified) (13 diffs)
- generic/src/proc/task.c (modified) (14 diffs)
- generic/src/proc/thread.c (modified) (28 diffs)
- generic/src/security/perm.c (modified) (3 diffs)
- generic/src/syscall/syscall.c (modified) (2 diffs)
- generic/src/sysinfo/stats.c (modified) (7 diffs)
- generic/src/time/clock.c (modified) (3 diffs)
- generic/src/time/timeout.c (modified) (1 diff)
- generic/src/udebug/udebug_ops.c (modified) (3 diffs)
- test/mm/falloc2.c (modified) (4 diffs)
- test/mm/slab1.c (modified) (2 diffs)
- test/mm/slab2.c (modified) (4 diffs)
- test/synch/semaphore1.c (modified) (1 diff)
- test/synch/semaphore2.c (modified) (1 diff)
- test/thread/thread1.c (modified) (3 diffs)
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/amd64/_link.ld.in
r86f862c rf2cb80a 23 23 } 24 24 25 . mapped(PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS + SIZEOF(.unmapped)) : AT (BOOT_OFFSET + SIZEOF_HEADERS + SIZEOF(.unmapped)) {25 .text (PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS + SIZEOF(.unmapped)) : AT (BOOT_OFFSET + SIZEOF_HEADERS + SIZEOF(.unmapped)) { 26 26 ktext_start = .; 27 27 *(.text .text.*); 28 28 ktext_end = .; 29 } 29 30 31 /* stack unwinding data */ 32 .eh_frame_hdr : { 33 eh_frame_hdr_start = .; 34 *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*); 35 eh_frame_hdr_end = .; 36 } 37 .eh_frame : { 38 eh_frame_start = .; 39 KEEP(*(.eh_frame .eh_frame.*)); 40 eh_frame_end = .; 41 } 42 43 .data : { 30 44 kdata_start = .; 31 *(.data .data.*); /* initialized data */ 32 *(.rodata .rodata.*); 33 *(.eh_frame .eh_frame.*); /* stack unwinding data */ 34 *(.eh_frame_hdr .eh_frame_hdr.*); 35 *(COMMON); /* global variables */ 45 *(.rodata .rodata.*); /* read-only global variables */ 46 *(.data .data.*); /* non-zero initialized global variables */ 36 47 37 /* bss can't be omitted from the ELF image. */ 38 *(.bss .bss.*); /* uninitialized static variables */ 48 /* 49 * When .bss is not physically present in the ELF file (MemSz > FileSz) 50 * the kernel crashes during early boot. Not sure which part of the 51 * boot process is to blame, for now just keep .bss packaged with .data 52 * so that FileSz == MemSz. 53 */ 54 55 *(.bss .bss.*); /* uninitialized global variables */ 56 *(COMMON); /* non-`static` global variables without an extern declaration */ 39 57 kdata_end = .; 40 58 } -
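The hunk above splits the stack-unwinding data out of the kernel data section into dedicated .eh_frame_hdr and .eh_frame output sections bracketed by start/end symbols. As a hedged illustration (the C side below is an assumption, not part of this changeset), such linker-defined markers are usually imported into C as symbol arrays so an in-kernel unwinder can locate the tables:

    /* Linker-defined section boundaries; only the symbol names come from the
     * script above, the consuming code is hypothetical. */
    extern const uint8_t eh_frame_hdr_start[];
    extern const uint8_t eh_frame_hdr_end[];
    extern const uint8_t eh_frame_start[];
    extern const uint8_t eh_frame_end[];

    static inline size_t eh_frame_size(void)
    {
        /* Pointer difference over a zero-length symbol pair yields the
         * section size in bytes. */
        return (size_t) (eh_frame_end - eh_frame_start);
    }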
kernel/arch/amd64/include/arch/mm/page.h
r86f862c rf2cb80a 192 192 unsigned int accessed : 1; 193 193 unsigned int dirty : 1; 194 unsigned int unused: 1;194 unsigned int pat : 1; 195 195 unsigned int global : 1; 196 196 unsigned int soft_valid : 1; /**< Valid content even if present bit is cleared. */ … … 211 211 p->writeable << PAGE_WRITE_SHIFT | 212 212 (!p->no_execute) << PAGE_EXEC_SHIFT | 213 p->global << PAGE_GLOBAL_SHIFT); 213 p->global << PAGE_GLOBAL_SHIFT | 214 p->page_write_through << PAGE_WRITE_COMBINE_SHIFT); 214 215 } 215 216 … … 225 226 pte_t *p = &pt[i]; 226 227 227 p->page_cache_disable = !(flags & PAGE_CACHEABLE);228 228 p->present = !(flags & PAGE_NOT_PRESENT); 229 229 p->uaccessible = (flags & PAGE_USER) != 0; … … 232 232 p->global = (flags & PAGE_GLOBAL) != 0; 233 233 234 if (flags & PAGE_WRITE_COMBINE) { 235 /* We have mapped PCD+PWT bits to write-combine mode via PAT MSR. */ 236 /* (If PAT is unsupported, it will default to uncached.) */ 237 p->page_cache_disable = 1; 238 p->page_write_through = 1; 239 } else { 240 p->page_cache_disable = !(flags & PAGE_CACHEABLE); 241 p->page_write_through = 0; 242 } 243 234 244 /* 235 245 * Ensure that there is at least one bit set even if the present bit is cleared. -
kernel/arch/amd64/include/arch/mm/pat.h
r86f862c rf2cb80a 1 1 /* 2 * Copyright (c) 20 06 Josef Cejka2 * Copyright (c) 2024 Jiří Zárevúcky 3 3 * All rights reserved. 4 4 * … … 27 27 */ 28 28 29 /** @addtogroup libcamd6429 /** @addtogroup kernel_amd64_mm 30 30 * @{ 31 31 */ … … 33 33 */ 34 34 35 #ifndef _LIBC_STACKARG_H_ 36 #define _LIBC_STACKARG_H_ 35 #ifndef KERN_amd64_MM_PAT_H_ 36 #define KERN_amd64_MM_PAT_H_ 37 38 #include <arch/asm.h> 39 #include <arch/cpuid.h> 40 41 #define MSR_IA32_PAT 0x00000277 42 43 typedef enum { 44 PAT_TYPE_UNCACHEABLE = 0, 45 PAT_TYPE_WRITE_COMBINING = 1, 46 PAT_TYPE_WRITE_THROUGH = 4, 47 PAT_TYPE_WRITE_PROTECTED = 5, 48 PAT_TYPE_WRITE_BACK = 6, 49 PAT_TYPE_UNCACHED = 7, 50 } pat_type_t; 51 52 /** 53 * Assign caching type for a particular combination of PAT, 54 * PCD and PWT bits in PTE. 55 */ 56 static inline void pat_set_mapping(bool pat, bool pcd, bool pwt, 57 pat_type_t type) 58 { 59 int index = pat << 2 | pcd << 1 | pwt; 60 int shift = index * 8; 61 62 uint64_t r = read_msr(MSR_IA32_PAT); 63 r &= ~(0xffull << shift); 64 r |= ((uint64_t) type) << shift; 65 write_msr(MSR_IA32_PAT, r); 66 } 67 68 static inline bool pat_supported(void) 69 { 70 if (!has_cpuid()) 71 return false; 72 73 cpu_info_t info; 74 cpuid(INTEL_CPUID_STANDARD, &info); 75 76 return (info.cpuid_edx & (1 << 16)) != 0; 77 } 37 78 38 79 #endif -
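To make the intent of pat_set_mapping() concrete, here is the arithmetic for the call the kernel now makes during early boot. The numbers follow directly from the code above and the standard IA32_PAT layout of eight one-byte entries:

    /* pat_set_mapping(false, true, true, PAT_TYPE_WRITE_COMBINING):
     *   index = pat << 2 | pcd << 1 | pwt = 0b011 = 3
     *   shift = index * 8 = 24
     * Byte 3 of the IA32_PAT MSR is cleared and set to 0x01 (write-combining),
     * so any PTE with PCD=1 and PWT=1 now maps its page as write-combining
     * instead of the power-on default. */
    if (pat_supported())
        pat_set_mapping(false, true, true, PAT_TYPE_WRITE_COMBINING);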
kernel/arch/amd64/src/amd64.c
r86f862c rf2cb80a 60 60 #include <arch/vreg.h> 61 61 #include <arch/kseg.h> 62 #include <arch/mm/pat.h> 62 63 #include <genarch/pic/pic_ops.h> 63 64 … … 115 116 /* Disable alignment check */ 116 117 write_cr0(read_cr0() & ~CR0_AM); 118 119 /* Use PCD+PWT bit combination in PTE to mean write-combining mode. */ 120 if (pat_supported()) 121 pat_set_mapping(false, true, true, PAT_TYPE_WRITE_COMBINING); 117 122 118 123 if (config.cpu_active == 1) { -
kernel/arch/arm32/_link.ld.in
r86f862c rf2cb80a 26 26 kernel_load_address = .; 27 27 . = . + SIZEOF_HEADERS; 28 28 29 .text : { 29 30 ktext_start = .; … … 31 32 ktext_end = .; 32 33 } 34 35 /* stack unwinding data */ 36 .eh_frame_hdr : { 37 eh_frame_hdr_start = .; 38 *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*); 39 eh_frame_hdr_end = .; 40 } 41 42 .eh_frame : { 43 eh_frame_start = .; 44 KEEP(*(.eh_frame .eh_frame.*)); 45 eh_frame_end = .; 46 } 47 33 48 .data : { 34 49 kdata_start = .; … … 38 53 39 54 *(.rodata .rodata.*); 40 *(.eh_frame .eh_frame.*); /* stack unwinding data */41 *(.eh_frame_hdr .eh_frame_hdr.*);42 55 *(.sdata .sdata.*); 43 }44 .sbss : {45 56 *(.sbss .sbss.*); 46 57 *(.scommon .scommon.*); 58 kdata_end = .; 47 59 } 48 49 kdata_end = .;50 60 51 61 .comment 0 : { *(.comment); } -
kernel/arch/arm64/_link.ld.in
r86f862c rf2cb80a 19 19 kernel_load_address = LOAD_ADDRESS_V; 20 20 21 . image(LOAD_ADDRESS_V + SIZEOF_HEADERS) : AT (LOAD_ADDRESS_P + SIZEOF_HEADERS) {21 .text (LOAD_ADDRESS_V + SIZEOF_HEADERS) : AT (LOAD_ADDRESS_P + SIZEOF_HEADERS) { 22 22 . = ALIGN(16); 23 23 ktext_start = .; … … 25 25 *(.text .text.*); 26 26 ktext_end = .; 27 } 27 28 29 /* stack unwinding data */ 30 .eh_frame_hdr : { 31 eh_frame_hdr_start = .; 32 *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*); 33 eh_frame_hdr_end = .; 34 } 35 36 .eh_frame : { 37 eh_frame_start = .; 38 KEEP(*(.eh_frame .eh_frame.*)); 39 eh_frame_end = .; 40 } 41 42 .data : { 28 43 kdata_start = .; 29 44 KEEP(*(K_DATA_START)) 45 *(.rodata .rodata.*); 30 46 *(.data .data.*); /* initialized data */ 31 47 *(.bss .bss.*); /* uninitialized static variables */ 32 48 *(COMMON); /* global variables */ 33 34 *(.rodata .rodata.*);35 *(.eh_frame .eh_frame.*); /* stack unwinding data */36 *(.eh_frame_hdr .eh_frame_hdr.*);37 38 49 kdata_end = .; 39 50 } -
kernel/arch/arm64/src/interrupt.c
r86f862c rf2cb80a 137 137 while (drift > timer_increment) { 138 138 drift -= timer_increment; 139 CPU ->missed_clock_ticks++;139 CPU_LOCAL->missed_clock_ticks++; 140 140 } 141 141 CNTV_CVAL_EL0_write(cntvct + timer_increment - drift); -
kernel/arch/ia32/_link.ld.in
r86f862c rf2cb80a 22 22 } 23 23 24 . mapped(PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS + SIZEOF(.unmapped)): AT (BOOT_OFFSET + SIZEOF_HEADERS + SIZEOF(.unmapped)) {24 .text (PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS + SIZEOF(.unmapped)): AT (BOOT_OFFSET + SIZEOF_HEADERS + SIZEOF(.unmapped)) { 25 25 ktext_start = .; 26 26 *(.text .text.*); 27 27 ktext_end = .; 28 } 28 29 30 /* stack unwinding data */ 31 .eh_frame_hdr : { 32 eh_frame_hdr_start = .; 33 *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*); 34 eh_frame_hdr_end = .; 35 } 36 .eh_frame : { 37 eh_frame_start = .; 38 KEEP(*(.eh_frame .eh_frame.*)); 39 eh_frame_end = .; 40 } 41 42 .data : { 29 43 kdata_start = .; 30 *(.data .data.*); /* initialized data */ 31 *(.rodata .rodata.*); 32 *(.eh_frame .eh_frame.*); /* stack unwinding data */ 33 *(.eh_frame_hdr .eh_frame_hdr.*); 34 *(COMMON); /* global variables */ 44 *(.rodata .rodata.*); /* read-only global variables */ 45 *(.data .data.*); /* non-zero initialized global variables */ 35 46 36 /* bss can't be omitted from the ELF image. */ 37 *(.bss .bss.*); /* uninitialized static variables */ 47 /* 48 * When .bss is not physically present in the ELF file (MemSz > FileSz) 49 * the kernel crashes during early boot. Not sure which part of the 50 * boot process is to blame, for now just keep .bss packaged with .data 51 * so that FileSz == MemSz. 52 */ 53 54 *(.bss .bss.*); /* uninitialized global variables */ 55 *(COMMON); /* non-`static` global variables without an extern declaration */ 38 56 kdata_end = .; 39 57 } -
kernel/arch/ia32/include/arch/mm/page.h
r86f862c rf2cb80a 190 190 p->writeable << PAGE_WRITE_SHIFT | 191 191 1 << PAGE_EXEC_SHIFT | 192 p->global << PAGE_GLOBAL_SHIFT); 192 p->global << PAGE_GLOBAL_SHIFT | 193 p->page_write_through << PAGE_WRITE_COMBINE_SHIFT); 193 194 } 194 195 … … 197 198 pte_t *p = &pt[i]; 198 199 199 p->page_cache_disable = !(flags & PAGE_CACHEABLE);200 200 p->present = !(flags & PAGE_NOT_PRESENT); 201 201 p->uaccessible = (flags & PAGE_USER) != 0; 202 202 p->writeable = (flags & PAGE_WRITE) != 0; 203 203 p->global = (flags & PAGE_GLOBAL) != 0; 204 205 if (flags & PAGE_WRITE_COMBINE) { 206 /* We have mapped PCD+PWT bits to write-combine mode via PAT MSR. */ 207 /* (If PAT is unsupported, it will default to uncached.) */ 208 p->page_cache_disable = 1; 209 p->page_write_through = 1; 210 } else { 211 p->page_cache_disable = !(flags & PAGE_CACHEABLE); 212 p->page_write_through = 0; 213 } 204 214 205 215 /* -
kernel/arch/ia32/src/ia32.c
r86f862c rf2cb80a 61 61 #include <arch/pm.h> 62 62 #include <arch/vreg.h> 63 #include <arch/mm/pat.h> 63 64 64 65 #ifdef CONFIG_SMP … … 104 105 { 105 106 pm_init(); 107 108 /* Use PCD+PWT bit combination in PTE to mean write-combining mode. */ 109 if (pat_supported()) 110 pat_set_mapping(false, true, true, PAT_TYPE_WRITE_COMBINING); 106 111 107 112 if (config.cpu_active == 1) { -
kernel/arch/ia64/_link.ld.in
r86f862c rf2cb80a 15 15 kernel_load_address = LOAD_ADDRESS_V; 16 16 17 . image(LOAD_ADDRESS_V + SIZEOF_HEADERS): AT (LOAD_ADDRESS_P + SIZEOF_HEADERS) {17 .text (LOAD_ADDRESS_V + SIZEOF_HEADERS): AT (LOAD_ADDRESS_P + SIZEOF_HEADERS) { 18 18 . = ALIGN(16); 19 19 ktext_start = .; … … 21 21 *(.text .text.*) 22 22 ktext_end = .; 23 } 23 24 25 /* stack unwinding data */ 26 .eh_frame_hdr : { 27 eh_frame_hdr_start = .; 28 *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*); 29 eh_frame_hdr_end = .; 30 } 31 32 .eh_frame : { 33 eh_frame_start = .; 34 KEEP(*(.eh_frame .eh_frame.*)); 35 eh_frame_end = .; 36 } 37 38 .data : { 24 39 kdata_start = .; 25 40 KEEP(*(K_DATA_START)); 26 41 *(.rodata .rodata.*); 27 *(.eh_frame .eh_frame.*); /* stack unwinding data */28 *(.eh_frame_hdr .eh_frame_hdr.*);29 42 *(.opd) 30 43 *(.data .data.*) -
kernel/arch/ia64/src/drivers/it.c
r86f862c rf2cb80a 122 122 itm += IT_DELTA; 123 123 if (itm - itc < 0) 124 CPU ->missed_clock_ticks++;124 CPU_LOCAL->missed_clock_ticks++; 125 125 else 126 126 break; -
kernel/arch/ia64/src/drivers/ski.c
r86f862c rf2cb80a 258 258 259 259 instance->srlnin = srlnin; 260 thread_ ready(instance->thread);260 thread_start(instance->thread); 261 261 262 262 sysinfo_set_item_val("kbd", NULL, true); -
kernel/arch/mips32/_link.ld.in
r86f862c rf2cb80a 33 33 ktext_end = .; 34 34 } 35 36 /* stack unwinding data */ 37 .eh_frame_hdr : { 38 eh_frame_hdr_start = .; 39 *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*); 40 eh_frame_hdr_end = .; 41 } 42 43 .eh_frame : { 44 eh_frame_start = .; 45 KEEP(*(.eh_frame .eh_frame.*)); 46 eh_frame_end = .; 47 } 48 35 49 .data : { 36 50 kdata_start = .; 37 51 *(.data .data.*); /* initialized data */ 38 52 *(.rodata .rodata.*); 39 *(.eh_frame .eh_frame.*); /* stack unwinding data */40 *(.eh_frame_hdr .eh_frame_hdr.*);41 53 *(.sdata .sdata.*); 42 54 *(.reginfo); … … 46 58 *(COMMON); /* global variables */ 47 59 } 60 48 61 _gp = . + 0x8000; 49 62 .lit8 : { *(.lit8) } -
kernel/arch/mips32/src/interrupt.c
r86f862c rf2cb80a 121 121 while (drift > cp0_compare_value) { 122 122 drift -= cp0_compare_value; 123 CPU ->missed_clock_ticks++;123 CPU_LOCAL->missed_clock_ticks++; 124 124 } 125 125 -
kernel/arch/ppc32/_link.ld.in
r86f862c rf2cb80a 25 25 } 26 26 27 . mappedPA2KA(BOOT_OFFSET): AT (BOOT_OFFSET) {27 .text PA2KA(BOOT_OFFSET): AT (BOOT_OFFSET) { 28 28 ktext_start = .; 29 29 KEEP(*(K_TEXT_START)); 30 30 *(.text .text.*); 31 31 ktext_end = .; 32 } 32 33 34 /* stack unwinding data */ 35 .eh_frame_hdr : { 36 eh_frame_hdr_start = .; 37 *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*); 38 eh_frame_hdr_end = .; 39 } 40 41 .eh_frame : { 42 eh_frame_start = .; 43 KEEP(*(.eh_frame .eh_frame.*)); 44 eh_frame_end = .; 45 } 46 47 .data : { 33 48 kdata_start = .; 34 49 KEEP(*(K_DATA_START)); 35 50 *(.rodata .rodata.*); 36 *(.eh_frame .eh_frame.*); /* stack unwinding data */37 *(.eh_frame_hdr .eh_frame_hdr.*);38 51 *(.data .data.*); /* initialized data */ 39 52 *(.sdata .sdata.*); -
kernel/arch/riscv64/_link.ld.in
r86f862c rf2cb80a 16 16 kernel_load_address = PA2KA(BOOT_OFFSET); 17 17 18 . image(PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS) : AT (BOOT_OFFSET + SIZEOF_HEADERS) {18 .text (PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS) : AT (BOOT_OFFSET + SIZEOF_HEADERS) { 19 19 ktext_start = .; 20 20 KEEP(*(K_TEXT_START)); 21 21 *(.text .text.*); 22 22 ktext_end = .; 23 } 23 24 25 /* stack unwinding data */ 26 .eh_frame_hdr : { 27 eh_frame_hdr_start = .; 28 *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*); 29 eh_frame_hdr_end = .; 30 } 31 32 .eh_frame : { 33 eh_frame_start = .; 34 KEEP(*(.eh_frame .eh_frame.*)); 35 eh_frame_end = .; 36 } 37 38 .data : { 24 39 kdata_start = .; 25 40 *(.data .data.*); /* initialized data */ 26 41 *(.rodata .rodata.*); 27 *(.eh_frame .eh_frame.*); /* stack unwinding data */28 *(.eh_frame_hdr .eh_frame_hdr.*);29 42 *(.sdata .sdata.*); 30 43 *(.sbss .sbss.*); -
kernel/arch/sparc64/_link.ld.in
r86f862c rf2cb80a 14 14 kernel_load_address = VMA; 15 15 16 . image(VMA + SIZEOF_HEADERS): AT (LMA + SIZEOF_HEADERS) {16 .text (VMA + SIZEOF_HEADERS): AT (LMA + SIZEOF_HEADERS) { 17 17 ktext_start = .; 18 18 KEEP(*(K_TEXT_START)); 19 19 *(.text .text.*); 20 20 ktext_end = .; 21 } 21 22 23 /* stack unwinding data */ 24 .eh_frame_hdr : { 25 eh_frame_hdr_start = .; 26 *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*); 27 eh_frame_hdr_end = .; 28 } 29 30 .eh_frame : { 31 eh_frame_start = .; 32 KEEP(*(.eh_frame .eh_frame.*)); 33 eh_frame_end = .; 34 } 35 36 .data : { 22 37 kdata_start = .; 23 38 KEEP(*(K_DATA_START)); 24 39 *(.rodata .rodata.*); 25 *(.eh_frame .eh_frame.*); /* stack unwinding data */26 *(.eh_frame_hdr .eh_frame_hdr.*);27 40 *(.data .data.*); /* initialized data */ 28 41 *(.sdata .sdata.*); -
kernel/arch/sparc64/src/drivers/niagara.c
r86f862c rf2cb80a 253 253 254 254 instance->srlnin = srln; 255 thread_ ready(instance->thread);255 thread_start(instance->thread); 256 256 } 257 257 } -
kernel/arch/sparc64/src/drivers/tick.c
r86f862c rf2cb80a 117 117 while (drift > CPU->arch.clock_frequency / HZ) { 118 118 drift -= CPU->arch.clock_frequency / HZ; 119 CPU ->missed_clock_ticks++;119 CPU_LOCAL->missed_clock_ticks++; 120 120 } 121 121 CPU->arch.next_tick_cmpr = tick_counter_read() + -
kernel/arch/sparc64/src/proc/sun4u/scheduler.c
r86f862c rf2cb80a 76 76 { 77 77 if (THREAD->uspace) { 78 asm volatile ("flushw"); 79 78 80 /* sample the state of the userspace window buffer */ 79 81 THREAD->arch.uspace_window_buffer = -
kernel/arch/sparc64/src/proc/sun4v/scheduler.c
r86f862c rf2cb80a 68 68 { 69 69 if (THREAD->uspace) { 70 asm volatile ("flushw"); 71 70 72 /* sample the state of the userspace window buffer */ 71 73 THREAD->arch.uspace_window_buffer = -
kernel/genarch/src/fb/fb.c
r86f862c rf2cb80a 633 633 634 634 instance->addr = (uint8_t *) km_map((uintptr_t) props->addr, fbsize, 635 KM_NATURAL_ALIGNMENT, PAGE_WRITE | PAGE_ NOT_CACHEABLE);635 KM_NATURAL_ALIGNMENT, PAGE_WRITE | PAGE_WRITE_COMBINE); 636 636 if (!instance->addr) { 637 637 LOG("Unable to map framebuffer."); -
kernel/genarch/src/kbrd/kbrd.c
r86f862c rf2cb80a 200 200 201 201 instance->sink = sink; 202 thread_ ready(instance->thread);202 thread_start(instance->thread); 203 203 204 204 return &instance->raw; -
kernel/genarch/src/kbrd/kbrd_at.c
r86f862c rf2cb80a 198 198 199 199 instance->sink = sink; 200 thread_ ready(instance->thread);200 thread_start(instance->thread); 201 201 202 202 return &instance->raw; -
kernel/genarch/src/srln/srln.c
r86f862c rf2cb80a 156 156 157 157 instance->sink = sink; 158 thread_ ready(instance->thread);158 thread_start(instance->thread); 159 159 160 160 return &instance->raw; -
kernel/generic/include/atomic.h
r86f862c rf2cb80a 39 39 #include <typedefs.h> 40 40 #include <stdatomic.h> 41 42 /* 43 * Shorthand for relaxed atomic read/write, something that's needed to formally 44 * avoid undefined behavior in cases where we need to read a variable in 45 * different threads and we don't particularly care about ordering 46 * (e.g. statistic printouts). This is most likely translated into the same 47 * assembly instructions as regular read/writes. 48 */ 49 #define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed) 50 #define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed) 41 51 42 52 #define atomic_predec(val) \ -
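A small usage sketch for the new relaxed-access helpers. The statistics variable below is hypothetical; the point is that values consumed only by printouts need atomicity (to avoid formal data races) but no ordering:

    static atomic_uint_fast64_t observed_ticks;   /* hypothetical statistic */

    void stats_bump(uint64_t now)
    {
        /* Writer side: a relaxed store is enough, nothing orders against it. */
        atomic_set_unordered(&observed_ticks, now);
    }

    void stats_print(void)
    {
        /* Reader side: race-free by the letter of the standard, yet compiles
         * to a plain load on common targets. */
        printf("ticks observed: %" PRIu64 "\n",
            (uint64_t) atomic_get_unordered(&observed_ticks));
    }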
kernel/generic/include/context.h
r86f862c rf2cb80a 36 36 #define KERN_CONTEXT_H_ 37 37 38 #include <panic.h> 38 39 #include <trace.h> 39 40 #include <arch/context.h> 41 #include <arch/faddr.h> 40 42 41 43 #define context_set_generic(ctx, _pc, stack, size) \ … … 47 49 extern int context_save_arch(context_t *ctx) __attribute__((returns_twice)); 48 50 extern void context_restore_arch(context_t *ctx) __attribute__((noreturn)); 49 50 /** Save register context.51 *52 * Save the current register context (including stack pointer) to a context53 * structure. A subsequent call to context_restore() will return to the same54 * address as the corresponding call to context_save().55 *56 * Note that context_save_arch() must reuse the stack frame of the function57 * which called context_save(). We guarantee this by:58 *59 * a) implementing context_save_arch() in assembly so that it does not create60 * its own stack frame, and by61 * b) defining context_save() as a macro because the inline keyword is just a62 * hint for the compiler, not a real constraint; the application of a macro63 * will definitely not create a stack frame either.64 *65 * To imagine what could happen if there were some extra stack frames created66 * either by context_save() or context_save_arch(), we need to realize that the67 * sp saved in the contex_t structure points to the current stack frame as it68 * existed when context_save_arch() was executing. After the return from69 * context_save_arch() and context_save(), any extra stack frames created by70 * these functions will be destroyed and their contents sooner or later71 * overwritten by functions called next. Any attempt to restore to a context72 * saved like that would therefore lead to a disaster.73 *74 * @param ctx Context structure.75 *76 * @return context_save() returns 1, context_restore() returns 0.77 *78 */79 #define context_save(ctx) context_save_arch(ctx)80 51 81 52 /** Restore register context. … … 91 62 * 92 63 */ 93 _NO_TRACE static inline void context_restore(context_t *ctx) 64 _NO_TRACE __attribute__((noreturn)) 65 static inline void context_restore(context_t *ctx) 94 66 { 95 67 context_restore_arch(ctx); 68 } 69 70 /** 71 * Saves current context to the variable pointed to by `self`, 72 * and restores the context denoted by `other`. 73 * 74 * When the `self` context is later restored by another call to 75 * `context_swap()`, the control flow behaves as if the earlier call to 76 * `context_swap()` just returned. 77 */ 78 _NO_TRACE static inline void context_swap(context_t *self, context_t *other) 79 { 80 if (context_save_arch(self)) 81 context_restore_arch(other); 82 } 83 84 _NO_TRACE static inline void context_create(context_t *context, 85 void (*fn)(void), void *stack_base, size_t stack_size) 86 { 87 *context = (context_t) { 0 }; 88 context_set(context, FADDR(fn), stack_base, stack_size); 89 } 90 91 __attribute__((noreturn)) static inline void context_replace(void (*fn)(void), 92 void *stack_base, size_t stack_size) 93 { 94 context_t ctx; 95 context_create(&ctx, fn, stack_base, stack_size); 96 context_restore(&ctx); 96 97 } 97 98 -
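The context API replaces the old context_save()/context_restore() pairing with explicit swap/create/replace operations. A minimal sketch of the new calling pattern (the worker function and stack handling are illustrative, not taken from the changeset):

    static context_t main_ctx;
    static context_t worker_ctx;

    static void worker(void)
    {
        /* ... runs on its own stack ... */
        context_swap(&worker_ctx, &main_ctx);   /* yield back to demo() */
    }

    static void demo(void *stack_base, size_t stack_size)
    {
        context_create(&worker_ctx, worker, stack_base, stack_size);
        context_swap(&main_ctx, &worker_ctx);
        /* Resumes here once worker() swaps back. context_replace() is the
         * noreturn variant for when the current stack is being abandoned. */
    }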
kernel/generic/include/cpu.h
r86f862c rf2cb80a 44 44 #include <arch.h> 45 45 46 #define CPU CURRENT->cpu 46 #define CPU (CURRENT->cpu) 47 #define CPU_LOCAL (&CPU->local) 48 49 /** 50 * Contents of CPU_LOCAL. These are variables that are only ever accessed by 51 * the CPU they belong to, so they don't need any synchronization, 52 * just locally disabled interrupts. 53 */ 54 typedef struct cpu_local { 55 /** 56 * When system clock loses a tick, it is 57 * recorded here so that clock() can react. 58 */ 59 size_t missed_clock_ticks; 60 61 uint64_t current_clock_tick; 62 uint64_t preempt_deadline; /* < when should the currently running thread be preempted */ 63 uint64_t relink_deadline; 64 65 /** 66 * Stack used by scheduler when there is no running thread. 67 * This field is unchanged after initialization. 68 */ 69 uint8_t *stack; 70 71 /** 72 * Processor cycle accounting. 73 */ 74 bool idle; 75 uint64_t last_cycle; 76 77 context_t scheduler_context; 78 79 struct thread *prev_thread; 80 } cpu_local_t; 47 81 48 82 /** CPU structure. … … 63 97 64 98 /** 65 * When system clock loses a tick, it is66 * recorded here so that clock() can react.67 * This variable is CPU-local and can be68 * only accessed when interrupts are69 * disabled.70 */71 size_t missed_clock_ticks;72 73 /** Can only be accessed by the CPU represented by this structure when interrupts are disabled. */74 uint64_t current_clock_tick;75 uint64_t preempt_deadline; /* < when should the currently running thread be preempted */76 uint64_t relink_deadline;77 78 /**79 99 * Processor cycle accounting. 80 100 */ 81 bool idle;82 uint64_t last_cycle;83 101 atomic_time_stat_t idle_cycles; 84 102 atomic_time_stat_t busy_cycles; … … 103 121 _Atomic(struct thread *) fpu_owner; 104 122 105 /** 106 * Stack used by scheduler when there is no running thread. 107 */ 108 uint8_t *stack; 123 cpu_local_t local; 109 124 } cpu_t; 110 125 -
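The new cpu_local_t groups fields that are only ever touched by their owning CPU, so the access contract is "interrupts disabled, no lock". A sketch of the pattern, matching the CPU_LOCAL->missed_clock_ticks updates elsewhere in this changeset:

    ipl_t ipl = interrupts_disable();

    /* Safe without a spinlock: only this CPU ever reads or writes its own
     * cpu_local_t, and disabling interrupts keeps the update atomic with
     * respect to local interrupt handlers. */
    CPU_LOCAL->missed_clock_ticks++;

    interrupts_restore(ipl);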
kernel/generic/include/lib/refcount.h
r86f862c rf2cb80a 49 49 } atomic_refcount_t; 50 50 51 #define REFCOUNT_INITIALIZER() { \ 52 .__cnt = ATOMIC_VAR_INIT(0), \ 53 } 54 51 55 static inline void refcount_init(atomic_refcount_t *rc) 52 56 { 53 atomic_ store_explicit(&rc->__cnt, 0, memory_order_relaxed);57 atomic_init(&rc->__cnt, 0); 54 58 } 55 59 -
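Alongside switching refcount_init() to atomic_init(), the hunk adds a static initializer. A short sketch of when each form applies (the containing structure is hypothetical):

    typedef struct {
        atomic_refcount_t refcount;
    } kobject_t;                        /* hypothetical example type */

    /* Objects with static storage can now be initialized at compile time. */
    static kobject_t boot_object = {
        .refcount = REFCOUNT_INITIALIZER(),
    };

    /* Dynamically allocated objects keep using refcount_init(), which now
     * uses atomic_init(), the correct primitive for first-time initialization. */
    void kobject_setup(kobject_t *obj)
    {
        refcount_init(&obj->refcount);
    }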
kernel/generic/include/mm/mm.h
r86f862c rf2cb80a 46 46 #define PAGE_EXEC_SHIFT 5 47 47 #define PAGE_GLOBAL_SHIFT 6 48 #define PAGE_WRITE_COMBINE_SHIFT 7 48 49 49 50 #define PAGE_NOT_CACHEABLE (0 << PAGE_CACHEABLE_SHIFT) … … 62 63 #define PAGE_GLOBAL (1 << PAGE_GLOBAL_SHIFT) 63 64 65 #define PAGE_WRITE_COMBINE (1 << PAGE_WRITE_COMBINE_SHIFT) 66 64 67 #endif 65 68 -
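The new PAGE_WRITE_COMBINE flag is consumed by the architecture page-table code shown earlier and used by the framebuffer driver later in this changeset. A representative mapping call, with fb_paddr and fb_size standing in for the driver's actual values:

    /* Mirrors the genarch/src/fb/fb.c hunk below: framebuffer memory is now
     * mapped write-combining rather than fully uncached, so back-to-back
     * pixel writes can be batched by the CPU. */
    uint8_t *fb = (uint8_t *) km_map(fb_paddr, fb_size,
        KM_NATURAL_ALIGNMENT, PAGE_WRITE | PAGE_WRITE_COMBINE);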
kernel/generic/include/proc/scheduler.h
r86f862c rf2cb80a 41 41 #include <atomic.h> 42 42 #include <adt/list.h> 43 #include <abi/proc/thread.h> 43 44 44 45 #define RQ_COUNT 16 … … 56 57 57 58 extern void scheduler_fpu_lazy_request(void); 58 extern void scheduler(void);59 extern void scheduler_locked(ipl_t);60 59 extern void kcpulb(void *arg); 61 60 62 61 extern void sched_print_list(void); 62 63 extern void scheduler_run(void) __attribute__((noreturn)); 64 extern void scheduler_enter(state_t); 65 66 extern void thread_main_func(void); 63 67 64 68 /* -
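scheduler() is replaced by two entry points: scheduler_run(), the per-CPU scheduling loop that never returns, and scheduler_enter(state), called by a running thread to give up the CPU with a chosen new state. As a hedged sketch (the body is inferred from these declarations and the scheduler.c hunks, not copied from the changeset), a voluntary yield would look like:

    /* thread_yield() is declared in proc/thread.h in this changeset; a
     * plausible implementation re-enters the scheduler while staying
     * runnable, so cleanup_after_thread() requeues the thread as preempted. */
    void thread_yield(void)
    {
        scheduler_enter(Running);
    }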
kernel/generic/include/proc/task.h
r86f862c rf2cb80a 88 88 89 89 /** Number of references (i.e. threads). */ 90 atomic_ size_t refcount;90 atomic_refcount_t refcount; 91 91 /** Number of threads that haven't exited yet. */ 92 92 // TODO: remove … … 144 144 extern void task_done(void); 145 145 extern task_t *task_create(as_t *, const char *); 146 extern void task_destroy(task_t *);147 146 extern void task_hold(task_t *); 148 147 extern void task_release(task_t *); -
kernel/generic/include/proc/thread.h
r86f862c rf2cb80a 95 95 waitq_t join_wq; 96 96 97 /** Lock protecting thread structure. 97 /** Thread accounting. */ 98 atomic_time_stat_t ucycles; 99 atomic_time_stat_t kcycles; 100 101 /** Architecture-specific data. */ 102 thread_arch_t arch; 103 104 #ifdef CONFIG_UDEBUG 105 /** 106 * If true, the scheduler will print a stack trace 107 * to the kernel console upon scheduling this thread. 108 */ 109 atomic_int_fast8_t btrace; 110 111 /** Debugging stuff */ 112 udebug_thread_t udebug; 113 #endif /* CONFIG_UDEBUG */ 114 115 /* 116 * Immutable fields. 98 117 * 99 * Protects the whole thread structure except fields listed above. 100 */ 101 IRQ_SPINLOCK_DECLARE(lock); 102 103 char name[THREAD_NAME_BUFLEN]; 118 * These fields are only modified during initialization, and are not 119 * changed at any time between initialization and destruction. 120 * Can be accessed without synchronization in most places. 121 */ 122 123 /** Thread ID. */ 124 thread_id_t tid; 104 125 105 126 /** Function implementing the thread. */ … … 108 129 void *thread_arg; 109 130 131 char name[THREAD_NAME_BUFLEN]; 132 133 /** Thread is executed in user space. */ 134 bool uspace; 135 136 /** Thread doesn't affect accumulated accounting. */ 137 bool uncounted; 138 139 /** Containing task. */ 140 task_t *task; 141 142 /** Thread's kernel stack. */ 143 uint8_t *kstack; 144 145 /* 146 * Local fields. 147 * 148 * These fields can be safely accessed from code that _controls execution_ 149 * of this thread. Code controls execution of a thread if either: 150 * - it runs in the context of said thread AND interrupts are disabled 151 * (interrupts can and will access these fields) 152 * - the thread is not running, and the code accessing it can legally 153 * add/remove the thread to/from a runqueue, i.e., either: 154 * - it is allowed to enqueue thread in a new runqueue 155 * - it holds the lock to the runqueue containing the thread 156 * 157 */ 158 110 159 /** 111 160 * From here, the stored context is restored … … 113 162 */ 114 163 context_t saved_context; 115 ipl_t saved_ipl; 164 165 // TODO: we only need one of the two bools below 116 166 117 167 /** … … 127 177 bool in_copy_to_uspace; 128 178 179 /* 180 * FPU context is a special case. If lazy FPU switching is disabled, 181 * it acts as a regular local field. However, if lazy switching is enabled, 182 * the context is synchronized via CPU->fpu_lock 183 */ 129 184 #ifdef CONFIG_FPU 130 185 fpu_context_t fpu_context; … … 135 190 unsigned int nomigrate; 136 191 137 /** Thread state. */138 state_t state;139 140 /** Thread CPU. */141 cpu_t *cpu;142 /** Containing task. */143 task_t *task;144 192 /** Thread was migrated to another CPU and has not run yet. */ 145 193 bool stolen; 146 /** Thread is executed in user space. */ 147 bool uspace; 148 149 /** Thread accounting. */ 150 uint64_t ucycles; 151 uint64_t kcycles; 194 195 /** 196 * Thread state (state_t). 197 * This is atomic because we read it via some commands for debug output, 198 * otherwise it could just be a regular local. 199 */ 200 atomic_int_fast32_t state; 201 202 /** Thread CPU. */ 203 _Atomic(cpu_t *) cpu; 204 205 /** Thread's priority. Implemented as index to CPU->rq */ 206 atomic_int_fast32_t priority; 207 152 208 /** Last sampled cycle. */ 153 209 uint64_t last_cycle; 154 /** Thread doesn't affect accumulated accounting. */155 bool uncounted;156 157 /** Thread's priority. Implemented as index to CPU->rq */158 int priority;159 /** Thread ID. */160 thread_id_t tid;161 162 /** Architecture-specific data. 
*/163 thread_arch_t arch;164 165 /** Thread's kernel stack. */166 uint8_t *kstack;167 168 #ifdef CONFIG_UDEBUG169 /**170 * If true, the scheduler will print a stack trace171 * to the kernel console upon scheduling this thread.172 */173 bool btrace;174 175 /** Debugging stuff */176 udebug_thread_t udebug;177 #endif /* CONFIG_UDEBUG */178 210 } thread_t; 179 211 … … 186 218 extern void thread_wire(thread_t *, cpu_t *); 187 219 extern void thread_attach(thread_t *, task_t *); 188 extern void thread_ready(thread_t *); 220 extern void thread_start(thread_t *); 221 extern void thread_requeue_sleeping(thread_t *); 189 222 extern void thread_exit(void) __attribute__((noreturn)); 190 223 extern void thread_interrupt(thread_t *); 224 225 enum sleep_state { 226 SLEEP_INITIAL, 227 SLEEP_ASLEEP, 228 SLEEP_WOKE, 229 }; 191 230 192 231 typedef enum { … … 237 276 extern errno_t thread_join(thread_t *); 238 277 extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int); 278 extern void thread_detach(thread_t *); 279 280 extern void thread_yield(void); 239 281 240 282 extern void thread_print_list(bool); -
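thread.h also introduces the three-state sleep handshake (SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE) that scheduler.c's cleanup_after_thread() resolves with a compare-and-exchange. The waker side is not visible in this changeset; a hedged sketch of how it would complete the handshake:

    /* Hypothetical waker: publish SLEEP_WOKE; if the sleeper already reached
     * SLEEP_ASLEEP (the scheduler finished switching away from it), requeue
     * it here, otherwise cleanup_after_thread() notices SLEEP_WOKE and
     * requeues the thread itself. */
    static void wake_sketch(thread_t *thread)
    {
        int prev = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
            memory_order_acq_rel);

        if (prev == SLEEP_ASLEEP)
            thread_requeue_sleeping(thread);
    }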
kernel/generic/meson.build
r86f862c rf2cb80a 95 95 'src/mm/malloc.c', 96 96 'src/mm/reserve.c', 97 'src/preempt/preemption.c',98 97 'src/printf/printf.c', 99 98 'src/printf/snprintf.c', -
kernel/generic/src/console/cmd.c
r86f862c rf2cb80a 229 229 }; 230 230 231 /* Data and methods for 'printbench' command. */ 232 static int cmd_printbench(cmd_arg_t *argv); 233 234 static cmd_info_t printbench_info = { 235 .name = "printbench", 236 .description = "Run a printing benchmark.", 237 .func = cmd_printbench, 238 .argc = 0, 239 }; 240 231 241 #endif /* CONFIG_TEST */ 232 242 … … 613 623 &test_info, 614 624 &bench_info, 625 &printbench_info, 615 626 #endif 616 627 #ifdef CONFIG_UDEBUG … … 993 1004 printf("cpu%u: ", i); 994 1005 thread_wire(thread, &cpus[i]); 995 thread_ ready(thread_ref(thread));1006 thread_start(thread); 996 1007 thread_join(thread); 997 thread_put(thread);998 1008 } else 999 1009 printf("Unable to create thread for cpu%u\n", i); … … 1582 1592 } 1583 1593 1594 int cmd_printbench(cmd_arg_t *argv) 1595 { 1596 int cnt = 20; 1597 1598 uint64_t *data = malloc(sizeof(uint64_t) * cnt); 1599 if (data == NULL) { 1600 printf("Error allocating memory for statistics\n"); 1601 return false; 1602 } 1603 1604 for (int i = 0; i < cnt; i++) { 1605 /* 1606 * Update and read thread accounting 1607 * for benchmarking 1608 */ 1609 irq_spinlock_lock(&TASK->lock, true); 1610 uint64_t ucycles0, kcycles0; 1611 task_get_accounting(TASK, &ucycles0, &kcycles0); 1612 irq_spinlock_unlock(&TASK->lock, true); 1613 1614 /* Execute the test */ 1615 for (int j = 0; j < 20; j++) { 1616 printf("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ěščřžýáíéú!@#$%%^&*(){}+\n"); 1617 printf("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ěščřžýáíéú!@#$%%^&*(){}+abcdefghijklmnopqrstuvwxyz\n"); 1618 printf("0123456789ěščřžýáíéú!@#$%%^&*(){}+abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\n"); 1619 } 1620 1621 /* Update and read thread accounting */ 1622 irq_spinlock_lock(&TASK->lock, true); 1623 uint64_t ucycles1, kcycles1; 1624 task_get_accounting(TASK, &ucycles1, &kcycles1); 1625 irq_spinlock_unlock(&TASK->lock, true); 1626 1627 data[i] = ucycles1 - ucycles0 + kcycles1 - kcycles0; 1628 } 1629 1630 printf("\n"); 1631 1632 uint64_t cycles; 1633 char suffix; 1634 uint64_t sum = 0; 1635 1636 for (int i = 0; i < cnt; i++) { 1637 sum += data[i]; 1638 } 1639 1640 order_suffix(sum / (uint64_t) cnt, &cycles, &suffix); 1641 printf("Average\t\t%" PRIu64 "%c\n", cycles, suffix); 1642 1643 free(data); 1644 1645 return true; 1646 } 1647 1584 1648 #endif 1585 1649 -
kernel/generic/src/cpu/cpu.c
r86f862c rf2cb80a 81 81 panic("Cannot allocate CPU stack."); 82 82 83 cpus[i]. stack = (uint8_t *) PA2KA(stack_phys);83 cpus[i].local.stack = (uint8_t *) PA2KA(stack_phys); 84 84 cpus[i].id = i; 85 85 … … 104 104 CPU->tlb_active = true; 105 105 106 CPU ->idle = false;107 CPU ->last_cycle = get_cycle();106 CPU_LOCAL->idle = false; 107 CPU_LOCAL->last_cycle = get_cycle(); 108 108 CPU->idle_cycles = ATOMIC_TIME_INITIALIZER(); 109 109 CPU->busy_cycles = ATOMIC_TIME_INITIALIZER(); -
kernel/generic/src/ddi/ddi.c
r86f862c rf2cb80a 336 336 return EPERM; 337 337 338 irq_spinlock_lock(&tasks_lock, true);339 340 338 task_t *task = task_find_by_id(id); 341 339 342 if ((!task) || (!container_check(CONTAINER, task->container))) { 343 /* 344 * There is no task with the specified ID 345 * or the task belongs to a different security 346 * context. 347 */ 348 irq_spinlock_unlock(&tasks_lock, true); 340 if (!task) 349 341 return ENOENT; 350 } 351 352 /* Lock the task and release the lock protecting tasks dictionary. */ 353 irq_spinlock_exchange(&tasks_lock, &task->lock); 354 errno_t rc = ddi_iospace_enable_arch(task, ioaddr, size); 342 343 errno_t rc = ENOENT; 344 345 irq_spinlock_lock(&task->lock, true); 346 347 /* Check that the task belongs to the correct security context. */ 348 if (container_check(CONTAINER, task->container)) 349 rc = ddi_iospace_enable_arch(task, ioaddr, size); 350 355 351 irq_spinlock_unlock(&task->lock, true); 356 352 task_release(task); 357 353 return rc; 358 354 } … … 377 373 return EPERM; 378 374 379 irq_spinlock_lock(&tasks_lock, true);380 381 375 task_t *task = task_find_by_id(id); 382 376 383 if ((!task) || (!container_check(CONTAINER, task->container))) { 384 /* 385 * There is no task with the specified ID 386 * or the task belongs to a different security 387 * context. 388 */ 389 irq_spinlock_unlock(&tasks_lock, true); 377 if (!task) 390 378 return ENOENT; 391 } 392 393 /* Lock the task and release the lock protecting tasks dictionary. */ 394 irq_spinlock_exchange(&tasks_lock, &task->lock); 395 errno_t rc = ddi_iospace_disable_arch(task, ioaddr, size); 379 380 errno_t rc = ENOENT; 381 382 irq_spinlock_lock(&task->lock, true); 383 384 /* Check that the task belongs to the correct security context. */ 385 if (container_check(CONTAINER, task->container)) 386 rc = ddi_iospace_disable_arch(task, ioaddr, size); 387 396 388 irq_spinlock_unlock(&task->lock, true); 397 389 task_release(task); 398 390 return rc; 399 391 } -
kernel/generic/src/interrupt/interrupt.c
r86f862c rf2cb80a 114 114 115 115 /* Account user cycles */ 116 if (THREAD) { 117 irq_spinlock_lock(&THREAD->lock, false); 116 if (THREAD) 118 117 thread_update_accounting(true); 119 irq_spinlock_unlock(&THREAD->lock, false);120 }121 118 122 119 /* Account CPU usage if it woke up from sleep */ 123 if (CPU && CPU ->idle) {120 if (CPU && CPU_LOCAL->idle) { 124 121 uint64_t now = get_cycle(); 125 atomic_time_increment(&CPU->idle_cycles, now - CPU ->last_cycle);126 CPU ->last_cycle = now;127 CPU ->idle = false;122 atomic_time_increment(&CPU->idle_cycles, now - CPU_LOCAL->last_cycle); 123 CPU_LOCAL->last_cycle = now; 124 CPU_LOCAL->idle = false; 128 125 } 129 126 … … 155 152 156 153 /* Do not charge THREAD for exception cycles */ 157 if (THREAD) { 158 irq_spinlock_lock(&THREAD->lock, false); 154 if (THREAD) 159 155 THREAD->last_cycle = end_cycle; 160 irq_spinlock_unlock(&THREAD->lock, false);161 }162 156 #else 163 157 panic("No space for any exception handler, yet we want to handle some exception."); -
kernel/generic/src/ipc/ipc.c
r86f862c rf2cb80a 967 967 void ipc_print_task(task_id_t taskid) 968 968 { 969 irq_spinlock_lock(&tasks_lock, true);970 969 task_t *task = task_find_by_id(taskid); 971 if (!task) { 972 irq_spinlock_unlock(&tasks_lock, true); 970 if (!task) 973 971 return; 974 }975 task_hold(task);976 irq_spinlock_unlock(&tasks_lock, true);977 972 978 973 printf("[phone cap] [calls] [state\n"); -
kernel/generic/src/ipc/kbox.c
r86f862c rf2cb80a 90 90 LOG("Join kb.thread."); 91 91 thread_join(TASK->kb.thread); 92 thread_put(TASK->kb.thread);93 92 LOG("...join done."); 94 93 TASK->kb.thread = NULL; … … 201 200 /** Connect phone to a task kernel-box specified by id. 202 201 * 203 * Note that this is not completely atomic. For optimisation reasons, the task204 * might start cleaning up kbox after the phone has been connected and before205 * a kbox thread has been created. This must be taken into account in the206 * cleanup code.207 *208 202 * @param[out] out_phone Phone capability handle on success. 209 203 * @return Error code. … … 212 206 errno_t ipc_connect_kbox(task_id_t taskid, cap_phone_handle_t *out_phone) 213 207 { 214 irq_spinlock_lock(&tasks_lock, true);215 216 208 task_t *task = task_find_by_id(taskid); 217 if (task == NULL) { 218 irq_spinlock_unlock(&tasks_lock, true); 209 if (!task) 219 210 return ENOENT; 220 }221 222 atomic_inc(&task->refcount);223 224 irq_spinlock_unlock(&tasks_lock, true);225 211 226 212 mutex_lock(&task->kb.cleanup_lock); 227 228 if (atomic_predec(&task->refcount) == 0) {229 mutex_unlock(&task->kb.cleanup_lock);230 task_destroy(task);231 return ENOENT;232 }233 213 234 214 if (task->kb.finished) { 235 215 mutex_unlock(&task->kb.cleanup_lock); 216 task_release(task); 236 217 return EINVAL; 237 218 } … … 244 225 if (!kb_thread) { 245 226 mutex_unlock(&task->kb.cleanup_lock); 227 task_release(task); 246 228 return ENOMEM; 247 229 } 248 230 249 task->kb.thread = thread_ref(kb_thread);250 thread_ ready(kb_thread);231 task->kb.thread = kb_thread; 232 thread_start(kb_thread); 251 233 } 252 234 … … 256 238 if (rc != EOK) { 257 239 mutex_unlock(&task->kb.cleanup_lock); 240 task_release(task); 258 241 return rc; 259 242 } … … 266 249 267 250 mutex_unlock(&task->kb.cleanup_lock); 251 task_release(task); 268 252 *out_phone = phone_handle; 269 253 return EOK; -
kernel/generic/src/main/kinit.c
r86f862c rf2cb80a 122 122 123 123 thread_wire(thread, &cpus[0]); 124 thread_ ready(thread_ref(thread));124 thread_start(thread); 125 125 thread_join(thread); 126 thread_put(thread);127 126 128 127 /* … … 136 135 if (thread != NULL) { 137 136 thread_wire(thread, &cpus[i]); 138 thread_ready(thread); 137 thread_start(thread); 138 thread_detach(thread); 139 139 } else 140 140 log(LF_OTHER, LVL_ERROR, … … 152 152 thread = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE, 153 153 "kload"); 154 if (thread != NULL) 155 thread_ready(thread); 156 else 154 if (thread != NULL) { 155 thread_start(thread); 156 thread_detach(thread); 157 } else { 157 158 log(LF_OTHER, LVL_ERROR, "Unable to create kload thread"); 159 } 158 160 159 161 #ifdef CONFIG_KCONSOLE … … 164 166 thread = thread_create(kconsole_thread, NULL, TASK, 165 167 THREAD_FLAG_NONE, "kconsole"); 166 if (thread != NULL) 167 thread_ready(thread); 168 else 168 if (thread != NULL) { 169 thread_start(thread); 170 thread_detach(thread); 171 } else { 169 172 log(LF_OTHER, LVL_ERROR, 170 173 "Unable to create kconsole thread"); 174 } 171 175 } 172 176 #endif /* CONFIG_KCONSOLE */ … … 309 313 */ 310 314 for (i = 0; i < init.cnt; i++) { 311 if (programs[i].task != NULL) 315 if (programs[i].task != NULL) { 312 316 program_ready(&programs[i]); 317 task_release(programs[i].task); 318 } 313 319 } 314 320 -
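Throughout this changeset, thread_ready() becomes thread_start(), and callers pair it either with thread_detach() (fire-and-forget threads) or thread_join(); the removal of the thread_put() calls after thread_join() in the cmd.c, kbox.c and kinit.c hunks suggests that join now consumes the creator's reference. A condensed sketch of the two resulting patterns (worker_fn and init_fn are placeholders):

    thread_t *t = thread_create(worker_fn, NULL, TASK, THREAD_FLAG_NONE, "worker");
    if (t != NULL) {
        /* Fire and forget: start the thread and drop our reference. */
        thread_start(t);
        thread_detach(t);
    }

    thread_t *w = thread_create(init_fn, NULL, TASK, THREAD_FLAG_NONE, "init");
    if (w != NULL) {
        /* Start and wait: thread_join() now appears to release the creator's
         * reference as well, so no trailing thread_put(). */
        thread_start(w);
        thread_join(w);
    }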
kernel/generic/src/main/main.c
r86f862c rf2cb80a 80 80 #include <arch/arch.h> 81 81 #include <arch.h> 82 #include <arch/faddr.h>83 82 #include <ipc/ipc.h> 84 83 #include <macros.h> … … 174 173 ALIGN_UP((uintptr_t) kdata_end - config.base, PAGE_SIZE); 175 174 176 context_save(&ctx); 177 context_set(&ctx, FADDR(main_bsp_separated_stack), 175 context_create(&ctx, main_bsp_separated_stack, 178 176 bootstrap_stack, bootstrap_stack_size); 179 177 context_restore(&ctx); … … 282 280 if (!kinit_thread) 283 281 panic("Cannot create kinit thread."); 284 thread_ready(kinit_thread); 285 286 /* 287 * This call to scheduler() will return to kinit, 282 thread_start(kinit_thread); 283 thread_detach(kinit_thread); 284 285 /* 286 * This call to scheduler_run() will return to kinit, 288 287 * starting the thread of kernel threads. 289 288 */ 290 scheduler(); 289 current_copy(CURRENT, (current_t *) CPU_LOCAL->stack); 290 context_replace(scheduler_run, CPU_LOCAL->stack, STACK_SIZE); 291 291 /* not reached */ 292 292 } … … 328 328 ARCH_OP(post_cpu_init); 329 329 330 current_copy(CURRENT, (current_t *) CPU->stack);331 332 330 /* 333 331 * If we woke kmp up before we left the kernel stack, we could … … 335 333 * switch to this cpu's private stack prior to waking kmp up. 336 334 */ 337 context_t ctx; 338 context_save(&ctx); 339 context_set(&ctx, FADDR(main_ap_separated_stack), 340 (uintptr_t) CPU->stack, STACK_SIZE); 341 context_restore(&ctx); 335 current_copy(CURRENT, (current_t *) CPU_LOCAL->stack); 336 context_replace(main_ap_separated_stack, CPU_LOCAL->stack, STACK_SIZE); 342 337 /* not reached */ 343 338 } … … 356 351 357 352 semaphore_up(&ap_completion_semaphore); 358 scheduler ();353 scheduler_run(); 359 354 /* not reached */ 360 355 } -
kernel/generic/src/proc/program.c
r86f862c rf2cb80a 99 99 if (!area) { 100 100 free(kernel_uarg); 101 task_ destroy(prg->task);101 task_release(prg->task); 102 102 prg->task = NULL; 103 103 return ENOMEM; … … 119 119 free(kernel_uarg); 120 120 as_area_destroy(as, virt); 121 task_ destroy(prg->task);121 task_release(prg->task); 122 122 prg->task = NULL; 123 123 return ELIMIT; … … 212 212 void program_ready(program_t *prg) 213 213 { 214 thread_ready(prg->main_thread); 214 thread_start(prg->main_thread); 215 thread_detach(prg->main_thread); 215 216 prg->main_thread = NULL; 216 217 } … … 251 252 program_ready(&prg); 252 253 254 task_release(prg.task); 255 253 256 return EOK; 254 257 } -
kernel/generic/src/proc/scheduler.c
r86f862c rf2cb80a 1 1 /* 2 2 * Copyright (c) 2010 Jakub Jermar 3 * Copyright (c) 2023 Jiří Zárevúcky 3 4 * All rights reserved. 4 5 * … … 50 51 #include <time/delay.h> 51 52 #include <arch/asm.h> 52 #include <arch/faddr.h>53 53 #include <arch/cycle.h> 54 54 #include <atomic.h> … … 66 66 #include <stacktrace.h> 67 67 68 static void scheduler_separated_stack(void);69 70 68 atomic_size_t nrdy; /**< Number of ready threads in the system. */ 71 72 /** Take actions before new thread runs.73 *74 * Perform actions that need to be75 * taken before the newly selected76 * thread is passed control.77 *78 * THREAD->lock is locked on entry79 *80 */81 static void before_thread_runs(void)82 {83 before_thread_runs_arch();84 85 #ifdef CONFIG_FPU_LAZY86 /*87 * The only concurrent modification possible for fpu_owner here is88 * another thread changing it from itself to NULL in its destructor.89 */90 thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,91 memory_order_relaxed);92 93 if (THREAD == owner)94 fpu_enable();95 else96 fpu_disable();97 #elif defined CONFIG_FPU98 fpu_enable();99 if (THREAD->fpu_context_exists)100 fpu_context_restore(&THREAD->fpu_context);101 else {102 fpu_init();103 THREAD->fpu_context_exists = true;104 }105 #endif106 107 #ifdef CONFIG_UDEBUG108 if (THREAD->btrace) {109 istate_t *istate = THREAD->udebug.uspace_state;110 if (istate != NULL) {111 printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);112 stack_trace_istate(istate);113 }114 115 THREAD->btrace = false;116 }117 #endif118 }119 120 /** Take actions after THREAD had run.121 *122 * Perform actions that need to be123 * taken after the running thread124 * had been preempted by the scheduler.125 *126 * THREAD->lock is locked on entry127 *128 */129 static void after_thread_ran(void)130 {131 after_thread_ran_arch();132 }133 69 134 70 #ifdef CONFIG_FPU_LAZY … … 207 143 list_remove(&thread->rq_link); 208 144 209 irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock); 210 211 thread->cpu = CPU; 212 thread->priority = i; /* Correct rq index */ 213 214 /* Time allocation in microseconds. */ 215 uint64_t time_to_run = (i + 1) * 10000; 216 217 /* This is safe because interrupts are disabled. */ 218 CPU->preempt_deadline = CPU->current_clock_tick + us2ticks(time_to_run); 219 220 /* 221 * Clear the stolen flag so that it can be migrated 222 * when load balancing needs emerge. 223 */ 224 thread->stolen = false; 225 irq_spinlock_unlock(&thread->lock, false); 145 irq_spinlock_unlock(&(CPU->rq[i].lock), false); 226 146 227 147 *rq_index = i; … … 257 177 * This improves energy saving and hyperthreading. 258 178 */ 259 CPU ->idle = true;179 CPU_LOCAL->idle = true; 260 180 261 181 /* … … 305 225 static void relink_rq(int start) 306 226 { 307 if (CPU->current_clock_tick < CPU->relink_deadline) 227 assert(interrupts_disabled()); 228 229 if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline) 308 230 return; 309 231 310 CPU ->relink_deadline = CPU->current_clock_tick + NEEDS_RELINK_MAX;232 CPU_LOCAL->relink_deadline = CPU_LOCAL->current_clock_tick + NEEDS_RELINK_MAX; 311 233 312 234 /* Temporary cache for lists we are moving. */ … … 340 262 } 341 263 342 void scheduler(void) 343 { 344 ipl_t ipl = interrupts_disable(); 345 346 if (atomic_load(&haltstate)) 347 halt(); 348 349 if (THREAD) { 350 irq_spinlock_lock(&THREAD->lock, false); 351 } 352 353 scheduler_locked(ipl); 354 } 355 356 /** The scheduler 357 * 358 * The thread scheduling procedure. 359 * Passes control directly to 360 * scheduler_separated_stack(). 
361 * 362 */ 363 void scheduler_locked(ipl_t ipl) 364 { 365 assert(CPU != NULL); 366 367 if (THREAD) { 368 /* Update thread kernel accounting */ 369 THREAD->kcycles += get_cycle() - THREAD->last_cycle; 370 264 /** 265 * Do whatever needs to be done with current FPU state before we switch to 266 * another thread. 267 */ 268 static void fpu_cleanup(void) 269 { 371 270 #if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY) 372 fpu_context_save(&THREAD->fpu_context);271 fpu_context_save(&THREAD->fpu_context); 373 272 #endif 374 if (!context_save(&THREAD->saved_context)) { 375 /* 376 * This is the place where threads leave scheduler(); 377 */ 378 379 /* Save current CPU cycle */ 380 THREAD->last_cycle = get_cycle(); 381 382 irq_spinlock_unlock(&THREAD->lock, false); 383 interrupts_restore(THREAD->saved_ipl); 384 385 return; 386 } 387 388 /* 389 * Interrupt priority level of preempted thread is recorded 390 * here to facilitate scheduler() invocations from 391 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()). 392 * 393 */ 394 THREAD->saved_ipl = ipl; 395 } 396 273 } 274 275 /** 276 * Set correct FPU state for this thread after switch from another thread. 277 */ 278 static void fpu_restore(void) 279 { 280 #ifdef CONFIG_FPU_LAZY 397 281 /* 398 * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS 399 * and preemption counter. At this point CURRENT could be coming either 400 * from THREAD's or CPU's stack. 401 * 282 * The only concurrent modification possible for fpu_owner here is 283 * another thread changing it from itself to NULL in its destructor. 402 284 */ 403 current_copy(CURRENT, (current_t *) CPU->stack); 285 thread_t *owner = atomic_load_explicit(&CPU->fpu_owner, 286 memory_order_relaxed); 287 288 if (THREAD == owner) 289 fpu_enable(); 290 else 291 fpu_disable(); 292 293 #elif defined CONFIG_FPU 294 fpu_enable(); 295 if (THREAD->fpu_context_exists) 296 fpu_context_restore(&THREAD->fpu_context); 297 else { 298 fpu_init(); 299 THREAD->fpu_context_exists = true; 300 } 301 #endif 302 } 303 304 /** Things to do before we switch to THREAD context. 305 */ 306 static void prepare_to_run_thread(int rq_index) 307 { 308 relink_rq(rq_index); 309 310 switch_task(THREAD->task); 311 312 assert(atomic_get_unordered(&THREAD->cpu) == CPU); 313 314 atomic_set_unordered(&THREAD->state, Running); 315 atomic_set_unordered(&THREAD->priority, rq_index); /* Correct rq index */ 404 316 405 317 /* 406 * We may not keep the old stack. 407 * Reason: If we kept the old stack and got blocked, for instance, in 408 * find_best_thread(), the old thread could get rescheduled by another 409 * CPU and overwrite the part of its own stack that was also used by 410 * the scheduler on this CPU. 411 * 412 * Moreover, we have to bypass the compiler-generated POP sequence 413 * which is fooled by SP being set to the very top of the stack. 414 * Therefore the scheduler() function continues in 415 * scheduler_separated_stack(). 416 * 318 * Clear the stolen flag so that it can be migrated 319 * when load balancing needs emerge. 417 320 */ 418 context_t ctx; 419 context_save(&ctx); 420 context_set(&ctx, FADDR(scheduler_separated_stack), 421 (uintptr_t) CPU->stack, STACK_SIZE); 422 context_restore(&ctx); 423 424 /* Not reached */ 425 } 426 427 /** Scheduler stack switch wrapper 428 * 429 * Second part of the scheduler() function 430 * using new stack. Handling the actual context 431 * switch to a new thread. 
432 * 433 */ 434 void scheduler_separated_stack(void) 435 { 436 assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock))); 437 assert(CPU != NULL); 438 assert(interrupts_disabled()); 439 440 if (THREAD) { 441 /* Must be run after the switch to scheduler stack */ 442 after_thread_ran(); 443 444 switch (THREAD->state) { 445 case Running: 446 irq_spinlock_unlock(&THREAD->lock, false); 447 thread_ready(THREAD); 448 break; 449 450 case Exiting: 451 irq_spinlock_unlock(&THREAD->lock, false); 452 waitq_close(&THREAD->join_wq); 453 454 /* 455 * Release the reference CPU has for the thread. 456 * If there are no other references (e.g. threads calling join), 457 * the thread structure is deallocated. 458 */ 459 thread_put(THREAD); 460 break; 461 462 case Sleeping: 463 /* 464 * Prefer the thread after it's woken up. 465 */ 466 THREAD->priority = -1; 467 irq_spinlock_unlock(&THREAD->lock, false); 468 break; 469 470 default: 471 /* 472 * Entering state is unexpected. 473 */ 474 panic("tid%" PRIu64 ": unexpected state %s.", 475 THREAD->tid, thread_states[THREAD->state]); 476 break; 477 } 478 479 THREAD = NULL; 480 } 481 482 int rq_index; 483 THREAD = find_best_thread(&rq_index); 484 485 relink_rq(rq_index); 486 487 switch_task(THREAD->task); 488 489 irq_spinlock_lock(&THREAD->lock, false); 490 THREAD->state = Running; 321 THREAD->stolen = false; 491 322 492 323 #ifdef SCHEDULER_VERBOSE 493 324 log(LF_OTHER, LVL_DEBUG, 494 325 "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 495 ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,326 ", nrdy=%zu)", CPU->id, THREAD->tid, rq_index, 496 327 THREAD->ticks, atomic_load(&CPU->nrdy)); 497 328 #endif … … 505 336 * function must be executed before the switch to the new stack. 506 337 */ 507 before_thread_runs(); 338 before_thread_runs_arch(); 339 340 #ifdef CONFIG_UDEBUG 341 if (atomic_get_unordered(&THREAD->btrace)) { 342 istate_t *istate = THREAD->udebug.uspace_state; 343 if (istate != NULL) { 344 printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid); 345 stack_trace_istate(istate); 346 } else { 347 printf("Thread %" PRIu64 " interrupt state not available\n", THREAD->tid); 348 } 349 350 atomic_set_unordered(&THREAD->btrace, false); 351 } 352 #endif 353 354 fpu_restore(); 355 356 /* Time allocation in microseconds. */ 357 uint64_t time_to_run = (rq_index + 1) * 10000; 358 359 /* Set the time of next preemption. */ 360 CPU_LOCAL->preempt_deadline = 361 CPU_LOCAL->current_clock_tick + us2ticks(time_to_run); 362 363 /* Save current CPU cycle */ 364 THREAD->last_cycle = get_cycle(); 365 } 366 367 static void add_to_rq(thread_t *thread, cpu_t *cpu, int i) 368 { 369 /* Add to the appropriate runqueue. */ 370 runq_t *rq = &cpu->rq[i]; 371 372 irq_spinlock_lock(&rq->lock, false); 373 list_append(&thread->rq_link, &rq->rq); 374 rq->n++; 375 irq_spinlock_unlock(&rq->lock, false); 376 377 atomic_inc(&nrdy); 378 atomic_inc(&cpu->nrdy); 379 } 380 381 /** Requeue a thread that was just preempted on this CPU. 
382 */ 383 static void thread_requeue_preempted(thread_t *thread) 384 { 385 assert(interrupts_disabled()); 386 assert(atomic_get_unordered(&thread->state) == Running); 387 assert(atomic_get_unordered(&thread->cpu) == CPU); 388 389 int prio = atomic_get_unordered(&thread->priority); 390 391 if (prio < RQ_COUNT - 1) { 392 prio++; 393 atomic_set_unordered(&thread->priority, prio); 394 } 395 396 atomic_set_unordered(&thread->state, Ready); 397 398 add_to_rq(thread, CPU, prio); 399 } 400 401 void thread_requeue_sleeping(thread_t *thread) 402 { 403 ipl_t ipl = interrupts_disable(); 404 405 assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering); 406 407 atomic_set_unordered(&thread->priority, 0); 408 atomic_set_unordered(&thread->state, Ready); 409 410 /* Prefer the CPU on which the thread ran last */ 411 cpu_t *cpu = atomic_get_unordered(&thread->cpu); 412 413 if (!cpu) { 414 cpu = CPU; 415 atomic_set_unordered(&thread->cpu, CPU); 416 } 417 418 add_to_rq(thread, cpu, 0); 419 420 interrupts_restore(ipl); 421 } 422 423 static void cleanup_after_thread(thread_t *thread) 424 { 425 assert(CURRENT->mutex_locks == 0); 426 assert(interrupts_disabled()); 427 428 int expected; 429 430 switch (atomic_get_unordered(&thread->state)) { 431 case Running: 432 thread_requeue_preempted(thread); 433 break; 434 435 case Exiting: 436 waitq_close(&thread->join_wq); 437 438 /* 439 * Release the reference CPU has for the thread. 440 * If there are no other references (e.g. threads calling join), 441 * the thread structure is deallocated. 442 */ 443 thread_put(thread); 444 break; 445 446 case Sleeping: 447 expected = SLEEP_INITIAL; 448 449 /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */ 450 if (!atomic_compare_exchange_strong_explicit(&thread->sleep_state, 451 &expected, SLEEP_ASLEEP, 452 memory_order_acq_rel, memory_order_acquire)) { 453 454 assert(expected == SLEEP_WOKE); 455 /* The thread has already been woken up, requeue immediately. */ 456 thread_requeue_sleeping(thread); 457 } 458 break; 459 460 default: 461 /* 462 * Entering state is unexpected. 463 */ 464 panic("tid%" PRIu64 ": unexpected state %s.", 465 thread->tid, thread_states[atomic_get_unordered(&thread->state)]); 466 break; 467 } 468 } 469 470 /** Switch to scheduler context to let other threads run. */ 471 void scheduler_enter(state_t new_state) 472 { 473 ipl_t ipl = interrupts_disable(); 474 475 assert(CPU != NULL); 476 assert(THREAD != NULL); 477 478 if (atomic_load(&haltstate)) 479 halt(); 480 481 /* Check if we have a thread to switch to. */ 482 483 int rq_index; 484 thread_t *new_thread = try_find_thread(&rq_index); 485 486 if (new_thread == NULL && new_state == Running) { 487 /* No other thread to run, but we still have work to do here. */ 488 interrupts_restore(ipl); 489 return; 490 } 491 492 atomic_set_unordered(&THREAD->state, new_state); 493 494 /* Update thread kernel accounting */ 495 atomic_time_increment(&THREAD->kcycles, get_cycle() - THREAD->last_cycle); 496 497 fpu_cleanup(); 508 498 509 499 /* 510 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to511 * thread's stack.500 * On Sparc, this saves some extra userspace state that's not 501 * covered by context_save()/context_restore(). 
512 502 */ 513 current_copy(CURRENT, (current_t *) THREAD->kstack); 514 515 context_restore(&THREAD->saved_context); 503 after_thread_ran_arch(); 504 505 if (new_thread) { 506 thread_t *old_thread = THREAD; 507 CPU_LOCAL->prev_thread = old_thread; 508 THREAD = new_thread; 509 /* No waiting necessary, we can switch to the new thread directly. */ 510 prepare_to_run_thread(rq_index); 511 512 current_copy(CURRENT, (current_t *) new_thread->kstack); 513 context_swap(&old_thread->saved_context, &new_thread->saved_context); 514 } else { 515 /* 516 * A new thread isn't immediately available, switch to a separate 517 * stack to sleep or do other idle stuff. 518 */ 519 current_copy(CURRENT, (current_t *) CPU_LOCAL->stack); 520 context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context); 521 } 522 523 assert(CURRENT->mutex_locks == 0); 524 assert(interrupts_disabled()); 525 526 /* Check if we need to clean up after another thread. */ 527 if (CPU_LOCAL->prev_thread) { 528 cleanup_after_thread(CPU_LOCAL->prev_thread); 529 CPU_LOCAL->prev_thread = NULL; 530 } 531 532 interrupts_restore(ipl); 533 } 534 535 /** Enter main scheduler loop. Never returns. 536 * 537 * This function switches to a runnable thread as soon as one is available, 538 * after which it is only switched back to if a thread is stopping and there is 539 * no other thread to run in its place. We need a separate context for that 540 * because we're going to block the CPU, which means we need another context 541 * to clean up after the previous thread. 542 */ 543 void scheduler_run(void) 544 { 545 assert(interrupts_disabled()); 546 547 assert(CPU != NULL); 548 assert(TASK == NULL); 549 assert(THREAD == NULL); 550 assert(interrupts_disabled()); 551 552 while (!atomic_load(&haltstate)) { 553 assert(CURRENT->mutex_locks == 0); 554 555 int rq_index; 556 THREAD = find_best_thread(&rq_index); 557 prepare_to_run_thread(rq_index); 558 559 /* 560 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to 561 * thread's stack. 562 */ 563 current_copy(CURRENT, (current_t *) THREAD->kstack); 564 565 /* Switch to thread context. */ 566 context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context); 567 568 /* Back from another thread. */ 569 assert(CPU != NULL); 570 assert(THREAD != NULL); 571 assert(CURRENT->mutex_locks == 0); 572 assert(interrupts_disabled()); 573 574 cleanup_after_thread(THREAD); 575 576 /* 577 * Necessary because we're allowing interrupts in find_best_thread(), 578 * so we need to avoid other code referencing the thread we left. 579 */ 580 THREAD = NULL; 581 } 582 583 halt(); 584 } 585 586 /** Thread wrapper. 587 * 588 * This wrapper is provided to ensure that a starting thread properly handles 589 * everything it needs to do when first scheduled, and when it exits. 590 */ 591 void thread_main_func(void) 592 { 593 assert(interrupts_disabled()); 594 595 void (*f)(void *) = THREAD->thread_code; 596 void *arg = THREAD->thread_arg; 597 598 /* This is where each thread wakes up after its creation */ 599 600 /* Check if we need to clean up after another thread. */ 601 if (CPU_LOCAL->prev_thread) { 602 cleanup_after_thread(CPU_LOCAL->prev_thread); 603 CPU_LOCAL->prev_thread = NULL; 604 } 605 606 interrupts_enable(); 607 608 f(arg); 609 610 thread_exit(); 516 611 517 612 /* Not reached */ … … 539 634 list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) { 540 635 541 irq_spinlock_lock(&thread->lock, false);542 543 636 /* 544 637 * Do not steal CPU-wired threads, threads … … 547 640 * FPU context is still in the CPU. 
548 641 */ 549 if (thread->stolen || thread->nomigrate || 550 thread == fpu_owner) { 551 irq_spinlock_unlock(&thread->lock, false); 642 if (thread->stolen || thread->nomigrate || thread == fpu_owner) { 552 643 continue; 553 644 } 554 645 555 646 thread->stolen = true; 556 thread->cpu = CPU; 557 558 irq_spinlock_unlock(&thread->lock, false); 647 atomic_set_unordered(&thread->cpu, CPU); 559 648 560 649 /* … … 659 748 * 660 749 */ 661 scheduler();750 thread_yield(); 662 751 } else { 663 752 /* … … 686 775 continue; 687 776 688 /* Technically a data race, but we don't really care in this case. */ 689 int needs_relink = cpus[cpu].relink_deadline - cpus[cpu].current_clock_tick; 690 691 printf("cpu%u: address=%p, nrdy=%zu, needs_relink=%d\n", 692 cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy), 693 needs_relink); 777 printf("cpu%u: address=%p, nrdy=%zu\n", 778 cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy)); 694 779 695 780 unsigned int i; … … 705 790 thread) { 706 791 printf("%" PRIu64 "(%s) ", thread->tid, 707 thread_states[ thread->state]);792 thread_states[atomic_get_unordered(&thread->state)]); 708 793 } 709 794 printf("\n"); -
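The scheduler hunks above replace the old thread->lock protected transitions with atomic priority and state updates: thread_requeue_preempted() pushes a preempted thread one run queue down (capped at RQ_COUNT - 1), while thread_requeue_sleeping() puts a woken thread back at priority 0. A minimal standalone sketch of just that policy is below; the RQ_COUNT value is assumed for illustration and everything else is a toy model, not kernel code.

/*
 * Toy model of the requeue policy from the hunk above: preemption demotes,
 * waking from sleep boosts back to the top queue.
 */
#include <stdio.h>

#define RQ_COUNT 16   /* assumed for illustration; the kernel defines its own */

/* Priority after the thread exhausted its slice (thread_requeue_preempted). */
static int requeue_preempted(int prio)
{
    return (prio < RQ_COUNT - 1) ? prio + 1 : prio;
}

/* Priority after the thread woke from sleep (thread_requeue_sleeping). */
static int requeue_sleeping(void)
{
    return 0;
}

int main(void)
{
    int prio = requeue_sleeping();
    for (int i = 0; i < 4; i++) {
        printf("dispatched from rq[%d]\n", prio);
        prio = requeue_preempted(prio);   /* used its whole slice */
    }
    printf("after sleeping, back to rq[%d]\n", requeue_sleeping());
    return 0;
}

Combined with the growing per-queue quantum set in prepare_to_run_thread(), this gives the usual multi-level feedback behaviour: CPU-bound threads drift toward the long-quantum queues while threads that sleep keep returning to rq[0].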
kernel/generic/src/proc/task.c
r86f862c rf2cb80a 158 158 return rc; 159 159 160 atomic_store(&task->refcount, 0);161 160 atomic_store(&task->lifecount, 0); 162 161 … … 201 200 if (!task) 202 201 return NULL; 202 203 refcount_init(&task->refcount); 203 204 204 205 task_create_arch(task); … … 268 269 * 269 270 */ 270 void task_destroy(task_t *task)271 static void task_destroy(task_t *task) 271 272 { 272 273 /* … … 299 300 void task_hold(task_t *task) 300 301 { 301 atomic_inc(&task->refcount);302 refcount_up(&task->refcount); 302 303 } 303 304 … … 311 312 void task_release(task_t *task) 312 313 { 313 if ( (atomic_predec(&task->refcount)) == 0)314 if (refcount_down(&task->refcount)) 314 315 task_destroy(task); 315 316 } … … 416 417 /** Find task structure corresponding to task ID. 417 418 * 418 * The tasks_lock must be already held by the caller of this function and419 * interrupts must be disabled.420 *421 419 * @param id Task ID. 422 420 * 423 * @return Task structure addressor NULL if there is no such task ID.421 * @return Task reference or NULL if there is no such task ID. 424 422 * 425 423 */ 426 424 task_t *task_find_by_id(task_id_t id) 427 425 { 428 assert(interrupts_disabled()); 429 assert(irq_spinlock_locked(&tasks_lock)); 426 task_t *task = NULL; 427 428 irq_spinlock_lock(&tasks_lock, true); 430 429 431 430 odlink_t *odlink = odict_find_eq(&tasks, &id, NULL); 432 if (odlink != NULL) 433 return odict_get_instance(odlink, task_t, ltasks); 434 435 return NULL; 431 if (odlink != NULL) { 432 task = odict_get_instance(odlink, task_t, ltasks); 433 434 /* 435 * The directory of tasks can't hold a reference, since that would 436 * prevent task from ever being destroyed. That means we have to 437 * check for the case where the task is already being destroyed, but 438 * not yet removed from the directory. 
439 */ 440 if (!refcount_try_up(&task->refcount)) 441 task = NULL; 442 } 443 444 irq_spinlock_unlock(&tasks_lock, true); 445 446 return task; 436 447 } 437 448 … … 506 517 /* Current values of threads */ 507 518 list_foreach(task->threads, th_link, thread_t, thread) { 508 irq_spinlock_lock(&thread->lock, false);509 510 519 /* Process only counted threads */ 511 520 if (!thread->uncounted) { … … 515 524 } 516 525 517 uret += thread->ucycles;518 kret += thread->kcycles;526 uret += atomic_time_read(&thread->ucycles); 527 kret += atomic_time_read(&thread->kcycles); 519 528 } 520 521 irq_spinlock_unlock(&thread->lock, false);522 529 } 523 530 … … 528 535 static void task_kill_internal(task_t *task) 529 536 { 530 irq_spinlock_lock(&task->lock, false);537 irq_spinlock_lock(&task->lock, true); 531 538 532 539 /* … … 538 545 } 539 546 540 irq_spinlock_unlock(&task->lock, false);547 irq_spinlock_unlock(&task->lock, true); 541 548 } 542 549 … … 556 563 return EPERM; 557 564 558 irq_spinlock_lock(&tasks_lock, true);559 560 565 task_t *task = task_find_by_id(id); 561 if (!task) { 562 irq_spinlock_unlock(&tasks_lock, true); 566 if (!task) 563 567 return ENOENT; 564 }565 568 566 569 task_kill_internal(task); 567 irq_spinlock_unlock(&tasks_lock, true); 568 570 task_release(task); 569 571 return EOK; 570 572 } … … 596 598 } 597 599 598 irq_spinlock_lock(&tasks_lock, true);599 600 task_kill_internal(TASK); 600 irq_spinlock_unlock(&tasks_lock, true);601 602 601 thread_exit(); 603 602 } … … 628 627 if (additional) 629 628 printf("%-8" PRIu64 " %9zu", task->taskid, 630 atomic_load(&task-> refcount));629 atomic_load(&task->lifecount)); 631 630 else 632 631 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p" … … 640 639 printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c " 641 640 "%9zu\n", task->taskid, ucycles, usuffix, kcycles, 642 ksuffix, atomic_load(&task-> refcount));641 ksuffix, atomic_load(&task->lifecount)); 643 642 else 644 643 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n", -
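task_find_by_id() now hands back a counted reference taken with refcount_try_up(), because the tasks directory itself no longer pins its entries; a lookup can race with task destruction and must be allowed to fail. A small C11 model of that try-up step is sketched below, assuming a plain atomic counter where zero means destruction has already begun; it only models the idea, not the kernel's refcount type.

/*
 * Model of refcount_try_up(): taking a new reference succeeds only while at
 * least one live reference still exists.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    atomic_int refs;   /* live references; 0 means destruction has begun */
} obj_t;

static bool try_up(obj_t *obj)
{
    int old = atomic_load_explicit(&obj->refs, memory_order_relaxed);
    do {
        if (old == 0)
            return false;   /* too late, someone is already destroying it */
    } while (!atomic_compare_exchange_weak_explicit(&obj->refs, &old, old + 1,
        memory_order_acquire, memory_order_relaxed));
    return true;
}

int main(void)
{
    obj_t live = { .refs = 1 };
    obj_t dying = { .refs = 0 };
    printf("live:  %s\n", try_up(&live) ? "reference taken" : "lookup fails");
    printf("dying: %s\n", try_up(&dying) ? "reference taken" : "lookup fails");
    return 0;
}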
kernel/generic/src/proc/thread.c
r86f862c rf2cb80a 60 60 #include <arch/interrupt.h> 61 61 #include <smp/ipi.h> 62 #include <arch/faddr.h>63 62 #include <atomic.h> 64 63 #include <memw.h> … … 82 81 }; 83 82 84 enum sleep_state {85 SLEEP_INITIAL,86 SLEEP_ASLEEP,87 SLEEP_WOKE,88 };89 90 83 /** Lock protecting the @c threads ordered dictionary . 91 84 * … … 115 108 static int threads_cmp(void *, void *); 116 109 117 /** Thread wrapper.118 *119 * This wrapper is provided to ensure that every thread makes a call to120 * thread_exit() when its implementing function returns.121 *122 * interrupts_disable() is assumed.123 *124 */125 static void cushion(void)126 {127 void (*f)(void *) = THREAD->thread_code;128 void *arg = THREAD->thread_arg;129 THREAD->last_cycle = get_cycle();130 131 /* This is where each thread wakes up after its creation */132 irq_spinlock_unlock(&THREAD->lock, false);133 interrupts_enable();134 135 f(arg);136 137 thread_exit();138 139 /* Not reached */140 }141 142 110 /** Initialization and allocation for thread_t structure 143 111 * … … 147 115 thread_t *thread = (thread_t *) obj; 148 116 149 irq_spinlock_initialize(&thread->lock, "thread_t_lock");150 117 link_initialize(&thread->rq_link); 151 118 link_initialize(&thread->wq_link); … … 229 196 void thread_wire(thread_t *thread, cpu_t *cpu) 230 197 { 231 i rq_spinlock_lock(&thread->lock, true);232 thread->cpu = cpu;198 ipl_t ipl = interrupts_disable(); 199 atomic_set_unordered(&thread->cpu, cpu); 233 200 thread->nomigrate++; 234 irq_spinlock_unlock(&thread->lock, true); 235 } 236 237 /** Invoked right before thread_ready() readies the thread. thread is locked. */ 238 static void before_thread_is_ready(thread_t *thread) 239 { 240 assert(irq_spinlock_locked(&thread->lock)); 241 } 242 243 /** Make thread ready 244 * 245 * Switch thread to the ready state. Consumes reference passed by the caller. 246 * 247 * @param thread Thread to make ready. 248 * 249 */ 250 void thread_ready(thread_t *thread) 251 { 252 irq_spinlock_lock(&thread->lock, true); 253 254 assert(thread->state != Ready); 255 256 before_thread_is_ready(thread); 257 258 int i = (thread->priority < RQ_COUNT - 1) ? 259 ++thread->priority : thread->priority; 260 261 /* Prefer the CPU on which the thread ran last */ 262 cpu_t *cpu = thread->cpu ? thread->cpu : CPU; 263 264 thread->state = Ready; 265 266 irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock)); 267 268 /* 269 * Append thread to respective ready queue 270 * on respective processor. 271 */ 272 273 list_append(&thread->rq_link, &cpu->rq[i].rq); 274 cpu->rq[i].n++; 275 irq_spinlock_unlock(&(cpu->rq[i].lock), true); 276 277 atomic_inc(&nrdy); 278 atomic_inc(&cpu->nrdy); 201 interrupts_restore(ipl); 202 } 203 204 /** Start a thread that wasn't started yet since it was created. 205 * 206 * @param thread A reference to the newly created thread. 
207 */ 208 void thread_start(thread_t *thread) 209 { 210 assert(atomic_get_unordered(&thread->state) == Entering); 211 thread_requeue_sleeping(thread_ref(thread)); 279 212 } 280 213 … … 315 248 irq_spinlock_unlock(&tidlock, true); 316 249 317 memset(&thread->saved_context, 0, sizeof(thread->saved_context)); 318 context_set(&thread->saved_context, FADDR(cushion), 319 (uintptr_t) thread->kstack, STACK_SIZE); 250 context_create(&thread->saved_context, thread_main_func, 251 thread->kstack, STACK_SIZE); 320 252 321 253 current_initialize((current_t *) thread->kstack); 322 323 ipl_t ipl = interrupts_disable();324 thread->saved_ipl = interrupts_read();325 interrupts_restore(ipl);326 254 327 255 str_cpy(thread->name, THREAD_NAME_BUFLEN, name); … … 329 257 thread->thread_code = func; 330 258 thread->thread_arg = arg; 331 thread->ucycles = 0;332 thread->kcycles = 0;259 thread->ucycles = ATOMIC_TIME_INITIALIZER(); 260 thread->kcycles = ATOMIC_TIME_INITIALIZER(); 333 261 thread->uncounted = 334 262 ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED); 335 thread->priority = -1; /* Start in rq[0] */336 thread->cpu = NULL;263 atomic_init(&thread->priority, 0); 264 atomic_init(&thread->cpu, NULL); 337 265 thread->stolen = false; 338 266 thread->uspace = … … 340 268 341 269 thread->nomigrate = 0; 342 thread->state = Entering;270 atomic_init(&thread->state, Entering); 343 271 344 272 atomic_init(&thread->sleep_queue, NULL); … … 360 288 #ifdef CONFIG_UDEBUG 361 289 /* Initialize debugging stuff */ 362 thread->btrace = false;290 atomic_init(&thread->btrace, false); 363 291 udebug_thread_initialize(&thread->udebug); 364 292 #endif … … 404 332 405 333 if (!thread->uncounted) { 406 thread->task->ucycles += thread->ucycles;407 thread->task->kcycles += thread->kcycles;334 thread->task->ucycles += atomic_time_read(&thread->ucycles); 335 thread->task->kcycles += atomic_time_read(&thread->kcycles); 408 336 } 409 337 410 338 irq_spinlock_unlock(&thread->task->lock, false); 411 339 412 assert(( thread->state == Exiting) || (thread->state== Lingering));340 assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering)); 413 341 414 342 /* Clear cpu->fpu_owner if set to this thread. */ 415 343 #ifdef CONFIG_FPU_LAZY 416 if (thread->cpu) { 344 cpu_t *cpu = atomic_get_unordered(&thread->cpu); 345 if (cpu) { 417 346 /* 418 347 * We need to lock for this because the old CPU can concurrently try … … 420 349 * it to finish. An atomic compare-and-swap wouldn't be enough. 
421 350 */ 422 irq_spinlock_lock(&thread->cpu->fpu_lock, false); 423 424 thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner, 425 memory_order_relaxed); 426 427 if (owner == thread) { 428 atomic_store_explicit(&thread->cpu->fpu_owner, NULL, 429 memory_order_relaxed); 430 } 431 432 irq_spinlock_unlock(&thread->cpu->fpu_lock, false); 351 irq_spinlock_lock(&cpu->fpu_lock, false); 352 353 if (atomic_get_unordered(&cpu->fpu_owner) == thread) 354 atomic_set_unordered(&cpu->fpu_owner, NULL); 355 356 irq_spinlock_unlock(&cpu->fpu_lock, false); 433 357 } 434 358 #endif … … 525 449 } 526 450 527 irq_spinlock_lock(&THREAD->lock, true); 528 THREAD->state = Exiting; 529 irq_spinlock_unlock(&THREAD->lock, true); 530 531 scheduler(); 532 533 panic("should never be reached"); 451 scheduler_enter(Exiting); 452 unreachable(); 534 453 } 535 454 … … 579 498 } 580 499 581 static void thread_wait_internal(void)582 {583 assert(THREAD != NULL);584 585 ipl_t ipl = interrupts_disable();586 587 if (atomic_load(&haltstate))588 halt();589 590 /*591 * Lock here to prevent a race between entering the scheduler and another592 * thread rescheduling this thread.593 */594 irq_spinlock_lock(&THREAD->lock, false);595 596 int expected = SLEEP_INITIAL;597 598 /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */599 if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,600 SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {601 THREAD->state = Sleeping;602 scheduler_locked(ipl);603 } else {604 assert(expected == SLEEP_WOKE);605 /* Return immediately. */606 irq_spinlock_unlock(&THREAD->lock, false);607 interrupts_restore(ipl);608 }609 }610 611 500 static void thread_wait_timeout_callback(void *arg) 612 501 { … … 649 538 timeout_t timeout; 650 539 540 /* Extra check to avoid going to scheduler if we don't need to. */ 541 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) != 542 SLEEP_INITIAL) 543 return THREAD_WAIT_SUCCESS; 544 651 545 if (deadline != DEADLINE_NEVER) { 652 /* Extra check to avoid setting up a deadline if we don't need to. */653 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=654 SLEEP_INITIAL)655 return THREAD_WAIT_SUCCESS;656 657 546 timeout_initialize(&timeout); 658 547 timeout_register_deadline(&timeout, deadline, … … 660 549 } 661 550 662 thread_wait_internal();551 scheduler_enter(Sleeping); 663 552 664 553 if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) { … … 674 563 675 564 int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE, 676 memory_order_ release);565 memory_order_acq_rel); 677 566 678 567 if (state == SLEEP_ASLEEP) { … … 682 571 * the waking thread by the sleeper in thread_wait_finish(). 683 572 */ 684 thread_re ady(thread);573 thread_requeue_sleeping(thread); 685 574 } 686 575 } … … 689 578 void thread_migration_disable(void) 690 579 { 580 ipl_t ipl = interrupts_disable(); 581 691 582 assert(THREAD); 692 693 583 THREAD->nomigrate++; 584 585 interrupts_restore(ipl); 694 586 } 695 587 … … 697 589 void thread_migration_enable(void) 698 590 { 591 ipl_t ipl = interrupts_disable(); 592 699 593 assert(THREAD); 700 594 assert(THREAD->nomigrate > 0); … … 702 596 if (THREAD->nomigrate > 0) 703 597 THREAD->nomigrate--; 598 599 interrupts_restore(ipl); 704 600 } 705 601 … … 731 627 732 628 /** Wait for another thread to exit. 733 * This function does not destroy the thread. Reference counting handles that.629 * After successful wait, the thread reference is destroyed. 
734 630 * 735 631 * @param thread Thread to join on exit. … … 742 638 errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags) 743 639 { 640 assert(thread != NULL); 641 744 642 if (thread == THREAD) 745 643 return EINVAL; 746 644 747 irq_spinlock_lock(&thread->lock, true); 748 state_t state = thread->state; 749 irq_spinlock_unlock(&thread->lock, true); 750 751 if (state == Exiting) { 752 return EOK; 753 } else { 754 return _waitq_sleep_timeout(&thread->join_wq, usec, flags); 755 } 645 errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags); 646 647 if (rc == EOK) 648 thread_put(thread); 649 650 return rc; 651 } 652 653 void thread_detach(thread_t *thread) 654 { 655 thread_put(thread); 756 656 } 757 657 … … 770 670 771 671 (void) waitq_sleep_timeout(&wq, usec); 672 } 673 674 /** Allow other threads to run. */ 675 void thread_yield(void) 676 { 677 assert(THREAD != NULL); 678 scheduler_enter(Running); 772 679 } 773 680 … … 776 683 uint64_t ucycles, kcycles; 777 684 char usuffix, ksuffix; 778 order_suffix(thread->ucycles, &ucycles, &usuffix); 779 order_suffix(thread->kcycles, &kcycles, &ksuffix); 685 order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix); 686 order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix); 687 688 state_t state = atomic_get_unordered(&thread->state); 780 689 781 690 char *name; … … 791 700 else 792 701 printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n", 793 thread->tid, name, thread, thread_states[ thread->state],702 thread->tid, name, thread, thread_states[state], 794 703 thread->task, thread->task->container); 795 704 796 705 if (additional) { 797 if (thread->cpu) 798 printf("%-5u", thread->cpu->id); 706 cpu_t *cpu = atomic_get_unordered(&thread->cpu); 707 if (cpu) 708 printf("%-5u", cpu->id); 799 709 else 800 710 printf("none "); 801 711 802 if ( thread->state == Sleeping) {712 if (state == Sleeping) { 803 713 printf(" %p", thread->sleep_queue); 804 714 } … … 879 789 void thread_update_accounting(bool user) 880 790 { 791 assert(interrupts_disabled()); 792 881 793 uint64_t time = get_cycle(); 882 794 883 assert(interrupts_disabled());884 assert(irq_spinlock_locked(&THREAD->lock));885 886 795 if (user) 887 THREAD->ucycles += time - THREAD->last_cycle;796 atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle); 888 797 else 889 THREAD->kcycles += time - THREAD->last_cycle;798 atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle); 890 799 891 800 THREAD->last_cycle = time; … … 998 907 */ 999 908 1000 irq_spinlock_lock(&thread->lock, true); 1001 1002 bool sleeping = false; 1003 istate_t *istate = thread->udebug.uspace_state; 1004 if (istate != NULL) { 1005 printf("Scheduling thread stack trace.\n"); 1006 thread->btrace = true; 1007 if (thread->state == Sleeping) 1008 sleeping = true; 1009 } else 1010 printf("Thread interrupt state not available.\n"); 1011 1012 irq_spinlock_unlock(&thread->lock, true); 1013 1014 if (sleeping) 1015 thread_wakeup(thread); 1016 909 printf("Scheduling thread stack trace.\n"); 910 atomic_set_unordered(&thread->btrace, true); 911 912 thread_wakeup(thread); 1017 913 thread_put(thread); 1018 914 } … … 1115 1011 thread_attach(thread, TASK); 1116 1012 #endif 1117 thread_ready(thread); 1013 thread_start(thread); 1014 thread_put(thread); 1118 1015 1119 1016 return 0; -
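The sleep/wake path in thread.c now hinges on the sleep_state variable alone: cleanup_after_thread() commits a sleeper with a compare-and-swap from SLEEP_INITIAL to SLEEP_ASLEEP, and thread_wakeup() swings the state to SLEEP_WOKE, requeueing the thread only when the exchange actually observed SLEEP_ASLEEP. The standalone C11 sketch below mirrors that handshake with toy enum values and a single-threaded demonstration of both orderings.

/*
 * Sleep/wake handshake: the sleeper only really blocks if it wins the CAS,
 * and the waker only requeues if it saw a committed sleeper.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE };

/* Sleeper side: returns true if the caller should actually block. */
static bool commit_to_sleep(atomic_int *sleep_state)
{
    int expected = SLEEP_INITIAL;
    /* On failure, expected == SLEEP_WOKE and the sleeper returns at once. */
    return atomic_compare_exchange_strong_explicit(sleep_state, &expected,
        SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire);
}

/* Waker side: returns true if the target must be requeued by the caller. */
static bool wake(atomic_int *sleep_state)
{
    int prev = atomic_exchange_explicit(sleep_state, SLEEP_WOKE,
        memory_order_acq_rel);
    return prev == SLEEP_ASLEEP;
}

int main(void)
{
    /* Wake-up arrives before the sleeper commits: nobody blocks. */
    atomic_int s1 = SLEEP_INITIAL;
    printf("early wake requeues: %s\n", wake(&s1) ? "yes" : "no");
    printf("sleeper blocks:      %s\n", commit_to_sleep(&s1) ? "yes" : "no");

    /* Sleeper commits first: the waker is responsible for the requeue. */
    atomic_int s2 = SLEEP_INITIAL;
    printf("sleeper blocks:      %s\n", commit_to_sleep(&s2) ? "yes" : "no");
    printf("wake requeues:       %s\n", wake(&s2) ? "yes" : "no");
    return 0;
}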
kernel/generic/src/security/perm.c
r86f862c rf2cb80a 89 89 return EPERM; 90 90 91 irq_spinlock_lock(&tasks_lock, true);92 91 task_t *task = task_find_by_id(taskid); 93 94 if ((!task) || (!container_check(CONTAINER, task->container))) { 95 irq_spinlock_unlock(&tasks_lock, true); 92 if (!task) 96 93 return ENOENT; 94 95 errno_t rc = ENOENT; 96 97 irq_spinlock_lock(&task->lock, true); 98 if (container_check(CONTAINER, task->container)) { 99 task->perms |= perms; 100 rc = EOK; 97 101 } 98 99 irq_spinlock_lock(&task->lock, false); 100 task->perms |= perms; 101 irq_spinlock_unlock(&task->lock, false); 102 103 irq_spinlock_unlock(&tasks_lock, true); 104 return EOK; 102 irq_spinlock_unlock(&task->lock, true); 103 104 task_release(task); 105 return rc; 105 106 } 106 107 … … 118 119 static errno_t perm_revoke(task_id_t taskid, perm_t perms) 119 120 { 120 irq_spinlock_lock(&tasks_lock, true);121 122 121 task_t *task = task_find_by_id(taskid); 123 if ((!task) || (!container_check(CONTAINER, task->container))) { 124 irq_spinlock_unlock(&tasks_lock, true); 122 if (!task) 125 123 return ENOENT; 126 }127 124 128 125 /* … … 131 128 * doesn't have PERM_PERM. 132 129 */ 133 irq_spinlock_lock(&TASK->lock, false); 134 135 if ((!(TASK->perms & PERM_PERM)) || (task != TASK)) { 136 irq_spinlock_unlock(&TASK->lock, false); 137 irq_spinlock_unlock(&tasks_lock, true); 130 if (task != TASK && !(perm_get(TASK) & PERM_PERM)) { 131 task_release(task); 138 132 return EPERM; 139 133 } 140 134 141 task->perms &= ~perms; 142 irq_spinlock_unlock(&TASK->lock, false); 143 144 irq_spinlock_unlock(&tasks_lock, true); 145 return EOK; 135 errno_t rc = ENOENT; 136 137 irq_spinlock_lock(&task->lock, true); 138 if (container_check(CONTAINER, task->container)) { 139 task->perms &= ~perms; 140 rc = EOK; 141 } 142 irq_spinlock_unlock(&task->lock, true); 143 144 task_release(task); 145 return rc; 146 146 } 147 147 -
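Both perm_grant() and perm_revoke() now follow the same shape: take a reference with task_find_by_id(), perform the container check and the bit update under task->lock, then drop the reference with task_release(). The outline below condenses that shape into a single hypothetical helper; perm_modify() is not a real kernel function, the PERM_PERM self-check in the revoke path is left out, and the fragment reuses kernel identifiers rather than compiling standalone.

/* Hypothetical outline of the grant/revoke shape shown in the hunks above. */
static errno_t perm_modify(task_id_t taskid, perm_t perms, bool grant)
{
    task_t *task = task_find_by_id(taskid);   /* returns a counted reference */
    if (!task)
        return ENOENT;

    errno_t rc = ENOENT;

    irq_spinlock_lock(&task->lock, true);
    if (container_check(CONTAINER, task->container)) {
        if (grant)
            task->perms |= perms;
        else
            task->perms &= ~perms;
        rc = EOK;
    }
    irq_spinlock_unlock(&task->lock, true);

    task_release(task);                       /* pair with the lookup */
    return rc;
}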
kernel/generic/src/syscall/syscall.c
r86f862c rf2cb80a
 141  141  {
 142  142      /* Do userpace accounting */
 143       -   irq_spinlock_lock(&THREAD->lock, true);
      143  +   ipl_t ipl = interrupts_disable();
 144  144      thread_update_accounting(true);
 145       -   irq_spinlock_unlock(&THREAD->lock, true);
      145  +   interrupts_restore(ipl);
 146  146
 147  147  #ifdef CONFIG_UDEBUG
 …    …
 191  191
 192  192      /* Do kernel accounting */
 193       -   irq_spinlock_lock(&THREAD->lock, true);
      193  +   ipl = interrupts_disable();
 194  194      thread_update_accounting(false);
 195       -   irq_spinlock_unlock(&THREAD->lock, true);
      195  +   interrupts_restore(ipl);
 196  196
 197  197      return rc;
-
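The syscall path now brackets thread_update_accounting() with interrupts_disable()/interrupts_restore() instead of taking the removed thread lock; the accounting itself just charges the cycles elapsed since last_cycle to user time on entry and to kernel time on exit. Below is a toy userspace model of that split, with get_cycle() faked by a software counter; it only illustrates the bookkeeping, not the kernel's actual time source.

/* Toy model of the user/kernel cycle split around a syscall. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_cycle;
static uint64_t get_cycle(void) { return fake_cycle += 1000; }

static struct {
    uint64_t ucycles, kcycles, last_cycle;
} thread;

static void update_accounting(int user)
{
    uint64_t now = get_cycle();
    if (user)
        thread.ucycles += now - thread.last_cycle;   /* time spent in userspace */
    else
        thread.kcycles += now - thread.last_cycle;   /* time spent in the kernel */
    thread.last_cycle = now;
}

int main(void)
{
    thread.last_cycle = get_cycle();
    update_accounting(1);   /* syscall entry: charge the user-mode stretch */
    /* ... syscall work ... */
    update_accounting(0);   /* syscall exit: charge the kernel-mode stretch */
    printf("u=%llu k=%llu\n", (unsigned long long)thread.ucycles,
        (unsigned long long)thread.kcycles);
    return 0;
}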
kernel/generic/src/sysinfo/stats.c
r86f862c rf2cb80a 221 221 stats_task->virtmem = get_task_virtmem(task->as); 222 222 stats_task->resmem = get_task_resmem(task->as); 223 stats_task->threads = atomic_load(&task-> refcount);223 stats_task->threads = atomic_load(&task->lifecount); 224 224 task_get_accounting(task, &(stats_task->ucycles), 225 225 &(stats_task->kcycles)); … … 299 299 { 300 300 assert(interrupts_disabled()); 301 assert(irq_spinlock_locked(&thread->lock));302 301 303 302 stats_thread->thread_id = thread->tid; 304 303 stats_thread->task_id = thread->task->taskid; 305 stats_thread->state = thread->state; 306 stats_thread->priority = thread->priority; 307 stats_thread->ucycles = thread->ucycles; 308 stats_thread->kcycles = thread->kcycles; 309 310 if (thread->cpu != NULL) { 304 stats_thread->state = atomic_get_unordered(&thread->state); 305 stats_thread->priority = atomic_get_unordered(&thread->priority); 306 stats_thread->ucycles = atomic_time_read(&thread->ucycles); 307 stats_thread->kcycles = atomic_time_read(&thread->kcycles); 308 309 cpu_t *cpu = atomic_get_unordered(&thread->cpu); 310 311 if (cpu != NULL) { 311 312 stats_thread->on_cpu = true; 312 stats_thread->cpu = thread->cpu->id;313 stats_thread->cpu = cpu->id; 313 314 } else 314 315 stats_thread->on_cpu = false; … … 361 362 thread_t *thread = thread_first(); 362 363 while (thread != NULL) { 363 /* Interrupts are already disabled */364 irq_spinlock_lock(&thread->lock, false);365 366 364 /* Record the statistics and increment the index */ 367 365 produce_stats_thread(thread, &stats_threads[i]); 368 366 i++; 369 370 irq_spinlock_unlock(&thread->lock, false);371 367 372 368 thread = thread_next(thread); … … 515 511 { 516 512 /* Initially no return value */ 517 sysinfo_return_t ret; 518 ret.tag = SYSINFO_VAL_UNDEFINED; 513 sysinfo_return_t ret = { 514 .tag = SYSINFO_VAL_UNDEFINED, 515 }; 519 516 520 517 /* Parse the task ID */ … … 523 520 return ret; 524 521 525 /* Messing with task structures, avoid deadlock */526 irq_spinlock_lock(&tasks_lock, true);527 528 522 task_t *task = task_find_by_id(task_id); 529 if (task == NULL) { 530 /* No task with this ID */ 531 irq_spinlock_unlock(&tasks_lock, true); 523 if (!task) 532 524 return ret; 533 }534 525 535 526 if (dry_run) { … … 537 528 ret.data.data = NULL; 538 529 ret.data.size = sizeof(stats_task_t); 539 540 irq_spinlock_unlock(&tasks_lock, true);541 530 } else { 542 531 /* Allocate stats_task_t structure */ 543 stats_task_t *stats_task = 544 (stats_task_t *) malloc(sizeof(stats_task_t)); 545 if (stats_task == NULL) { 546 irq_spinlock_unlock(&tasks_lock, true); 547 return ret; 532 stats_task_t *stats_task = malloc(sizeof(stats_task_t)); 533 534 if (stats_task != NULL) { 535 /* Correct return value */ 536 ret.tag = SYSINFO_VAL_FUNCTION_DATA; 537 ret.data.data = stats_task; 538 ret.data.size = sizeof(stats_task_t); 539 540 irq_spinlock_lock(&task->lock, true); 541 produce_stats_task(task, stats_task); 542 irq_spinlock_unlock(&task->lock, true); 548 543 } 549 550 /* Correct return value */ 551 ret.tag = SYSINFO_VAL_FUNCTION_DATA; 552 ret.data.data = (void *) stats_task; 553 ret.data.size = sizeof(stats_task_t); 554 555 /* Hand-over-hand locking */ 556 irq_spinlock_exchange(&tasks_lock, &task->lock); 557 558 produce_stats_task(task, stats_task); 559 560 irq_spinlock_unlock(&task->lock, true); 561 } 562 544 } 545 546 task_release(task); 563 547 return ret; 564 548 } … … 624 608 ret.data.size = sizeof(stats_thread_t); 625 609 626 /*627 * Replaced hand-over-hand locking with regular nested sections628 * to avoid weak 
reference leak issues.629 */630 irq_spinlock_lock(&thread->lock, false);631 610 produce_stats_thread(thread, stats_thread); 632 irq_spinlock_unlock(&thread->lock, false);633 611 634 612 irq_spinlock_unlock(&threads_lock, true); -
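produce_stats_thread() no longer takes the thread's lock: every field is read through atomic_get_unordered() or atomic_time_read(), so a snapshot may mix values from slightly different moments but never observes a torn value. The C11 stand-in below uses relaxed loads to illustrate the idea; the type and field names are made up for the example.

/* Relaxed-load snapshotting of fields another CPU may be updating. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    atomic_int state;           /* written by the scheduler */
    atomic_int priority;
    _Atomic uint64_t kcycles;   /* written by the accounting code */
} thread_model_t;

typedef struct {
    int state;
    int priority;
    uint64_t kcycles;
} stats_t;

static stats_t sample(thread_model_t *t)
{
    stats_t s;
    /* Each load is individually atomic; the set need not be consistent. */
    s.state = atomic_load_explicit(&t->state, memory_order_relaxed);
    s.priority = atomic_load_explicit(&t->priority, memory_order_relaxed);
    s.kcycles = atomic_load_explicit(&t->kcycles, memory_order_relaxed);
    return s;
}

int main(void)
{
    thread_model_t t = { .state = 1, .priority = 3, .kcycles = 42 };
    stats_t s = sample(&t);
    printf("state=%d priority=%d kcycles=%llu\n", s.state, s.priority,
        (unsigned long long)s.kcycles);
    return 0;
}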
kernel/generic/src/time/clock.c
r86f862c rf2cb80a 123 123 static void cpu_update_accounting(void) 124 124 { 125 // FIXME: get_cycle() is unimplemented on several platforms 125 126 uint64_t now = get_cycle(); 126 atomic_time_increment(&CPU->busy_cycles, now - CPU ->last_cycle);127 CPU ->last_cycle = now;127 atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle); 128 CPU_LOCAL->last_cycle = now; 128 129 } 129 130 … … 137 138 void clock(void) 138 139 { 139 size_t missed_clock_ticks = CPU ->missed_clock_ticks;140 CPU ->missed_clock_ticks = 0;141 142 CPU ->current_clock_tick += missed_clock_ticks + 1;143 uint64_t current_clock_tick = CPU ->current_clock_tick;140 size_t missed_clock_ticks = CPU_LOCAL->missed_clock_ticks; 141 CPU_LOCAL->missed_clock_ticks = 0; 142 143 CPU_LOCAL->current_clock_tick += missed_clock_ticks + 1; 144 uint64_t current_clock_tick = CPU_LOCAL->current_clock_tick; 144 145 clock_update_counters(current_clock_tick); 145 146 … … 186 187 187 188 if (THREAD) { 188 if (current_clock_tick >= CPU ->preempt_deadline && PREEMPTION_ENABLED) {189 scheduler();189 if (current_clock_tick >= CPU_LOCAL->preempt_deadline && PREEMPTION_ENABLED) { 190 thread_yield(); 190 191 #ifdef CONFIG_UDEBUG 191 192 /* -
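Taken together with the scheduler change, the clock path now works off two per-CPU values: prepare_to_run_thread() arms CPU_LOCAL->preempt_deadline with a quantum of (rq_index + 1) * 10000 microseconds, and clock() advances current_clock_tick (catching up on missed ticks) and calls thread_yield() once the deadline is reached. The self-contained toy model below assumes a 10 kHz tick for us2ticks(); it only demonstrates the arithmetic.

/* Toy model of quantum arming and tick-driven preemption. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t current_clock_tick;
static uint64_t preempt_deadline;

/* Assumed 10 kHz tick: one tick per 100 microseconds. */
static uint64_t us2ticks(uint64_t us) { return us / 100; }

static void dispatch(int rq_index)
{
    uint64_t time_to_run = (uint64_t)(rq_index + 1) * 10000;  /* microseconds */
    preempt_deadline = current_clock_tick + us2ticks(time_to_run);
}

/* One clock interrupt; returns true when the running thread should yield. */
static bool clock_tick(unsigned missed_clock_ticks)
{
    current_clock_tick += missed_clock_ticks + 1;
    return current_clock_tick >= preempt_deadline;
}

int main(void)
{
    dispatch(0);                       /* thread from rq[0]: 10 ms slice */
    unsigned ticks = 1;
    while (!clock_tick(0))
        ticks++;
    printf("preempted after %u ticks\n", ticks);
    return 0;
}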
kernel/generic/src/time/timeout.c
r86f862c rf2cb80a
 77  77      return 0;
 78  78
 79       -  return CPU->current_clock_tick + us2ticks(usec);
      79  +  return CPU_LOCAL->current_clock_tick + us2ticks(usec);
 80  80
 81  81
-
kernel/generic/src/udebug/udebug_ops.c
r86f862c rf2cb80a 90 90 } 91 91 92 irq_spinlock_lock(&thread->lock, true);93 94 92 /* Verify that 'thread' is a userspace thread. */ 95 93 if (!thread->uspace) { 96 /* It's not, deny its existence */97 irq_spinlock_unlock(&thread->lock, true);98 94 mutex_unlock(&TASK->udebug.lock); 99 95 return ENOENT; 100 96 } 101 102 /* Verify debugging state. */103 if (thread->udebug.active != true) {104 /* Not in debugging session or undesired GO state */105 irq_spinlock_unlock(&thread->lock, true);106 mutex_unlock(&TASK->udebug.lock);107 return ENOENT;108 }109 110 /* Now verify that the thread belongs to the current task. */111 if (thread->task != TASK) {112 /* No such thread belonging this task */113 irq_spinlock_unlock(&thread->lock, true);114 mutex_unlock(&TASK->udebug.lock);115 return ENOENT;116 }117 118 irq_spinlock_unlock(&thread->lock, true);119 120 /* Only mutex TASK->udebug.lock left. */121 97 122 98 /* … … 126 102 */ 127 103 mutex_lock(&thread->udebug.lock); 104 105 /* Verify debugging state. */ 106 if (thread->udebug.active != true) { 107 /* Not in debugging session or undesired GO state */ 108 mutex_unlock(&thread->udebug.lock); 109 mutex_unlock(&TASK->udebug.lock); 110 return ENOENT; 111 } 112 113 /* Now verify that the thread belongs to the current task. */ 114 if (thread->task != TASK) { 115 /* No such thread belonging this task */ 116 mutex_unlock(&thread->udebug.lock); 117 mutex_unlock(&TASK->udebug.lock); 118 return ENOENT; 119 } 128 120 129 121 /* The big task mutex is no longer needed. */ … … 388 380 /* FIXME: make sure the thread isn't past debug shutdown... */ 389 381 list_foreach(TASK->threads, th_link, thread_t, thread) { 390 irq_spinlock_lock(&thread->lock, false);391 382 bool uspace = thread->uspace; 392 irq_spinlock_unlock(&thread->lock, false);393 383 394 384 /* Not interested in kernel threads. */ -
kernel/test/mm/falloc2.c
r86f862c rf2cb80a 43 43 #define THREADS 8 44 44 45 static atomic_size_t thread_cnt;46 45 static atomic_size_t thread_fail; 47 46 … … 56 55 "Unable to allocate frames\n", THREAD->tid, CPU->id); 57 56 atomic_inc(&thread_fail); 58 atomic_dec(&thread_cnt);59 57 return; 60 58 } … … 108 106 TPRINTF("Thread #%" PRIu64 " (cpu%u): Exiting\n", 109 107 THREAD->tid, CPU->id); 110 atomic_dec(&thread_cnt);111 108 } 112 109 113 110 const char *test_falloc2(void) 114 111 { 115 atomic_store(&thread_cnt, THREADS);116 112 atomic_store(&thread_fail, 0); 113 114 thread_t *threads[THREADS] = { }; 117 115 118 116 for (unsigned int i = 0; i < THREADS; i++) { … … 123 121 break; 124 122 } 125 thread_ready(thrd); 123 thread_start(thrd); 124 threads[i] = thrd; 126 125 } 127 126 128 while (atomic_load(&thread_cnt) > 0) { 129 TPRINTF("Threads left: %zu\n", 130 atomic_load(&thread_cnt)); 131 thread_sleep(1); 127 for (unsigned int i = 0; i < THREADS; i++) { 128 if (threads[i] != NULL) 129 thread_join(threads[i]); 130 131 TPRINTF("Threads left: %u\n", THREADS - i - 1); 132 132 } 133 133 -
kernel/test/mm/slab1.c
r86f862c rf2cb80a 121 121 static void *thr_data[THREADS][THR_MEM_COUNT]; 122 122 static slab_cache_t *thr_cache; 123 static semaphore_t thr_sem;124 123 125 124 static void slabtest(void *data) … … 142 141 143 142 TPRINTF("Thread #%" PRIu64 " finished\n", THREAD->tid); 144 145 semaphore_up(&thr_sem);146 143 } 147 144 148 145 static void testthreads(void) 149 146 { 150 thread_t *t;151 int i;152 153 147 thr_cache = slab_cache_create("thread_cache", THR_MEM_SIZE, 0, NULL, NULL, 154 148 SLAB_CACHE_NOMAGAZINE); 155 149 156 semaphore_initialize(&thr_sem, 0); 157 for (i = 0; i < THREADS; i++) { 158 if (!(t = thread_create(slabtest, (void *) (sysarg_t) i, TASK, THREAD_FLAG_NONE, "slabtest"))) { 150 thread_t *threads[THREADS] = { }; 151 152 for (int i = 0; i < THREADS; i++) { 153 threads[i] = thread_create(slabtest, (void *) (sysarg_t) i, 154 TASK, THREAD_FLAG_NONE, "slabtest"); 155 if (threads[i]) { 156 thread_start(threads[i]); 157 } else { 159 158 TPRINTF("Could not create thread %d\n", i); 160 } else 161 thread_ready(t); 159 } 162 160 } 163 161 164 for (i = 0; i < THREADS; i++) 165 semaphore_down(&thr_sem); 162 for (int i = 0; i < THREADS; i++) { 163 if (threads[i] != NULL) 164 thread_join(threads[i]); 165 } 166 166 167 167 slab_cache_destroy(thr_cache); -
kernel/test/mm/slab2.c
r86f862c rf2cb80a 127 127 128 128 static slab_cache_t *thr_cache; 129 static semaphore_t thr_sem;130 129 static condvar_t thread_starter; 131 130 static mutex_t starter_mutex; … … 188 187 if (!test_quiet) 189 188 slab_print_list(); 190 191 semaphore_up(&thr_sem);192 189 } 193 190 … … 198 195 * then release everything, then again allocate, then release 199 196 */ 200 thread_t *t;201 int i;202 197 203 198 TPRINTF("Running stress test with size %d\n", size); … … 207 202 208 203 thr_cache = slab_cache_create("thread_cache", size, 0, NULL, NULL, 0); 209 semaphore_initialize(&thr_sem, 0); 210 for (i = 0; i < THREADS; i++) { 211 if (!(t = thread_create(slabtest, NULL, TASK, THREAD_FLAG_NONE, "slabtest"))) { 204 205 thread_t *threads[THREADS] = { }; 206 207 for (int i = 0; i < THREADS; i++) { 208 threads[i] = thread_create(slabtest, NULL, 209 TASK, THREAD_FLAG_NONE, "slabtest"); 210 if (threads[i]) { 211 thread_start(threads[i]); 212 } else { 212 213 TPRINTF("Could not create thread %d\n", i); 213 } else214 thread_ready(t);215 } 214 } 215 } 216 216 217 thread_sleep(1); 217 218 condvar_broadcast(&thread_starter); 218 219 219 for (i = 0; i < THREADS; i++) 220 semaphore_down(&thr_sem); 220 for (int i = 0; i < THREADS; i++) { 221 if (threads[i] != NULL) 222 thread_join(threads[i]); 223 } 221 224 222 225 slab_cache_destroy(thr_cache); -
kernel/test/synch/semaphore1.c
r86f862c rf2cb80a 89 89 thrd = thread_create(consumer, NULL, TASK, 90 90 THREAD_FLAG_NONE, "consumer"); 91 if (thrd) 92 thread_ready(thrd); 93 else 91 if (thrd) { 92 thread_start(thrd); 93 thread_detach(thrd); 94 } else { 94 95 TPRINTF("could not create consumer %d\n", i); 96 } 95 97 } 96 98 for (k = 0; k < (4 - i); k++) { 97 99 thrd = thread_create(producer, NULL, TASK, 98 100 THREAD_FLAG_NONE, "producer"); 99 if (thrd) 100 thread_ready(thrd); 101 else 101 if (thrd) { 102 thread_start(thrd); 103 thread_detach(thrd); 104 } else { 102 105 TPRINTF("could not create producer %d\n", i); 106 } 103 107 } 104 108 } -
kernel/test/synch/semaphore2.c
r86f862c rf2cb80a 92 92 thrd = thread_create(consumer, NULL, TASK, 93 93 THREAD_FLAG_NONE, "consumer"); 94 if (thrd) 95 thread_ready(thrd); 96 else 94 if (thrd) { 95 thread_start(thrd); 96 thread_detach(thrd); 97 } else { 97 98 TPRINTF("Error creating thread\n"); 99 } 98 100 } 99 101 -
kernel/test/thread/thread1.c
r86f862c rf2cb80a 38 38 39 39 static atomic_bool finish; 40 static atomic_size_t threads_finished;41 40 42 41 static void threadtest(void *data) … … 46 45 thread_usleep(100000); 47 46 } 48 atomic_inc(&threads_finished);49 47 } 50 48 51 49 const char *test_thread1(void) 52 50 { 53 unsigned int i; 54 size_t total = 0; 51 atomic_store(&finish, true); 55 52 56 atomic_store(&finish, true); 57 atomic_store(&threads_finished, 0); 53 thread_t *threads[THREADS] = { }; 58 54 59 for (i = 0; i < THREADS; i++) { 60 thread_t *t; 61 if (!(t = thread_create(threadtest, NULL, TASK, 62 THREAD_FLAG_NONE, "threadtest"))) { 55 for (int i = 0; i < THREADS; i++) { 56 threads[i] = thread_create(threadtest, NULL, 57 TASK, THREAD_FLAG_NONE, "threadtest"); 58 59 if (threads[i]) { 60 thread_start(threads[i]); 61 } else { 63 62 TPRINTF("Could not create thread %d\n", i); 64 63 break; 65 64 } 66 thread_ready(t);67 total++;68 65 } 69 66 … … 72 69 73 70 atomic_store(&finish, false); 74 while (atomic_load(&threads_finished) < total) { 75 TPRINTF("Threads left: %zu\n", total - atomic_load(&threads_finished)); 76 thread_sleep(1); 71 72 for (int i = 0; i < THREADS; i++) { 73 if (threads[i] != NULL) 74 thread_join(threads[i]); 75 76 TPRINTF("Threads left: %d\n", THREADS - i - 1); 77 77 } 78 78
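All of the kernel tests above converge on the same pattern: keep the created thread handles, start them with thread_start(), and reap them with thread_join() (which also consumes the reference), instead of polling a shared finished-counter with thread_sleep(). The sketch below shows the same shape in portable userspace C, using POSIX threads purely as a stand-in for the kernel's thread_create()/thread_start()/thread_join().

/* Join-based test harness: create, start, then join every thread you made. */
#include <pthread.h>
#include <stdio.h>

#define THREADS 8

static void *worker(void *arg)
{
    (void) arg;
    /* ... test body ... */
    return NULL;
}

int main(void)
{
    pthread_t threads[THREADS];
    int created[THREADS] = { 0 };

    for (int i = 0; i < THREADS; i++) {
        if (pthread_create(&threads[i], NULL, worker, NULL) == 0)
            created[i] = 1;          /* keep the handle for the join below */
        else
            printf("Could not create thread %d\n", i);
    }

    for (int i = 0; i < THREADS; i++) {
        if (created[i])
            pthread_join(threads[i], NULL);   /* wait, then drop the handle */
        printf("Threads left: %d\n", THREADS - i - 1);
    }
    return 0;
}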