Changeset f2cb80a in mainline for kernel


Timestamp:
2024-02-23T17:57:23Z
Author:
GitHub <noreply@…>
Children:
192019f
Parents:
86f862c (diff), 90ba06c (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
git-author:
boba-buba <120932204+boba-buba@…> (2024-02-23 17:57:23)
git-committer:
GitHub <noreply@…> (2024-02-23 17:57:23)
Message:

Merge branch 'HelenOS:master' into topic/packet-capture

Location:
kernel
Files:
1 added
1 deleted
58 edited
1 moved

  • kernel/arch/amd64/_link.ld.in

    r86f862c rf2cb80a  
    2323        }
    2424
    25         .mapped (PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS + SIZEOF(.unmapped)) : AT (BOOT_OFFSET + SIZEOF_HEADERS + SIZEOF(.unmapped)) {
     25        .text (PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS + SIZEOF(.unmapped)) : AT (BOOT_OFFSET + SIZEOF_HEADERS + SIZEOF(.unmapped)) {
    2626                ktext_start = .;
    2727                *(.text .text.*);
    2828                ktext_end = .;
     29        }
    2930
     31        /* stack unwinding data */
     32        .eh_frame_hdr : {
     33                eh_frame_hdr_start = .;
     34                *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*);
     35                eh_frame_hdr_end = .;
     36        }
     37        .eh_frame : {
     38                eh_frame_start = .;
     39                KEEP(*(.eh_frame .eh_frame.*));
     40                eh_frame_end = .;
     41        }
     42
     43        .data : {
    3044                kdata_start = .;
    31                 *(.data .data.*);              /* initialized data */
    32                 *(.rodata .rodata.*);
    33                 *(.eh_frame .eh_frame.*);      /* stack unwinding data */
    34                 *(.eh_frame_hdr .eh_frame_hdr.*);
    35                 *(COMMON);      /* global variables */
     45                *(.rodata .rodata.*);  /* read-only global variables */
     46                *(.data .data.*);      /* non-zero initialized global variables */
    3647
    37                 /* bss can't be omitted from the ELF image. */
    38                 *(.bss .bss.*);        /* uninitialized static variables */
      48                /*
      49                 * When .bss is not physically present in the ELF file (MemSz > FileSz),
      50                 * the kernel crashes during early boot. It is not clear which part of
      51                 * the boot process is to blame; for now, keep .bss packaged with .data
      52                 * so that FileSz == MemSz.
      53                 */
     54
     55                 *(.bss .bss.*);        /* uninitialized global variables */
     56                *(COMMON);             /* non-`static` global variables without an extern declaration */
    3957                kdata_end = .;
    4058        }
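
The FileSz == MemSz invariant that the new comment relies on can be verified against the built image. A minimal host-side sketch, not part of the changeset, assuming the standard <elf.h> definitions and a 64-bit image (the default path is a placeholder):

    #include <elf.h>
    #include <stdio.h>

    /* Flag PT_LOAD segments whose MemSz exceeds FileSz, i.e. segments
     * with a trailing zero-filled .bss-like region absent from the file. */
    int main(int argc, char **argv)
    {
            FILE *f = fopen(argc > 1 ? argv[1] : "kernel.elf", "rb");
            if (f == NULL)
                    return 1;

            Elf64_Ehdr eh;
            if (fread(&eh, sizeof(eh), 1, f) != 1)
                    return 1;

            for (int i = 0; i < eh.e_phnum; i++) {
                    Elf64_Phdr ph;
                    fseek(f, (long) (eh.e_phoff + i * eh.e_phentsize), SEEK_SET);
                    if (fread(&ph, sizeof(ph), 1, f) != 1)
                            return 1;
                    if (ph.p_type == PT_LOAD && ph.p_memsz != ph.p_filesz)
                            printf("segment %d: FileSz != MemSz\n", i);
            }

            fclose(f);
            return 0;
    }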
  • kernel/arch/amd64/include/arch/mm/page.h

    r86f862c rf2cb80a  
    192192        unsigned int accessed : 1;
    193193        unsigned int dirty : 1;
    194         unsigned int unused : 1;
     194        unsigned int pat : 1;
    195195        unsigned int global : 1;
    196196        unsigned int soft_valid : 1;  /**< Valid content even if present bit is cleared. */
     
    211211            p->writeable << PAGE_WRITE_SHIFT |
    212212            (!p->no_execute) << PAGE_EXEC_SHIFT |
    213             p->global << PAGE_GLOBAL_SHIFT);
     213            p->global << PAGE_GLOBAL_SHIFT |
     214            p->page_write_through << PAGE_WRITE_COMBINE_SHIFT);
    214215}
    215216
     
    225226        pte_t *p = &pt[i];
    226227
    227         p->page_cache_disable = !(flags & PAGE_CACHEABLE);
    228228        p->present = !(flags & PAGE_NOT_PRESENT);
    229229        p->uaccessible = (flags & PAGE_USER) != 0;
     
    232232        p->global = (flags & PAGE_GLOBAL) != 0;
    233233
     234        if (flags & PAGE_WRITE_COMBINE) {
     235                /* We have mapped PCD+PWT bits to write-combine mode via PAT MSR. */
     236                /* (If PAT is unsupported, it will default to uncached.) */
     237                p->page_cache_disable = 1;
     238                p->page_write_through = 1;
     239        } else {
     240                p->page_cache_disable = !(flags & PAGE_CACHEABLE);
     241                p->page_write_through = 0;
     242        }
     243
    234244        /*
    235245         * Ensure that there is at least one bit set even if the present bit is cleared.
  • kernel/arch/amd64/include/arch/mm/pat.h

    r86f862c rf2cb80a  
    11/*
    2  * Copyright (c) 2006 Josef Cejka
     2 * Copyright (c) 2024 Jiří Zárevúcky
    33 * All rights reserved.
    44 *
     
    2727 */
    2828
    29 /** @addtogroup libcamd64
     29/** @addtogroup kernel_amd64_mm
    3030 * @{
    3131 */
     
    3333 */
    3434
    35 #ifndef _LIBC_STACKARG_H_
    36 #define _LIBC_STACKARG_H_
     35#ifndef KERN_amd64_MM_PAT_H_
     36#define KERN_amd64_MM_PAT_H_
     37
     38#include <arch/asm.h>
     39#include <arch/cpuid.h>
     40
     41#define MSR_IA32_PAT  0x00000277
     42
     43typedef enum {
     44        PAT_TYPE_UNCACHEABLE = 0,
     45        PAT_TYPE_WRITE_COMBINING = 1,
     46        PAT_TYPE_WRITE_THROUGH = 4,
     47        PAT_TYPE_WRITE_PROTECTED = 5,
     48        PAT_TYPE_WRITE_BACK = 6,
     49        PAT_TYPE_UNCACHED  = 7,
     50} pat_type_t;
     51
     52/**
     53 * Assign caching type for a particular combination of PAT,
     54 * PCD and PWT bits in PTE.
     55 */
     56static inline void pat_set_mapping(bool pat, bool pcd, bool pwt,
     57    pat_type_t type)
     58{
     59        int index = pat << 2 | pcd << 1 | pwt;
     60        int shift = index * 8;
     61
     62        uint64_t r = read_msr(MSR_IA32_PAT);
     63        r &= ~(0xffull << shift);
     64        r |= ((uint64_t) type) << shift;
     65        write_msr(MSR_IA32_PAT, r);
     66}
     67
     68static inline bool pat_supported(void)
     69{
     70        if (!has_cpuid())
     71                return false;
     72
     73        cpu_info_t info;
     74        cpuid(INTEL_CPUID_STANDARD, &info);
     75
     76        return (info.cpuid_edx & (1 << 16)) != 0;
     77}
    3778
    3879#endif
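
For context on the new pat.h interface: the IA32_PAT MSR holds eight 8-bit entries, and a page's entry index is formed from its PAT, PCD and PWT page-table bits as (PAT << 2 | PCD << 1 | PWT), which is exactly what pat_set_mapping() computes. A sketch of the boot-time use, mirroring the calls added to amd64.c and ia32.c below (the wrapper is hypothetical):

    #include <arch/mm/pat.h>

    /* Repurpose the PCD=1, PWT=1 encoding (PAT entry 3) to mean
     * write-combining. If PAT is unsupported, the encoding keeps its
     * architectural meaning and stays uncached, which is a safe fallback. */
    static void configure_write_combining(void)  /* hypothetical wrapper */
    {
            if (!pat_supported())
                    return;

            /* index = 0 << 2 | 1 << 1 | 1 = 3 */
            pat_set_mapping(false, true, true, PAT_TYPE_WRITE_COMBINING);
    }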
  • kernel/arch/amd64/src/amd64.c

    r86f862c rf2cb80a  
    6060#include <arch/vreg.h>
    6161#include <arch/kseg.h>
     62#include <arch/mm/pat.h>
    6263#include <genarch/pic/pic_ops.h>
    6364
     
    115116        /* Disable alignment check */
    116117        write_cr0(read_cr0() & ~CR0_AM);
     118
     119        /* Use PCD+PWT bit combination in PTE to mean write-combining mode. */
     120        if (pat_supported())
     121                pat_set_mapping(false, true, true, PAT_TYPE_WRITE_COMBINING);
    117122
    118123        if (config.cpu_active == 1) {
  • kernel/arch/arm32/_link.ld.in

    r86f862c rf2cb80a  
    2626        kernel_load_address = .;
    2727        . = . + SIZEOF_HEADERS;
     28
    2829        .text : {
    2930                ktext_start = .;
     
    3132                ktext_end = .;
    3233        }
     34
     35        /* stack unwinding data */
     36        .eh_frame_hdr : {
     37                eh_frame_hdr_start = .;
     38                *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*);
     39                eh_frame_hdr_end = .;
     40        }
     41
     42        .eh_frame : {
     43                eh_frame_start = .;
     44                KEEP(*(.eh_frame .eh_frame.*));
     45                eh_frame_end = .;
     46        }
     47
    3348        .data : {
    3449                kdata_start = .;
     
    3853
    3954                *(.rodata .rodata.*);
    40                 *(.eh_frame .eh_frame.*);       /* stack unwinding data */
    41                 *(.eh_frame_hdr .eh_frame_hdr.*);
    4255                *(.sdata .sdata.*);
    43         }
    44         .sbss : {
    4556                *(.sbss .sbss.*);
    4657                *(.scommon .scommon.*);
     58                kdata_end = .;
    4759        }
    48 
    49         kdata_end = .;
    5060
    5161        .comment 0 : { *(.comment); }
  • kernel/arch/arm64/_link.ld.in

    r86f862c rf2cb80a  
    1919        kernel_load_address = LOAD_ADDRESS_V;
    2020
    21         .image (LOAD_ADDRESS_V + SIZEOF_HEADERS) : AT (LOAD_ADDRESS_P + SIZEOF_HEADERS) {
     21        .text (LOAD_ADDRESS_V + SIZEOF_HEADERS) : AT (LOAD_ADDRESS_P + SIZEOF_HEADERS) {
    2222                . = ALIGN(16);
    2323                ktext_start = .;
     
    2525                *(.text .text.*);
    2626                ktext_end = .;
     27        }
    2728
     29        /* stack unwinding data */
     30        .eh_frame_hdr : {
     31                eh_frame_hdr_start = .;
     32                *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*);
     33                eh_frame_hdr_end = .;
     34        }
     35
     36        .eh_frame : {
     37                eh_frame_start = .;
     38                KEEP(*(.eh_frame .eh_frame.*));
     39                eh_frame_end = .;
     40        }
     41
     42        .data : {
    2843                kdata_start = .;
    2944                KEEP(*(K_DATA_START))
     45                *(.rodata .rodata.*);
    3046                *(.data .data.*);                       /* initialized data */
    3147                *(.bss .bss.*);                        /* uninitialized static variables */
    3248                *(COMMON);                      /* global variables */
    33 
    34                 *(.rodata .rodata.*);
    35                 *(.eh_frame .eh_frame.*);       /* stack unwinding data */
    36                 *(.eh_frame_hdr .eh_frame_hdr.*);
    37 
    3849                kdata_end = .;
    3950        }
  • kernel/arch/arm64/src/interrupt.c

    r86f862c rf2cb80a  
    137137        while (drift > timer_increment) {
    138138                drift -= timer_increment;
    139                 CPU->missed_clock_ticks++;
     139                CPU_LOCAL->missed_clock_ticks++;
    140140        }
    141141        CNTV_CVAL_EL0_write(cntvct + timer_increment - drift);
  • kernel/arch/ia32/_link.ld.in

    r86f862c rf2cb80a  
    2222        }
    2323
    24         .mapped (PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS + SIZEOF(.unmapped)): AT (BOOT_OFFSET + SIZEOF_HEADERS + SIZEOF(.unmapped)) {
     24        .text (PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS + SIZEOF(.unmapped)): AT (BOOT_OFFSET + SIZEOF_HEADERS + SIZEOF(.unmapped)) {
    2525                ktext_start = .;
    2626                *(.text .text.*);
    2727                ktext_end = .;
     28        }
    2829
     30        /* stack unwinding data */
     31        .eh_frame_hdr : {
     32                eh_frame_hdr_start = .;
     33                *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*);
     34                eh_frame_hdr_end = .;
     35        }
     36        .eh_frame : {
     37                eh_frame_start = .;
     38                KEEP(*(.eh_frame .eh_frame.*));
     39                eh_frame_end = .;
     40        }
     41
     42        .data : {
    2943                kdata_start = .;
    30                 *(.data .data.*);               /* initialized data */
    31                 *(.rodata .rodata.*);
    32                 *(.eh_frame .eh_frame.*);       /* stack unwinding data */
    33                 *(.eh_frame_hdr .eh_frame_hdr.*);
    34                 *(COMMON);              /* global variables */
     44                *(.rodata .rodata.*);  /* read-only global variables */
     45                *(.data .data.*);      /* non-zero initialized global variables */
    3546
    36                 /* bss can't be omitted from the ELF image. */
    37                 *(.bss .bss.*);                /* uninitialized static variables */
      47                /*
      48                 * When .bss is not physically present in the ELF file (MemSz > FileSz),
      49                 * the kernel crashes during early boot. It is not clear which part of
      50                 * the boot process is to blame; for now, keep .bss packaged with .data
      51                 * so that FileSz == MemSz.
      52                 */
     53
     54                 *(.bss .bss.*);        /* uninitialized global variables */
     55                *(COMMON);             /* non-`static` global variables without an extern declaration */
    3856                kdata_end = .;
    3957        }
  • kernel/arch/ia32/include/arch/mm/page.h

    r86f862c rf2cb80a  
    190190            p->writeable << PAGE_WRITE_SHIFT |
    191191            1 << PAGE_EXEC_SHIFT |
    192             p->global << PAGE_GLOBAL_SHIFT);
     192            p->global << PAGE_GLOBAL_SHIFT |
     193            p->page_write_through << PAGE_WRITE_COMBINE_SHIFT);
    193194}
    194195
     
    197198        pte_t *p = &pt[i];
    198199
    199         p->page_cache_disable = !(flags & PAGE_CACHEABLE);
    200200        p->present = !(flags & PAGE_NOT_PRESENT);
    201201        p->uaccessible = (flags & PAGE_USER) != 0;
    202202        p->writeable = (flags & PAGE_WRITE) != 0;
    203203        p->global = (flags & PAGE_GLOBAL) != 0;
     204
     205        if (flags & PAGE_WRITE_COMBINE) {
     206                /* We have mapped PCD+PWT bits to write-combine mode via PAT MSR. */
     207                /* (If PAT is unsupported, it will default to uncached.) */
     208                p->page_cache_disable = 1;
     209                p->page_write_through = 1;
     210        } else {
     211                p->page_cache_disable = !(flags & PAGE_CACHEABLE);
     212                p->page_write_through = 0;
     213        }
    204214
    205215        /*
  • kernel/arch/ia32/src/ia32.c

    r86f862c rf2cb80a  
    6161#include <arch/pm.h>
    6262#include <arch/vreg.h>
     63#include <arch/mm/pat.h>
    6364
    6465#ifdef CONFIG_SMP
     
    104105{
    105106        pm_init();
     107
     108        /* Use PCD+PWT bit combination in PTE to mean write-combining mode. */
     109        if (pat_supported())
     110                pat_set_mapping(false, true, true, PAT_TYPE_WRITE_COMBINING);
    106111
    107112        if (config.cpu_active == 1) {
  • kernel/arch/ia64/_link.ld.in

    r86f862c rf2cb80a  
    1515        kernel_load_address = LOAD_ADDRESS_V;
    1616
    17         .image (LOAD_ADDRESS_V + SIZEOF_HEADERS): AT (LOAD_ADDRESS_P + SIZEOF_HEADERS) {
     17        .text (LOAD_ADDRESS_V + SIZEOF_HEADERS): AT (LOAD_ADDRESS_P + SIZEOF_HEADERS) {
    1818                . = ALIGN(16);
    1919                ktext_start = .;
     
    2121                *(.text .text.*)
    2222                ktext_end = .;
     23        }
    2324
     25        /* stack unwinding data */
     26        .eh_frame_hdr : {
     27                eh_frame_hdr_start = .;
     28                *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*);
     29                eh_frame_hdr_end = .;
     30        }
     31
     32        .eh_frame : {
     33                eh_frame_start = .;
     34                KEEP(*(.eh_frame .eh_frame.*));
     35                eh_frame_end = .;
     36        }
      37
     38        .data : {
    2439                kdata_start = .;
    2540                KEEP(*(K_DATA_START));
    2641                *(.rodata .rodata.*);
    27                 *(.eh_frame .eh_frame.*);       /* stack unwinding data */
    28                 *(.eh_frame_hdr .eh_frame_hdr.*);
    2942                *(.opd)
    3043                *(.data .data.*)
  • kernel/arch/ia64/src/drivers/it.c

    r86f862c rf2cb80a  
    122122                itm += IT_DELTA;
    123123                if (itm - itc < 0)
    124                         CPU->missed_clock_ticks++;
     124                        CPU_LOCAL->missed_clock_ticks++;
    125125                else
    126126                        break;
  • kernel/arch/ia64/src/drivers/ski.c

    r86f862c rf2cb80a  
    258258
    259259        instance->srlnin = srlnin;
    260         thread_ready(instance->thread);
     260        thread_start(instance->thread);
    261261
    262262        sysinfo_set_item_val("kbd", NULL, true);
  • kernel/arch/mips32/_link.ld.in

    r86f862c rf2cb80a  
    3333                ktext_end = .;
    3434        }
     35
     36        /* stack unwinding data */
     37        .eh_frame_hdr : {
     38                eh_frame_hdr_start = .;
     39                *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*);
     40                eh_frame_hdr_end = .;
     41        }
     42
     43        .eh_frame : {
     44                eh_frame_start = .;
     45                KEEP(*(.eh_frame .eh_frame.*));
     46                eh_frame_end = .;
     47        }
     48
    3549        .data : {
    3650                kdata_start = .;
    3751                *(.data .data.*);                       /* initialized data */
    3852                *(.rodata .rodata.*);
    39                 *(.eh_frame .eh_frame.*);       /* stack unwinding data */
    40                 *(.eh_frame_hdr .eh_frame_hdr.*);
    4153                *(.sdata .sdata.*);
    4254                *(.reginfo);
     
    4658                *(COMMON);                      /* global variables */
    4759        }
     60
    4861        _gp = . + 0x8000;
    4962        .lit8 : { *(.lit8) }
  • kernel/arch/mips32/src/interrupt.c

    r86f862c rf2cb80a  
    121121        while (drift > cp0_compare_value) {
    122122                drift -= cp0_compare_value;
    123                 CPU->missed_clock_ticks++;
     123                CPU_LOCAL->missed_clock_ticks++;
    124124        }
    125125
  • kernel/arch/ppc32/_link.ld.in

    r86f862c rf2cb80a  
    2525        }
    2626
    27         .mapped PA2KA(BOOT_OFFSET): AT (BOOT_OFFSET) {
     27        .text PA2KA(BOOT_OFFSET): AT (BOOT_OFFSET) {
    2828                ktext_start = .;
    2929                KEEP(*(K_TEXT_START));
    3030                *(.text .text.*);
    3131                ktext_end = .;
     32        }
    3233
     34        /* stack unwinding data */
     35        .eh_frame_hdr : {
     36                eh_frame_hdr_start = .;
     37                *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*);
     38                eh_frame_hdr_end = .;
     39        }
     40
     41        .eh_frame : {
     42                eh_frame_start = .;
     43                KEEP(*(.eh_frame .eh_frame.*));
     44                eh_frame_end = .;
     45        }
     46
     47        .data : {
    3348                kdata_start = .;
    3449                KEEP(*(K_DATA_START));
    3550                *(.rodata .rodata.*);
    36                 *(.eh_frame .eh_frame.*);       /* stack unwinding data */
    37                 *(.eh_frame_hdr .eh_frame_hdr.*);
    3851                *(.data .data.*);       /* initialized data */
    3952                *(.sdata .sdata.*);
  • kernel/arch/riscv64/_link.ld.in

    r86f862c rf2cb80a  
    1616        kernel_load_address = PA2KA(BOOT_OFFSET);
    1717
    18         .image (PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS) : AT (BOOT_OFFSET + SIZEOF_HEADERS) {
     18        .text (PA2KA(BOOT_OFFSET) + SIZEOF_HEADERS) : AT (BOOT_OFFSET + SIZEOF_HEADERS) {
    1919                ktext_start = .;
    2020                KEEP(*(K_TEXT_START));
    2121                *(.text .text.*);
    2222                ktext_end = .;
     23        }
    2324
     25        /* stack unwinding data */
     26        .eh_frame_hdr : {
     27                eh_frame_hdr_start = .;
     28                *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*);
     29                eh_frame_hdr_end = .;
     30        }
     31
     32        .eh_frame : {
     33                eh_frame_start = .;
     34                KEEP(*(.eh_frame .eh_frame.*));
     35                eh_frame_end = .;
     36        }
     37
     38        .data : {
    2439                kdata_start = .;
    2540                *(.data .data.*);                       /* initialized data */
    2641                *(.rodata .rodata.*);
    27                 *(.eh_frame .eh_frame.*);       /* stack unwinding data */
    28                 *(.eh_frame_hdr .eh_frame_hdr.*);
    2942                *(.sdata .sdata.*);
    3043                *(.sbss .sbss.*);
  • kernel/arch/sparc64/_link.ld.in

    r86f862c rf2cb80a  
    1414        kernel_load_address = VMA;
    1515
    16         .image (VMA + SIZEOF_HEADERS): AT (LMA + SIZEOF_HEADERS) {
     16        .text (VMA + SIZEOF_HEADERS): AT (LMA + SIZEOF_HEADERS) {
    1717                ktext_start = .;
    1818                KEEP(*(K_TEXT_START));
    1919                *(.text .text.*);
    2020                ktext_end = .;
     21        }
    2122
     23        /* stack unwinding data */
     24        .eh_frame_hdr : {
     25                eh_frame_hdr_start = .;
     26                *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*);
     27                eh_frame_hdr_end = .;
     28        }
     29
     30        .eh_frame : {
     31                eh_frame_start = .;
     32                KEEP(*(.eh_frame .eh_frame.*));
     33                eh_frame_end = .;
     34        }
     35
     36        .data : {
    2237                kdata_start = .;
    2338                KEEP(*(K_DATA_START));
    2439                *(.rodata .rodata.*);
    25                 *(.eh_frame .eh_frame.*);       /* stack unwinding data */
    26                 *(.eh_frame_hdr .eh_frame_hdr.*);
    2740                *(.data .data.*);                   /* initialized data */
    2841                *(.sdata .sdata.*);
  • kernel/arch/sparc64/src/drivers/niagara.c

    r86f862c rf2cb80a  
    253253
    254254                        instance->srlnin = srln;
    255                         thread_ready(instance->thread);
     255                        thread_start(instance->thread);
    256256                }
    257257        }
  • kernel/arch/sparc64/src/drivers/tick.c

    r86f862c rf2cb80a  
    117117        while (drift > CPU->arch.clock_frequency / HZ) {
    118118                drift -= CPU->arch.clock_frequency / HZ;
    119                 CPU->missed_clock_ticks++;
     119                CPU_LOCAL->missed_clock_ticks++;
    120120        }
    121121        CPU->arch.next_tick_cmpr = tick_counter_read() +
  • kernel/arch/sparc64/src/proc/sun4u/scheduler.c

    r86f862c rf2cb80a  
    7676{
    7777        if (THREAD->uspace) {
     78                asm volatile ("flushw");
     79
    7880                /* sample the state of the userspace window buffer */
    7981                THREAD->arch.uspace_window_buffer =
  • kernel/arch/sparc64/src/proc/sun4v/scheduler.c

    r86f862c rf2cb80a  
    6868{
    6969        if (THREAD->uspace) {
     70                asm volatile ("flushw");
     71
    7072                /* sample the state of the userspace window buffer */
    7173                THREAD->arch.uspace_window_buffer =
  • kernel/genarch/src/fb/fb.c

    r86f862c rf2cb80a  
    633633
    634634        instance->addr = (uint8_t *) km_map((uintptr_t) props->addr, fbsize,
    635             KM_NATURAL_ALIGNMENT, PAGE_WRITE | PAGE_NOT_CACHEABLE);
     635            KM_NATURAL_ALIGNMENT, PAGE_WRITE | PAGE_WRITE_COMBINE);
    636636        if (!instance->addr) {
    637637                LOG("Unable to map framebuffer.");
  • kernel/genarch/src/kbrd/kbrd.c

    r86f862c rf2cb80a  
    200200
    201201        instance->sink = sink;
    202         thread_ready(instance->thread);
     202        thread_start(instance->thread);
    203203
    204204        return &instance->raw;
  • kernel/genarch/src/kbrd/kbrd_at.c

    r86f862c rf2cb80a  
    198198
    199199        instance->sink = sink;
    200         thread_ready(instance->thread);
     200        thread_start(instance->thread);
    201201
    202202        return &instance->raw;
  • kernel/genarch/src/srln/srln.c

    r86f862c rf2cb80a  
    156156
    157157        instance->sink = sink;
    158         thread_ready(instance->thread);
     158        thread_start(instance->thread);
    159159
    160160        return &instance->raw;
  • kernel/generic/include/atomic.h

    r86f862c rf2cb80a  
    3939#include <typedefs.h>
    4040#include <stdatomic.h>
     41
     42/*
      43 * Shorthand for relaxed atomic reads/writes, needed to formally avoid
      44 * undefined behavior in cases where a variable is accessed from multiple
      45 * threads and we don't particularly care about ordering
      46 * (e.g. statistics printouts). These most likely translate into the same
      47 * assembly instructions as regular reads and writes.
     48 */
     49#define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
     50#define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)
    4151
    4252#define atomic_predec(val) \
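
A usage sketch for the new shorthands (the counter and helpers are illustrative, not part of the changeset), assuming a single writer and any number of statistics readers:

    #include <atomic.h>

    static atomic_uint_fast64_t pages_mapped;  /* illustrative counter */

    /* Writer side: only one thread updates the counter, so a relaxed
     * load + store pair suffices (note this is not an atomic increment). */
    void note_page_mapped(void)
    {
            atomic_set_unordered(&pages_mapped,
                atomic_get_unordered(&pages_mapped) + 1);
    }

    /* Reader side (e.g. a statistics printout): the value may be stale,
     * but the read is tear-free and formally race-free. */
    uint64_t pages_mapped_snapshot(void)
    {
            return atomic_get_unordered(&pages_mapped);
    }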
  • kernel/generic/include/context.h

    r86f862c rf2cb80a  
    3636#define KERN_CONTEXT_H_
    3737
     38#include <panic.h>
    3839#include <trace.h>
    3940#include <arch/context.h>
     41#include <arch/faddr.h>
    4042
    4143#define context_set_generic(ctx, _pc, stack, size) \
     
    4749extern int context_save_arch(context_t *ctx) __attribute__((returns_twice));
    4850extern void context_restore_arch(context_t *ctx) __attribute__((noreturn));
    49 
    50 /** Save register context.
    51  *
    52  * Save the current register context (including stack pointer) to a context
    53  * structure. A subsequent call to context_restore() will return to the same
    54  * address as the corresponding call to context_save().
    55  *
    56  * Note that context_save_arch() must reuse the stack frame of the function
    57  * which called context_save(). We guarantee this by:
    58  *
    59  *   a) implementing context_save_arch() in assembly so that it does not create
    60  *      its own stack frame, and by
    61  *   b) defining context_save() as a macro because the inline keyword is just a
    62  *      hint for the compiler, not a real constraint; the application of a macro
    63  *      will definitely not create a stack frame either.
    64  *
    65  * To imagine what could happen if there were some extra stack frames created
    66  * either by context_save() or context_save_arch(), we need to realize that the
    67  * sp saved in the contex_t structure points to the current stack frame as it
    68  * existed when context_save_arch() was executing. After the return from
    69  * context_save_arch() and context_save(), any extra stack frames created by
    70  * these functions will be destroyed and their contents sooner or later
    71  * overwritten by functions called next. Any attempt to restore to a context
    72  * saved like that would therefore lead to a disaster.
    73  *
    74  * @param ctx Context structure.
    75  *
    76  * @return context_save() returns 1, context_restore() returns 0.
    77  *
    78  */
    79 #define context_save(ctx)  context_save_arch(ctx)
    8051
    8152/** Restore register context.
     
    9162 *
    9263 */
    93 _NO_TRACE static inline void context_restore(context_t *ctx)
     64_NO_TRACE __attribute__((noreturn))
     65    static inline void context_restore(context_t *ctx)
    9466{
    9567        context_restore_arch(ctx);
     68}
     69
     70/**
     71 * Saves current context to the variable pointed to by `self`,
     72 * and restores the context denoted by `other`.
     73 *
     74 * When the `self` context is later restored by another call to
     75 * `context_swap()`, the control flow behaves as if the earlier call to
     76 * `context_swap()` just returned.
     77 */
     78_NO_TRACE static inline void context_swap(context_t *self, context_t *other)
     79{
     80        if (context_save_arch(self))
     81                context_restore_arch(other);
     82}
     83
     84_NO_TRACE static inline void context_create(context_t *context,
     85    void (*fn)(void), void *stack_base, size_t stack_size)
     86{
     87        *context = (context_t) { 0 };
     88        context_set(context, FADDR(fn), stack_base, stack_size);
     89}
     90
     91__attribute__((noreturn)) static inline void context_replace(void (*fn)(void),
     92    void *stack_base, size_t stack_size)
     93{
     94        context_t ctx;
     95        context_create(&ctx, fn, stack_base, stack_size);
     96        context_restore(&ctx);
    9697}
    9798
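
A sketch of how the new primitives compose: context_create() prepares a context that starts executing fn on the given stack, and context_swap() saves one context and resumes another. The helper and stack below are illustrative:

    #include <context.h>

    static context_t main_ctx, worker_ctx;
    static uint8_t worker_stack[STACK_SIZE];

    static void worker(void)
    {
            /* ... do one unit of work ... */
            context_swap(&worker_ctx, &main_ctx);  /* yield back */
    }

    void run_worker_once(void)  /* hypothetical helper */
    {
            context_create(&worker_ctx, worker, worker_stack,
                sizeof(worker_stack));

            /* Control returns here once worker() swaps back. */
            context_swap(&main_ctx, &worker_ctx);
    }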
  • kernel/generic/include/cpu.h

    r86f862c rf2cb80a  
    4444#include <arch.h>
    4545
    46 #define CPU                  CURRENT->cpu
     46#define CPU                  (CURRENT->cpu)
     47#define CPU_LOCAL            (&CPU->local)
     48
     49/**
     50 * Contents of CPU_LOCAL. These are variables that are only ever accessed by
     51 * the CPU they belong to, so they don't need any synchronization,
     52 * just locally disabled interrupts.
     53 */
     54typedef struct cpu_local {
     55        /**
     56         * When system clock loses a tick, it is
     57         * recorded here so that clock() can react.
     58         */
     59        size_t missed_clock_ticks;
     60
     61        uint64_t current_clock_tick;
     62        uint64_t preempt_deadline;  /* < when should the currently running thread be preempted */
     63        uint64_t relink_deadline;
     64
     65        /**
     66         * Stack used by scheduler when there is no running thread.
     67         * This field is unchanged after initialization.
     68         */
     69        uint8_t *stack;
     70
     71        /**
     72         * Processor cycle accounting.
     73         */
     74        bool idle;
     75        uint64_t last_cycle;
     76
     77        context_t scheduler_context;
     78
     79        struct thread *prev_thread;
     80} cpu_local_t;
    4781
    4882/** CPU structure.
     
    6397
    6498        /**
    65          * When system clock loses a tick, it is
    66          * recorded here so that clock() can react.
    67          * This variable is CPU-local and can be
    68          * only accessed when interrupts are
    69          * disabled.
    70          */
    71         size_t missed_clock_ticks;
    72 
    73         /** Can only be accessed by the CPU represented by this structure when interrupts are disabled. */
    74         uint64_t current_clock_tick;
    75         uint64_t preempt_deadline;  /* < when should the currently running thread be preempted */
    76         uint64_t relink_deadline;
    77 
    78         /**
    7999         * Processor cycle accounting.
    80100         */
    81         bool idle;
    82         uint64_t last_cycle;
    83101        atomic_time_stat_t idle_cycles;
    84102        atomic_time_stat_t busy_cycles;
     
    103121        _Atomic(struct thread *) fpu_owner;
    104122
    105         /**
    106          * Stack used by scheduler when there is no running thread.
    107          */
    108         uint8_t *stack;
     123        cpu_local_t local;
    109124} cpu_t;
    110125
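
A sketch of the access discipline for the new cpu_local_t fields, matching the timer-drift loops updated elsewhere in this changeset (the helper is hypothetical):

    #include <cpu.h>
    #include <arch/asm.h>

    void account_missed_tick(void)  /* hypothetical helper */
    {
            ipl_t ipl = interrupts_disable();

            /* Only this CPU ever touches its own cpu_local_t, so no lock
             * is needed; disabled interrupts keep out the only possible
             * concurrent accessor (an interrupt handler on this CPU). */
            CPU_LOCAL->missed_clock_ticks++;

            interrupts_restore(ipl);
    }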
  • kernel/generic/include/lib/refcount.h

    r86f862c rf2cb80a  
    4949} atomic_refcount_t;
    5050
     51#define REFCOUNT_INITIALIZER() { \
     52        .__cnt = ATOMIC_VAR_INIT(0), \
     53}
     54
    5155static inline void refcount_init(atomic_refcount_t *rc)
    5256{
    53         atomic_store_explicit(&rc->__cnt, 0, memory_order_relaxed);
     57        atomic_init(&rc->__cnt, 0);
    5458}
    5559
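
A sketch contrasting the two initialization paths (the object is illustrative): REFCOUNT_INITIALIZER() covers statically allocated objects, while refcount_init() now uses atomic_init() for objects constructed at runtime:

    #include <lib/refcount.h>

    /* Static object: the count is set at compile time. */
    static struct illustrative_object {
            atomic_refcount_t refcount;
    } boot_object = {
            .refcount = REFCOUNT_INITIALIZER(),
    };

    /* Heap/stack object: initialize before the first up()/down(). */
    static void object_construct(struct illustrative_object *obj)
    {
            refcount_init(&obj->refcount);
    }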
  • kernel/generic/include/mm/mm.h

    r86f862c rf2cb80a  
    4646#define PAGE_EXEC_SHIFT                 5
    4747#define PAGE_GLOBAL_SHIFT               6
     48#define PAGE_WRITE_COMBINE_SHIFT  7
    4849
    4950#define PAGE_NOT_CACHEABLE              (0 << PAGE_CACHEABLE_SHIFT)
     
    6263#define PAGE_GLOBAL                     (1 << PAGE_GLOBAL_SHIFT)
    6364
     65#define PAGE_WRITE_COMBINE  (1 << PAGE_WRITE_COMBINE_SHIFT)
     66
    6467#endif
    6568
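
Usage of the new flag mirrors the framebuffer mapping updated in kernel/genarch/src/fb/fb.c below; a sketch, assuming the km_map() call as it appears there (the wrapper is hypothetical):

    #include <mm/km.h>
    #include <mm/mm.h>

    /* Map a device framebuffer write-combined: stores may be buffered and
     * merged by the CPU, which is much faster than fully uncached access
     * while still bypassing the cache. */
    uint8_t *map_framebuffer(uintptr_t paddr, size_t fbsize)
    {
            return (uint8_t *) km_map(paddr, fbsize, KM_NATURAL_ALIGNMENT,
                PAGE_WRITE | PAGE_WRITE_COMBINE);
    }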
  • kernel/generic/include/proc/scheduler.h

    r86f862c rf2cb80a  
    4141#include <atomic.h>
    4242#include <adt/list.h>
     43#include <abi/proc/thread.h>
    4344
    4445#define RQ_COUNT          16
     
    5657
    5758extern void scheduler_fpu_lazy_request(void);
    58 extern void scheduler(void);
    59 extern void scheduler_locked(ipl_t);
    6059extern void kcpulb(void *arg);
    6160
    6261extern void sched_print_list(void);
     62
     63extern void scheduler_run(void) __attribute__((noreturn));
     64extern void scheduler_enter(state_t);
     65
     66extern void thread_main_func(void);
    6367
    6468/*
  • kernel/generic/include/proc/task.h

    r86f862c rf2cb80a  
    8888
    8989        /** Number of references (i.e. threads). */
    90         atomic_size_t refcount;
     90        atomic_refcount_t refcount;
    9191        /** Number of threads that haven't exited yet. */
    9292        // TODO: remove
     
    144144extern void task_done(void);
    145145extern task_t *task_create(as_t *, const char *);
    146 extern void task_destroy(task_t *);
    147146extern void task_hold(task_t *);
    148147extern void task_release(task_t *);
  • kernel/generic/include/proc/thread.h

    r86f862c rf2cb80a  
    9595        waitq_t join_wq;
    9696
    97         /** Lock protecting thread structure.
     97        /** Thread accounting. */
     98        atomic_time_stat_t ucycles;
     99        atomic_time_stat_t kcycles;
     100
     101        /** Architecture-specific data. */
     102        thread_arch_t arch;
     103
     104#ifdef CONFIG_UDEBUG
     105        /**
     106         * If true, the scheduler will print a stack trace
     107         * to the kernel console upon scheduling this thread.
     108         */
     109        atomic_int_fast8_t btrace;
     110
     111        /** Debugging stuff */
     112        udebug_thread_t udebug;
     113#endif /* CONFIG_UDEBUG */
     114
     115        /*
     116         * Immutable fields.
    98117         *
    99          * Protects the whole thread structure except fields listed above.
    100          */
    101         IRQ_SPINLOCK_DECLARE(lock);
    102 
    103         char name[THREAD_NAME_BUFLEN];
     118         * These fields are only modified during initialization, and are not
     119         * changed at any time between initialization and destruction.
     120         * Can be accessed without synchronization in most places.
     121         */
     122
     123        /** Thread ID. */
     124        thread_id_t tid;
    104125
    105126        /** Function implementing the thread. */
     
    108129        void *thread_arg;
    109130
     131        char name[THREAD_NAME_BUFLEN];
     132
     133        /** Thread is executed in user space. */
     134        bool uspace;
     135
     136        /** Thread doesn't affect accumulated accounting. */
     137        bool uncounted;
     138
     139        /** Containing task. */
     140        task_t *task;
     141
     142        /** Thread's kernel stack. */
     143        uint8_t *kstack;
     144
     145        /*
     146         * Local fields.
     147         *
     148         * These fields can be safely accessed from code that _controls execution_
     149         * of this thread. Code controls execution of a thread if either:
     150         *  - it runs in the context of said thread AND interrupts are disabled
     151         *    (interrupts can and will access these fields)
     152         *  - the thread is not running, and the code accessing it can legally
     153         *    add/remove the thread to/from a runqueue, i.e., either:
     154         *    - it is allowed to enqueue thread in a new runqueue
     155         *    - it holds the lock to the runqueue containing the thread
     156         *
     157         */
     158
    110159        /**
    111160         * From here, the stored context is restored
     
    113162         */
    114163        context_t saved_context;
    115         ipl_t saved_ipl;
     164
     165        // TODO: we only need one of the two bools below
    116166
    117167        /**
     
    127177        bool in_copy_to_uspace;
    128178
     179        /*
     180         * FPU context is a special case. If lazy FPU switching is disabled,
     181         * it acts as a regular local field. However, if lazy switching is enabled,
     182         * the context is synchronized via CPU->fpu_lock
     183         */
    129184#ifdef CONFIG_FPU
    130185        fpu_context_t fpu_context;
     
    135190        unsigned int nomigrate;
    136191
    137         /** Thread state. */
    138         state_t state;
    139 
    140         /** Thread CPU. */
    141         cpu_t *cpu;
    142         /** Containing task. */
    143         task_t *task;
    144192        /** Thread was migrated to another CPU and has not run yet. */
    145193        bool stolen;
    146         /** Thread is executed in user space. */
    147         bool uspace;
    148 
    149         /** Thread accounting. */
    150         uint64_t ucycles;
    151         uint64_t kcycles;
     194
     195        /**
     196         * Thread state (state_t).
      197         * This is atomic because debug commands read it from other threads;
      198         * otherwise it could just be a regular local field.
     199         */
     200        atomic_int_fast32_t state;
     201
     202        /** Thread CPU. */
     203        _Atomic(cpu_t *) cpu;
     204
     205        /** Thread's priority. Implemented as index to CPU->rq */
     206        atomic_int_fast32_t priority;
     207
    152208        /** Last sampled cycle. */
    153209        uint64_t last_cycle;
    154         /** Thread doesn't affect accumulated accounting. */
    155         bool uncounted;
    156 
    157         /** Thread's priority. Implemented as index to CPU->rq */
    158         int priority;
    159         /** Thread ID. */
    160         thread_id_t tid;
    161 
    162         /** Architecture-specific data. */
    163         thread_arch_t arch;
    164 
    165         /** Thread's kernel stack. */
    166         uint8_t *kstack;
    167 
    168 #ifdef CONFIG_UDEBUG
    169         /**
    170          * If true, the scheduler will print a stack trace
    171          * to the kernel console upon scheduling this thread.
    172          */
    173         bool btrace;
    174 
    175         /** Debugging stuff */
    176         udebug_thread_t udebug;
    177 #endif /* CONFIG_UDEBUG */
    178210} thread_t;
    179211
     
    186218extern void thread_wire(thread_t *, cpu_t *);
    187219extern void thread_attach(thread_t *, task_t *);
    188 extern void thread_ready(thread_t *);
     220extern void thread_start(thread_t *);
     221extern void thread_requeue_sleeping(thread_t *);
    189222extern void thread_exit(void) __attribute__((noreturn));
    190223extern void thread_interrupt(thread_t *);
     224
     225enum sleep_state {
     226        SLEEP_INITIAL,
     227        SLEEP_ASLEEP,
     228        SLEEP_WOKE,
     229};
    191230
    192231typedef enum {
     
    237276extern errno_t thread_join(thread_t *);
    238277extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
     278extern void thread_detach(thread_t *);
     279
     280extern void thread_yield(void);
    239281
    240282extern void thread_print_list(bool);
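
With state and cpu now atomic, debug code can snapshot a foreign thread without the removed per-thread lock. A sketch using the relaxed accessors added in atomic.h (the helper is hypothetical):

    #include <proc/thread.h>
    #include <atomic.h>

    /* Values may be momentarily stale, but every read is tear-free and
     * needs no synchronization; good enough for debug output. */
    void thread_debug_snapshot(thread_t *thread)  /* hypothetical */
    {
            state_t state = atomic_get_unordered(&thread->state);
            cpu_t *cpu = atomic_get_unordered(&thread->cpu);

            printf("thread %" PRIu64 " (%s): state %d, cpu %u\n",
                thread->tid, thread->name, (int) state,
                cpu != NULL ? cpu->id : 0);
    }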
  • kernel/generic/meson.build

    r86f862c rf2cb80a  
    9595        'src/mm/malloc.c',
    9696        'src/mm/reserve.c',
    97         'src/preempt/preemption.c',
    9897        'src/printf/printf.c',
    9998        'src/printf/snprintf.c',
  • kernel/generic/src/console/cmd.c

    r86f862c rf2cb80a  
    229229};
    230230
     231/* Data and methods for 'printbench' command. */
     232static int cmd_printbench(cmd_arg_t *argv);
     233
     234static cmd_info_t printbench_info = {
     235        .name = "printbench",
     236        .description = "Run a printing benchmark.",
     237        .func = cmd_printbench,
     238        .argc = 0,
     239};
     240
    231241#endif /* CONFIG_TEST */
    232242
     
    613623        &test_info,
    614624        &bench_info,
     625        &printbench_info,
    615626#endif
    616627#ifdef CONFIG_UDEBUG
     
    9931004                        printf("cpu%u: ", i);
    9941005                        thread_wire(thread, &cpus[i]);
    995                         thread_ready(thread_ref(thread));
     1006                        thread_start(thread);
    9961007                        thread_join(thread);
    997                         thread_put(thread);
    9981008                } else
    9991009                        printf("Unable to create thread for cpu%u\n", i);
     
    15821592}
    15831593
     1594int cmd_printbench(cmd_arg_t *argv)
     1595{
     1596        int cnt = 20;
     1597
     1598        uint64_t *data = malloc(sizeof(uint64_t) * cnt);
     1599        if (data == NULL) {
     1600                printf("Error allocating memory for statistics\n");
     1601                return false;
     1602        }
     1603
     1604        for (int i = 0; i < cnt; i++) {
     1605                /*
     1606                 * Update and read thread accounting
     1607                 * for benchmarking
     1608                 */
     1609                irq_spinlock_lock(&TASK->lock, true);
     1610                uint64_t ucycles0, kcycles0;
     1611                task_get_accounting(TASK, &ucycles0, &kcycles0);
     1612                irq_spinlock_unlock(&TASK->lock, true);
     1613
     1614                /* Execute the test */
     1615                for (int j = 0; j < 20; j++) {
     1616                        printf("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ěščřžýáíéú!@#$%%^&*(){}+\n");
     1617                        printf("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ěščřžýáíéú!@#$%%^&*(){}+abcdefghijklmnopqrstuvwxyz\n");
     1618                        printf("0123456789ěščřžýáíéú!@#$%%^&*(){}+abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\n");
     1619                }
     1620
     1621                /* Update and read thread accounting */
     1622                irq_spinlock_lock(&TASK->lock, true);
     1623                uint64_t ucycles1, kcycles1;
     1624                task_get_accounting(TASK, &ucycles1, &kcycles1);
     1625                irq_spinlock_unlock(&TASK->lock, true);
     1626
     1627                data[i] = ucycles1 - ucycles0 + kcycles1 - kcycles0;
     1628        }
     1629
     1630        printf("\n");
     1631
     1632        uint64_t cycles;
     1633        char suffix;
     1634        uint64_t sum = 0;
     1635
     1636        for (int i = 0; i < cnt; i++) {
     1637                sum += data[i];
     1638        }
     1639
     1640        order_suffix(sum / (uint64_t) cnt, &cycles, &suffix);
     1641        printf("Average\t\t%" PRIu64 "%c\n", cycles, suffix);
     1642
     1643        free(data);
     1644
     1645        return true;
     1646}
     1647
    15841648#endif
    15851649
  • kernel/generic/src/cpu/cpu.c

    r86f862c rf2cb80a  
    8181                                panic("Cannot allocate CPU stack.");
    8282
    83                         cpus[i].stack = (uint8_t *) PA2KA(stack_phys);
     83                        cpus[i].local.stack = (uint8_t *) PA2KA(stack_phys);
    8484                        cpus[i].id = i;
    8585
     
    104104        CPU->tlb_active = true;
    105105
    106         CPU->idle = false;
    107         CPU->last_cycle = get_cycle();
     106        CPU_LOCAL->idle = false;
     107        CPU_LOCAL->last_cycle = get_cycle();
    108108        CPU->idle_cycles = ATOMIC_TIME_INITIALIZER();
    109109        CPU->busy_cycles = ATOMIC_TIME_INITIALIZER();
  • kernel/generic/src/ddi/ddi.c

    r86f862c rf2cb80a  
    336336                return EPERM;
    337337
    338         irq_spinlock_lock(&tasks_lock, true);
    339 
    340338        task_t *task = task_find_by_id(id);
    341339
    342         if ((!task) || (!container_check(CONTAINER, task->container))) {
    343                 /*
    344                  * There is no task with the specified ID
    345                  * or the task belongs to a different security
    346                  * context.
    347                  */
    348                 irq_spinlock_unlock(&tasks_lock, true);
     340        if (!task)
    349341                return ENOENT;
    350         }
    351 
    352         /* Lock the task and release the lock protecting tasks dictionary. */
    353         irq_spinlock_exchange(&tasks_lock, &task->lock);
    354         errno_t rc = ddi_iospace_enable_arch(task, ioaddr, size);
     342
     343        errno_t rc = ENOENT;
     344
     345        irq_spinlock_lock(&task->lock, true);
     346
     347        /* Check that the task belongs to the correct security context. */
     348        if (container_check(CONTAINER, task->container))
     349                rc = ddi_iospace_enable_arch(task, ioaddr, size);
     350
    355351        irq_spinlock_unlock(&task->lock, true);
    356 
     352        task_release(task);
    357353        return rc;
    358354}
     
    377373                return EPERM;
    378374
    379         irq_spinlock_lock(&tasks_lock, true);
    380 
    381375        task_t *task = task_find_by_id(id);
    382376
    383         if ((!task) || (!container_check(CONTAINER, task->container))) {
    384                 /*
    385                  * There is no task with the specified ID
    386                  * or the task belongs to a different security
    387                  * context.
    388                  */
    389                 irq_spinlock_unlock(&tasks_lock, true);
     377        if (!task)
    390378                return ENOENT;
    391         }
    392 
    393         /* Lock the task and release the lock protecting tasks dictionary. */
    394         irq_spinlock_exchange(&tasks_lock, &task->lock);
    395         errno_t rc = ddi_iospace_disable_arch(task, ioaddr, size);
     379
     380        errno_t rc = ENOENT;
     381
     382        irq_spinlock_lock(&task->lock, true);
     383
     384        /* Check that the task belongs to the correct security context. */
     385        if (container_check(CONTAINER, task->container))
     386                rc = ddi_iospace_disable_arch(task, ioaddr, size);
     387
    396388        irq_spinlock_unlock(&task->lock, true);
    397 
     389        task_release(task);
    398390        return rc;
    399391}
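
The ddi.c rewrite reflects a broader pattern in this changeset: task_find_by_id() now hands back a held reference, so callers pair it with task_release() instead of juggling tasks_lock. A sketch of the pattern (the helper is hypothetical):

    #include <proc/task.h>

    static errno_t with_task(task_id_t id)  /* hypothetical helper */
    {
            /* The lookup returns a held reference, or NULL. */
            task_t *task = task_find_by_id(id);
            if (!task)
                    return ENOENT;

            /* ... operate on the task, taking task->lock as needed ... */

            /* Drop the reference obtained by the lookup. */
            task_release(task);
            return EOK;
    }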
  • kernel/generic/src/interrupt/interrupt.c

    r86f862c rf2cb80a  
    114114
    115115        /* Account user cycles */
    116         if (THREAD) {
    117                 irq_spinlock_lock(&THREAD->lock, false);
     116        if (THREAD)
    118117                thread_update_accounting(true);
    119                 irq_spinlock_unlock(&THREAD->lock, false);
    120         }
    121118
    122119        /* Account CPU usage if it woke up from sleep */
    123         if (CPU && CPU->idle) {
     120        if (CPU && CPU_LOCAL->idle) {
    124121                uint64_t now = get_cycle();
    125                 atomic_time_increment(&CPU->idle_cycles, now - CPU->last_cycle);
    126                 CPU->last_cycle = now;
    127                 CPU->idle = false;
     122                atomic_time_increment(&CPU->idle_cycles, now - CPU_LOCAL->last_cycle);
     123                CPU_LOCAL->last_cycle = now;
     124                CPU_LOCAL->idle = false;
    128125        }
    129126
     
    155152
    156153        /* Do not charge THREAD for exception cycles */
    157         if (THREAD) {
    158                 irq_spinlock_lock(&THREAD->lock, false);
     154        if (THREAD)
    159155                THREAD->last_cycle = end_cycle;
    160                 irq_spinlock_unlock(&THREAD->lock, false);
    161         }
    162156#else
    163157        panic("No space for any exception handler, yet we want to handle some exception.");
  • kernel/generic/src/ipc/ipc.c

    r86f862c rf2cb80a  
    967967void ipc_print_task(task_id_t taskid)
    968968{
    969         irq_spinlock_lock(&tasks_lock, true);
    970969        task_t *task = task_find_by_id(taskid);
    971         if (!task) {
    972                 irq_spinlock_unlock(&tasks_lock, true);
     970        if (!task)
    973971                return;
    974         }
    975         task_hold(task);
    976         irq_spinlock_unlock(&tasks_lock, true);
    977972
    978973        printf("[phone cap] [calls] [state\n");
  • kernel/generic/src/ipc/kbox.c

    r86f862c rf2cb80a  
    9090                LOG("Join kb.thread.");
    9191                thread_join(TASK->kb.thread);
    92                 thread_put(TASK->kb.thread);
    9392                LOG("...join done.");
    9493                TASK->kb.thread = NULL;
     
    201200/** Connect phone to a task kernel-box specified by id.
    202201 *
    203  * Note that this is not completely atomic. For optimisation reasons, the task
    204  * might start cleaning up kbox after the phone has been connected and before
    205  * a kbox thread has been created. This must be taken into account in the
    206  * cleanup code.
    207  *
    208202 * @param[out] out_phone  Phone capability handle on success.
    209203 * @return Error code.
     
    212206errno_t ipc_connect_kbox(task_id_t taskid, cap_phone_handle_t *out_phone)
    213207{
    214         irq_spinlock_lock(&tasks_lock, true);
    215 
    216208        task_t *task = task_find_by_id(taskid);
    217         if (task == NULL) {
    218                 irq_spinlock_unlock(&tasks_lock, true);
     209        if (!task)
    219210                return ENOENT;
    220         }
    221 
    222         atomic_inc(&task->refcount);
    223 
    224         irq_spinlock_unlock(&tasks_lock, true);
    225211
    226212        mutex_lock(&task->kb.cleanup_lock);
    227 
    228         if (atomic_predec(&task->refcount) == 0) {
    229                 mutex_unlock(&task->kb.cleanup_lock);
    230                 task_destroy(task);
    231                 return ENOENT;
    232         }
    233213
    234214        if (task->kb.finished) {
    235215                mutex_unlock(&task->kb.cleanup_lock);
     216                task_release(task);
    236217                return EINVAL;
    237218        }
     
    244225                if (!kb_thread) {
    245226                        mutex_unlock(&task->kb.cleanup_lock);
     227                        task_release(task);
    246228                        return ENOMEM;
    247229                }
    248230
    249                 task->kb.thread = thread_ref(kb_thread);
    250                 thread_ready(kb_thread);
     231                task->kb.thread = kb_thread;
     232                thread_start(kb_thread);
    251233        }
    252234
     
    256238        if (rc != EOK) {
    257239                mutex_unlock(&task->kb.cleanup_lock);
     240                task_release(task);
    258241                return rc;
    259242        }
     
    266249
    267250        mutex_unlock(&task->kb.cleanup_lock);
     251        task_release(task);
    268252        *out_phone = phone_handle;
    269253        return EOK;
  • kernel/generic/src/main/kinit.c

    r86f862c rf2cb80a  
    122122
    123123                thread_wire(thread, &cpus[0]);
    124                 thread_ready(thread_ref(thread));
     124                thread_start(thread);
    125125                thread_join(thread);
    126                 thread_put(thread);
    127126
    128127                /*
     
    136135                        if (thread != NULL) {
    137136                                thread_wire(thread, &cpus[i]);
    138                                 thread_ready(thread);
     137                                thread_start(thread);
     138                                thread_detach(thread);
    139139                        } else
    140140                                log(LF_OTHER, LVL_ERROR,
     
    152152        thread = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE,
    153153            "kload");
    154         if (thread != NULL)
    155                 thread_ready(thread);
    156         else
     154        if (thread != NULL) {
     155                thread_start(thread);
     156                thread_detach(thread);
     157        } else {
    157158                log(LF_OTHER, LVL_ERROR, "Unable to create kload thread");
     159        }
    158160
    159161#ifdef CONFIG_KCONSOLE
     
    164166                thread = thread_create(kconsole_thread, NULL, TASK,
    165167                    THREAD_FLAG_NONE, "kconsole");
    166                 if (thread != NULL)
    167                         thread_ready(thread);
    168                 else
     168                if (thread != NULL) {
     169                        thread_start(thread);
     170                        thread_detach(thread);
     171                } else {
    169172                        log(LF_OTHER, LVL_ERROR,
    170173                            "Unable to create kconsole thread");
     174                }
    171175        }
    172176#endif /* CONFIG_KCONSOLE */
     
    309313         */
    310314        for (i = 0; i < init.cnt; i++) {
    311                 if (programs[i].task != NULL)
     315                if (programs[i].task != NULL) {
    312316                        program_ready(&programs[i]);
     317                        task_release(programs[i].task);
     318                }
    313319        }
    314320
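
The kinit.c changes show the new thread ownership rules: the creator either joins the thread, which consumes the creator's reference, or detaches it. A sketch of both patterns (the wrapper is hypothetical; kload is the function used above):

    #include <proc/thread.h>

    static void spawn_examples(void)  /* hypothetical wrapper */
    {
            /* Fire and forget: start the thread, then detach it so the
             * kernel reaps it on exit. */
            thread_t *t = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE,
                "kload");
            if (t != NULL) {
                    thread_start(t);
                    thread_detach(t);
            }

            /* Wait for completion: thread_join() consumes the creator's
             * reference, so no separate thread_put() is needed. */
            thread_t *w = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE,
                "worker");
            if (w != NULL) {
                    thread_start(w);
                    thread_join(w);
            }
    }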
  • kernel/generic/src/main/main.c

    r86f862c rf2cb80a  
    8080#include <arch/arch.h>
    8181#include <arch.h>
    82 #include <arch/faddr.h>
    8382#include <ipc/ipc.h>
    8483#include <macros.h>
     
    174173            ALIGN_UP((uintptr_t) kdata_end - config.base, PAGE_SIZE);
    175174
    176         context_save(&ctx);
    177         context_set(&ctx, FADDR(main_bsp_separated_stack),
     175        context_create(&ctx, main_bsp_separated_stack,
    178176            bootstrap_stack, bootstrap_stack_size);
    179177        context_restore(&ctx);
     
    282280        if (!kinit_thread)
    283281                panic("Cannot create kinit thread.");
    284         thread_ready(kinit_thread);
    285 
    286         /*
    287          * This call to scheduler() will return to kinit,
     282        thread_start(kinit_thread);
     283        thread_detach(kinit_thread);
     284
     285        /*
     286         * This call to scheduler_run() will return to kinit,
    288287         * starting the thread of kernel threads.
    289288         */
    290         scheduler();
     289        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     290        context_replace(scheduler_run, CPU_LOCAL->stack, STACK_SIZE);
    291291        /* not reached */
    292292}
     
    328328        ARCH_OP(post_cpu_init);
    329329
    330         current_copy(CURRENT, (current_t *) CPU->stack);
    331 
    332330        /*
    333331         * If we woke kmp up before we left the kernel stack, we could
     
    335333         * switch to this cpu's private stack prior to waking kmp up.
    336334         */
    337         context_t ctx;
    338         context_save(&ctx);
    339         context_set(&ctx, FADDR(main_ap_separated_stack),
    340             (uintptr_t) CPU->stack, STACK_SIZE);
    341         context_restore(&ctx);
     335        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     336        context_replace(main_ap_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
    342337        /* not reached */
    343338}
     
    356351
    357352        semaphore_up(&ap_completion_semaphore);
    358         scheduler();
     353        scheduler_run();
    359354        /* not reached */
    360355}
  • kernel/generic/src/proc/program.c

    r86f862c rf2cb80a  
    9999        if (!area) {
    100100                free(kernel_uarg);
    101                 task_destroy(prg->task);
     101                task_release(prg->task);
    102102                prg->task = NULL;
    103103                return ENOMEM;
     
    119119                free(kernel_uarg);
    120120                as_area_destroy(as, virt);
    121                 task_destroy(prg->task);
     121                task_release(prg->task);
    122122                prg->task = NULL;
    123123                return ELIMIT;
     
    212212void program_ready(program_t *prg)
    213213{
    214         thread_ready(prg->main_thread);
     214        thread_start(prg->main_thread);
     215        thread_detach(prg->main_thread);
    215216        prg->main_thread = NULL;
    216217}
     
    251252        program_ready(&prg);
    252253
     254        task_release(prg.task);
     255
    253256        return EOK;
    254257}
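Error paths in program.c now call task_release() instead of task_destroy(): destruction becomes a private side effect of dropping the last reference, so a concurrent holder of the same task can never be left with a dangling pointer. A minimal sketch of that convention using C11 atomics (obj_t and its functions are illustrative, not the kernel's refcount_t API):

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct {
            atomic_int refcount;    /* number of live references */
    } obj_t;

    static obj_t *obj_create(void)
    {
            obj_t *obj = malloc(sizeof(*obj));
            if (obj)
                    atomic_init(&obj->refcount, 1);  /* caller's reference */
            return obj;
    }

    static void obj_release(obj_t *obj)
    {
            /* Destruction happens only when the last reference drops;
             * no caller ever destroys the object directly. */
            if (atomic_fetch_sub_explicit(&obj->refcount, 1,
                memory_order_acq_rel) == 1)
                    free(obj);
    }

    int main(void)
    {
            obj_t *obj = obj_create();
            if (obj)
                    obj_release(obj);   /* last reference frees it */
    }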
  • kernel/generic/src/proc/scheduler.c

    r86f862c rf2cb80a  
    11/*
    22 * Copyright (c) 2010 Jakub Jermar
     3 * Copyright (c) 2023 Jiří Zárevúcky
    34 * All rights reserved.
    45 *
     
    5051#include <time/delay.h>
    5152#include <arch/asm.h>
    52 #include <arch/faddr.h>
    5353#include <arch/cycle.h>
    5454#include <atomic.h>
     
    6666#include <stacktrace.h>
    6767
    68 static void scheduler_separated_stack(void);
    69 
    7068atomic_size_t nrdy;  /**< Number of ready threads in the system. */
    71 
    72 /** Take actions before new thread runs.
    73  *
    74  * Perform actions that need to be
    75  * taken before the newly selected
    76  * thread is passed control.
    77  *
    78  * THREAD->lock is locked on entry
    79  *
    80  */
    81 static void before_thread_runs(void)
    82 {
    83         before_thread_runs_arch();
    84 
    85 #ifdef CONFIG_FPU_LAZY
    86         /*
    87          * The only concurrent modification possible for fpu_owner here is
    88          * another thread changing it from itself to NULL in its destructor.
    89          */
    90         thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
    91             memory_order_relaxed);
    92 
    93         if (THREAD == owner)
    94                 fpu_enable();
    95         else
    96                 fpu_disable();
    97 #elif defined CONFIG_FPU
    98         fpu_enable();
    99         if (THREAD->fpu_context_exists)
    100                 fpu_context_restore(&THREAD->fpu_context);
    101         else {
    102                 fpu_init();
    103                 THREAD->fpu_context_exists = true;
    104         }
    105 #endif
    106 
    107 #ifdef CONFIG_UDEBUG
    108         if (THREAD->btrace) {
    109                 istate_t *istate = THREAD->udebug.uspace_state;
    110                 if (istate != NULL) {
    111                         printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
    112                         stack_trace_istate(istate);
    113                 }
    114 
    115                 THREAD->btrace = false;
    116         }
    117 #endif
    118 }
    119 
    120 /** Take actions after THREAD had run.
    121  *
    122  * Perform actions that need to be
    123  * taken after the running thread
    124  * had been preempted by the scheduler.
    125  *
    126  * THREAD->lock is locked on entry
    127  *
    128  */
    129 static void after_thread_ran(void)
    130 {
    131         after_thread_ran_arch();
    132 }
    13369
    13470#ifdef CONFIG_FPU_LAZY
     
    207143                list_remove(&thread->rq_link);
    208144
    209                 irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
    210 
    211                 thread->cpu = CPU;
    212                 thread->priority = i;  /* Correct rq index */
    213 
    214                 /* Time allocation in microseconds. */
    215                 uint64_t time_to_run = (i + 1) * 10000;
    216 
    217                 /* This is safe because interrupts are disabled. */
    218                 CPU->preempt_deadline = CPU->current_clock_tick + us2ticks(time_to_run);
    219 
    220                 /*
    221                  * Clear the stolen flag so that it can be migrated
    222                  * when load balancing needs emerge.
    223                  */
    224                 thread->stolen = false;
    225                 irq_spinlock_unlock(&thread->lock, false);
     145                irq_spinlock_unlock(&(CPU->rq[i].lock), false);
    226146
    227147                *rq_index = i;
     
    257177                 * This improves energy saving and hyperthreading.
    258178                 */
    259                 CPU->idle = true;
     179                CPU_LOCAL->idle = true;
    260180
    261181                /*
     
    305225static void relink_rq(int start)
    306226{
    307         if (CPU->current_clock_tick < CPU->relink_deadline)
     227        assert(interrupts_disabled());
     228
     229        if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
    308230                return;
    309231
    310         CPU->relink_deadline = CPU->current_clock_tick + NEEDS_RELINK_MAX;
     232        CPU_LOCAL->relink_deadline = CPU_LOCAL->current_clock_tick + NEEDS_RELINK_MAX;
    311233
    312234        /* Temporary cache for lists we are moving. */
     
    340262}
    341263
    342 void scheduler(void)
    343 {
    344         ipl_t ipl = interrupts_disable();
    345 
    346         if (atomic_load(&haltstate))
    347                 halt();
    348 
    349         if (THREAD) {
    350                 irq_spinlock_lock(&THREAD->lock, false);
    351         }
    352 
    353         scheduler_locked(ipl);
    354 }
    355 
    356 /** The scheduler
    357  *
    358  * The thread scheduling procedure.
    359  * Passes control directly to
    360  * scheduler_separated_stack().
    361  *
    362  */
    363 void scheduler_locked(ipl_t ipl)
    364 {
    365         assert(CPU != NULL);
    366 
    367         if (THREAD) {
    368                 /* Update thread kernel accounting */
    369                 THREAD->kcycles += get_cycle() - THREAD->last_cycle;
    370 
     264/**
     265 * Do whatever needs to be done with current FPU state before we switch to
     266 * another thread.
     267 */
     268static void fpu_cleanup(void)
     269{
    371270#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
    372                 fpu_context_save(&THREAD->fpu_context);
     271        fpu_context_save(&THREAD->fpu_context);
    373272#endif
    374                 if (!context_save(&THREAD->saved_context)) {
    375                         /*
    376                          * This is the place where threads leave scheduler();
    377                          */
    378 
    379                         /* Save current CPU cycle */
    380                         THREAD->last_cycle = get_cycle();
    381 
    382                         irq_spinlock_unlock(&THREAD->lock, false);
    383                         interrupts_restore(THREAD->saved_ipl);
    384 
    385                         return;
    386                 }
    387 
    388                 /*
    389                  * Interrupt priority level of preempted thread is recorded
    390                  * here to facilitate scheduler() invocations from
    391                  * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
    392                  *
    393                  */
    394                 THREAD->saved_ipl = ipl;
    395         }
    396 
     273}
     274
     275/**
     276 * Set correct FPU state for this thread after switch from another thread.
     277 */
     278static void fpu_restore(void)
     279{
     280#ifdef CONFIG_FPU_LAZY
    397281        /*
    398          * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
    399          * and preemption counter. At this point CURRENT could be coming either
    400          * from THREAD's or CPU's stack.
    401          *
     282         * The only concurrent modification possible for fpu_owner here is
     283         * another thread changing it from itself to NULL in its destructor.
    402284         */
    403         current_copy(CURRENT, (current_t *) CPU->stack);
     285        thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
     286            memory_order_relaxed);
     287
     288        if (THREAD == owner)
     289                fpu_enable();
     290        else
     291                fpu_disable();
     292
     293#elif defined CONFIG_FPU
     294        fpu_enable();
     295        if (THREAD->fpu_context_exists)
     296                fpu_context_restore(&THREAD->fpu_context);
     297        else {
     298                fpu_init();
     299                THREAD->fpu_context_exists = true;
     300        }
     301#endif
     302}
     303
     304/** Things to do before we switch to THREAD context.
     305 */
     306static void prepare_to_run_thread(int rq_index)
     307{
     308        relink_rq(rq_index);
     309
     310        switch_task(THREAD->task);
     311
     312        assert(atomic_get_unordered(&THREAD->cpu) == CPU);
     313
     314        atomic_set_unordered(&THREAD->state, Running);
     315        atomic_set_unordered(&THREAD->priority, rq_index);  /* Correct rq index */
    404316
    405317        /*
    406          * We may not keep the old stack.
    407          * Reason: If we kept the old stack and got blocked, for instance, in
    408          * find_best_thread(), the old thread could get rescheduled by another
    409          * CPU and overwrite the part of its own stack that was also used by
    410          * the scheduler on this CPU.
    411          *
    412          * Moreover, we have to bypass the compiler-generated POP sequence
    413          * which is fooled by SP being set to the very top of the stack.
    414          * Therefore the scheduler() function continues in
    415          * scheduler_separated_stack().
    416          *
     318         * Clear the stolen flag so that it can be migrated
     319         * when load balancing needs emerge.
    417320         */
    418         context_t ctx;
    419         context_save(&ctx);
    420         context_set(&ctx, FADDR(scheduler_separated_stack),
    421             (uintptr_t) CPU->stack, STACK_SIZE);
    422         context_restore(&ctx);
    423 
    424         /* Not reached */
    425 }
    426 
    427 /** Scheduler stack switch wrapper
    428  *
    429  * Second part of the scheduler() function
    430  * using new stack. Handling the actual context
    431  * switch to a new thread.
    432  *
    433  */
    434 void scheduler_separated_stack(void)
    435 {
    436         assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
    437         assert(CPU != NULL);
    438         assert(interrupts_disabled());
    439 
    440         if (THREAD) {
    441                 /* Must be run after the switch to scheduler stack */
    442                 after_thread_ran();
    443 
    444                 switch (THREAD->state) {
    445                 case Running:
    446                         irq_spinlock_unlock(&THREAD->lock, false);
    447                         thread_ready(THREAD);
    448                         break;
    449 
    450                 case Exiting:
    451                         irq_spinlock_unlock(&THREAD->lock, false);
    452                         waitq_close(&THREAD->join_wq);
    453 
    454                         /*
    455                          * Release the reference CPU has for the thread.
    456                          * If there are no other references (e.g. threads calling join),
    457                          * the thread structure is deallocated.
    458                          */
    459                         thread_put(THREAD);
    460                         break;
    461 
    462                 case Sleeping:
    463                         /*
    464                          * Prefer the thread after it's woken up.
    465                          */
    466                         THREAD->priority = -1;
    467                         irq_spinlock_unlock(&THREAD->lock, false);
    468                         break;
    469 
    470                 default:
    471                         /*
    472                          * Entering state is unexpected.
    473                          */
    474                         panic("tid%" PRIu64 ": unexpected state %s.",
    475                             THREAD->tid, thread_states[THREAD->state]);
    476                         break;
    477                 }
    478 
    479                 THREAD = NULL;
    480         }
    481 
    482         int rq_index;
    483         THREAD = find_best_thread(&rq_index);
    484 
    485         relink_rq(rq_index);
    486 
    487         switch_task(THREAD->task);
    488 
    489         irq_spinlock_lock(&THREAD->lock, false);
    490         THREAD->state = Running;
     321        THREAD->stolen = false;
    491322
    492323#ifdef SCHEDULER_VERBOSE
    493324        log(LF_OTHER, LVL_DEBUG,
    494325            "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
    495             ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
     326            ", nrdy=%zu)", CPU->id, THREAD->tid, rq_index,
    496327            THREAD->ticks, atomic_load(&CPU->nrdy));
    497328#endif
     
    505336         * function must be executed before the switch to the new stack.
    506337         */
    507         before_thread_runs();
     338        before_thread_runs_arch();
     339
     340#ifdef CONFIG_UDEBUG
     341        if (atomic_get_unordered(&THREAD->btrace)) {
     342                istate_t *istate = THREAD->udebug.uspace_state;
     343                if (istate != NULL) {
     344                        printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
     345                        stack_trace_istate(istate);
     346                } else {
     347                        printf("Thread %" PRIu64 " interrupt state not available\n", THREAD->tid);
     348                }
     349
     350                atomic_set_unordered(&THREAD->btrace, false);
     351        }
     352#endif
     353
     354        fpu_restore();
     355
     356        /* Time allocation in microseconds. */
     357        uint64_t time_to_run = (rq_index + 1) * 10000;
     358
     359        /* Set the time of next preemption. */
     360        CPU_LOCAL->preempt_deadline =
     361            CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
     362
     363        /* Save current CPU cycle */
     364        THREAD->last_cycle = get_cycle();
     365}
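prepare_to_run_thread() keeps the old time-slice formula: a thread taken from run queue i gets (i + 1) * 10000 microseconds, i.e. 10 ms at the highest priority and proportionally more as the priority drops. The arithmetic, spelled out:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            for (int rq_index = 0; rq_index < 4; rq_index++) {
                    /* Same formula as prepare_to_run_thread() uses. */
                    uint64_t time_to_run = (rq_index + 1) * 10000;  /* usec */

                    /* Prints 10000, 20000, 30000, 40000 us. */
                    printf("rq[%d] -> %" PRIu64 " us\n", rq_index, time_to_run);
            }
    }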
     366
     367static void add_to_rq(thread_t *thread, cpu_t *cpu, int i)
     368{
     369        /* Add to the appropriate runqueue. */
     370        runq_t *rq = &cpu->rq[i];
     371
     372        irq_spinlock_lock(&rq->lock, false);
     373        list_append(&thread->rq_link, &rq->rq);
     374        rq->n++;
     375        irq_spinlock_unlock(&rq->lock, false);
     376
     377        atomic_inc(&nrdy);
     378        atomic_inc(&cpu->nrdy);
     379}
     380
     381/** Requeue a thread that was just preempted on this CPU.
     382 */
     383static void thread_requeue_preempted(thread_t *thread)
     384{
     385        assert(interrupts_disabled());
     386        assert(atomic_get_unordered(&thread->state) == Running);
     387        assert(atomic_get_unordered(&thread->cpu) == CPU);
     388
     389        int prio = atomic_get_unordered(&thread->priority);
     390
     391        if (prio < RQ_COUNT - 1) {
     392                prio++;
     393                atomic_set_unordered(&thread->priority, prio);
     394        }
     395
     396        atomic_set_unordered(&thread->state, Ready);
     397
     398        add_to_rq(thread, CPU, prio);
     399}
     400
     401void thread_requeue_sleeping(thread_t *thread)
     402{
     403        ipl_t ipl = interrupts_disable();
     404
     405        assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);
     406
     407        atomic_set_unordered(&thread->priority, 0);
     408        atomic_set_unordered(&thread->state, Ready);
     409
     410        /* Prefer the CPU on which the thread ran last */
     411        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     412
     413        if (!cpu) {
     414                cpu = CPU;
     415                atomic_set_unordered(&thread->cpu, CPU);
     416        }
     417
     418        add_to_rq(thread, cpu, 0);
     419
     420        interrupts_restore(ipl);
     421}
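The two requeue paths encode the priority policy: a thread preempted while Running sinks one queue per full quantum, saturating at RQ_COUNT - 1, while a thread waking from sleep is boosted straight back to queue 0 (the old code expressed the same boost as priority = -1 followed by an increment). Condensed into one function, a sketch using the illustrative name requeue_priority():

    enum { RQ_COUNT = 16 };   /* illustrative; the kernel defines its own */

    /* Returns the run-queue index a thread should be requeued into. */
    static int requeue_priority(int prio, int was_sleeping)
    {
            if (was_sleeping)
                    return 0;   /* fresh wakeups run first */

            /* Used up a whole quantum: sink one level, but never below
             * the lowest-priority queue. */
            return (prio < RQ_COUNT - 1) ? prio + 1 : prio;
    }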
     422
     423static void cleanup_after_thread(thread_t *thread)
     424{
     425        assert(CURRENT->mutex_locks == 0);
     426        assert(interrupts_disabled());
     427
     428        int expected;
     429
     430        switch (atomic_get_unordered(&thread->state)) {
     431        case Running:
     432                thread_requeue_preempted(thread);
     433                break;
     434
     435        case Exiting:
     436                waitq_close(&thread->join_wq);
     437
     438                /*
     439                 * Release the reference CPU has for the thread.
     440                 * If there are no other references (e.g. threads calling join),
     441                 * the thread structure is deallocated.
     442                 */
     443                thread_put(thread);
     444                break;
     445
     446        case Sleeping:
     447                expected = SLEEP_INITIAL;
     448
     449                /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
     450                if (!atomic_compare_exchange_strong_explicit(&thread->sleep_state,
     451                    &expected, SLEEP_ASLEEP,
     452                    memory_order_acq_rel, memory_order_acquire)) {
     453
     454                        assert(expected == SLEEP_WOKE);
     455                        /* The thread has already been woken up, requeue immediately. */
     456                        thread_requeue_sleeping(thread);
     457                }
     458                break;
     459
     460        default:
     461                /*
     462                 * Entering state is unexpected.
     463                 */
     464                panic("tid%" PRIu64 ": unexpected state %s.",
     465                    thread->tid, thread_states[atomic_get_unordered(&thread->state)]);
     466                break;
     467        }
     468}
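The Sleeping branch above is one half of a two-sided race: the sleeper publishes its intent with a compare-and-swap from SLEEP_INITIAL to SLEEP_ASLEEP, and a waker unconditionally exchanges the field to SLEEP_WOKE (see the thread_wakeup() hunk in thread.c below). Whichever side loses the race takes responsibility for requeuing the thread. A minimal C11 model of the protocol:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE };

    /* Sleeper: returns true if it may actually stay asleep. */
    static bool try_sleep(atomic_int *sleep_state)
    {
            int expected = SLEEP_INITIAL;
            return atomic_compare_exchange_strong_explicit(sleep_state,
                &expected, SLEEP_ASLEEP,
                memory_order_acq_rel, memory_order_acquire);
            /* On failure, expected == SLEEP_WOKE: requeue immediately. */
    }

    /* Waker: returns true if the thread was already asleep, in which
     * case the waker must requeue it. */
    static bool wake(atomic_int *sleep_state)
    {
            return atomic_exchange_explicit(sleep_state, SLEEP_WOKE,
                memory_order_acq_rel) == SLEEP_ASLEEP;
    }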
     469
     470/** Switch to scheduler context to let other threads run. */
     471void scheduler_enter(state_t new_state)
     472{
     473        ipl_t ipl = interrupts_disable();
     474
     475        assert(CPU != NULL);
     476        assert(THREAD != NULL);
     477
     478        if (atomic_load(&haltstate))
     479                halt();
     480
     481        /* Check if we have a thread to switch to. */
     482
     483        int rq_index;
     484        thread_t *new_thread = try_find_thread(&rq_index);
     485
     486        if (new_thread == NULL && new_state == Running) {
     487                /* No other thread to run, but we still have work to do here. */
     488                interrupts_restore(ipl);
     489                return;
     490        }
     491
     492        atomic_set_unordered(&THREAD->state, new_state);
     493
     494        /* Update thread kernel accounting */
     495        atomic_time_increment(&THREAD->kcycles, get_cycle() - THREAD->last_cycle);
     496
     497        fpu_cleanup();
    508498
    509499        /*
    510          * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
    511          * thread's stack.
     500         * On Sparc, this saves some extra userspace state that's not
     501         * covered by context_save()/context_restore().
    512502         */
    513         current_copy(CURRENT, (current_t *) THREAD->kstack);
    514 
    515         context_restore(&THREAD->saved_context);
     503        after_thread_ran_arch();
     504
     505        if (new_thread) {
     506                thread_t *old_thread = THREAD;
     507                CPU_LOCAL->prev_thread = old_thread;
     508                THREAD = new_thread;
     509                /* No waiting necessary, we can switch to the new thread directly. */
     510                prepare_to_run_thread(rq_index);
     511
     512                current_copy(CURRENT, (current_t *) new_thread->kstack);
     513                context_swap(&old_thread->saved_context, &new_thread->saved_context);
     514        } else {
     515                /*
     516                 * A new thread isn't immediately available, switch to a separate
     517                 * stack to sleep or do other idle stuff.
     518                 */
     519                current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     520                context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
     521        }
     522
     523        assert(CURRENT->mutex_locks == 0);
     524        assert(interrupts_disabled());
     525
     526        /* Check if we need to clean up after another thread. */
     527        if (CPU_LOCAL->prev_thread) {
     528                cleanup_after_thread(CPU_LOCAL->prev_thread);
     529                CPU_LOCAL->prev_thread = NULL;
     530        }
     531
     532        interrupts_restore(ipl);
     533}
     534
     535/** Enter main scheduler loop. Never returns.
     536 *
     537 * This function switches to a runnable thread as soon as one is available,
     538 * after which it is only switched back to if a thread is stopping and there is
     539 * no other thread to run in its place. We need a separate context for that
     540 * because we're going to block the CPU, which means we need another context
     541 * to clean up after the previous thread.
     542 */
     543void scheduler_run(void)
     544{
     545        assert(interrupts_disabled());
     546
     547        assert(CPU != NULL);
     548        assert(TASK == NULL);
     549        assert(THREAD == NULL);
     550        assert(interrupts_disabled());
     551
     552        while (!atomic_load(&haltstate)) {
     553                assert(CURRENT->mutex_locks == 0);
     554
     555                int rq_index;
     556                THREAD = find_best_thread(&rq_index);
     557                prepare_to_run_thread(rq_index);
     558
     559                /*
     560                 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
     561                 * thread's stack.
     562                 */
     563                current_copy(CURRENT, (current_t *) THREAD->kstack);
     564
     565                /* Switch to thread context. */
     566                context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
     567
     568                /* Back from another thread. */
     569                assert(CPU != NULL);
     570                assert(THREAD != NULL);
     571                assert(CURRENT->mutex_locks == 0);
     572                assert(interrupts_disabled());
     573
     574                cleanup_after_thread(THREAD);
     575
     576                /*
     577                 * Necessary because we're allowing interrupts in find_best_thread(),
     578                 * so we need to avoid other code referencing the thread we left.
     579                 */
     580                THREAD = NULL;
     581        }
     582
     583        halt();
     584}
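scheduler_run() and scheduler_enter() form a pair of context_swap() calls: each CPU keeps one long-lived scheduler context, threads swap into it when they stop, and it swaps back out as soon as a runnable thread appears. A rough user-space analogue of that round trip, assuming POSIX <ucontext.h> in place of the kernel's context_swap():

    #include <stdio.h>
    #include <ucontext.h>

    static ucontext_t scheduler_ctx, thread_ctx;
    static char stack[64 * 1024];

    static void thread_func(void)
    {
            puts("thread: yielding to scheduler");
            /* ~ scheduler_enter(): swap back into the scheduler context. */
            swapcontext(&thread_ctx, &scheduler_ctx);
            puts("thread: resumed, now exiting");
    }

    int main(void)
    {
            getcontext(&thread_ctx);
            thread_ctx.uc_stack.ss_sp = stack;
            thread_ctx.uc_stack.ss_size = sizeof(stack);
            thread_ctx.uc_link = &scheduler_ctx;  /* where exit lands */
            makecontext(&thread_ctx, thread_func, 0);

            /* ~ scheduler_run(): pick a thread, swap to it, clean up when
             * it comes back, repeat. */
            puts("scheduler: switching to thread");
            swapcontext(&scheduler_ctx, &thread_ctx);
            puts("scheduler: thread stopped, resuming it");
            swapcontext(&scheduler_ctx, &thread_ctx);
            puts("scheduler: thread finished");
    }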
     585
     586/** Thread wrapper.
     587 *
     588 * This wrapper is provided to ensure that a starting thread properly handles
     589 * everything it needs to do when first scheduled, and when it exits.
     590 */
     591void thread_main_func(void)
     592{
     593        assert(interrupts_disabled());
     594
     595        void (*f)(void *) = THREAD->thread_code;
     596        void *arg = THREAD->thread_arg;
     597
     598        /* This is where each thread wakes up after its creation */
     599
     600        /* Check if we need to clean up after another thread. */
     601        if (CPU_LOCAL->prev_thread) {
     602                cleanup_after_thread(CPU_LOCAL->prev_thread);
     603                CPU_LOCAL->prev_thread = NULL;
     604        }
     605
     606        interrupts_enable();
     607
     608        f(arg);
     609
     610        thread_exit();
    516611
    517612        /* Not reached */
     
    539634        list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
    540635
    541                 irq_spinlock_lock(&thread->lock, false);
    542 
    543636                /*
    544637                 * Do not steal CPU-wired threads, threads
     
    547640                 * FPU context is still in the CPU.
    548641                 */
    549                 if (thread->stolen || thread->nomigrate ||
    550                     thread == fpu_owner) {
    551                         irq_spinlock_unlock(&thread->lock, false);
     642                if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
    552643                        continue;
    553644                }
    554645
    555646                thread->stolen = true;
    556                 thread->cpu = CPU;
    557 
    558                 irq_spinlock_unlock(&thread->lock, false);
     647                atomic_set_unordered(&thread->cpu, CPU);
    559648
    560649                /*
     
    659748                 *
    660749                 */
    661                 scheduler();
     750                thread_yield();
    662751        } else {
    663752                /*
     
    686775                        continue;
    687776
    688                 /* Technically a data race, but we don't really care in this case. */
    689                 int needs_relink = cpus[cpu].relink_deadline - cpus[cpu].current_clock_tick;
    690 
    691                 printf("cpu%u: address=%p, nrdy=%zu, needs_relink=%d\n",
    692                     cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy),
    693                     needs_relink);
     777                printf("cpu%u: address=%p, nrdy=%zu\n",
     778                    cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy));
    694779
    695780                unsigned int i;
     
    705790                            thread) {
    706791                                printf("%" PRIu64 "(%s) ", thread->tid,
    707                                     thread_states[thread->state]);
     792                                    thread_states[atomic_get_unordered(&thread->state)]);
    708793                        }
    709794                        printf("\n");
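Throughout the scheduler, fields that used to be guarded by the now-removed thread->lock (state, priority, cpu, btrace) are accessed through atomic_get_unordered()/atomic_set_unordered(). In C11 terms these behave roughly like relaxed atomics: each individual read or write is untorn, but no ordering between different fields is implied. An approximation (HelenOS defines its own macros):

    #include <stdatomic.h>

    #define atomic_get_unordered(ptr) \
            atomic_load_explicit((ptr), memory_order_relaxed)

    #define atomic_set_unordered(ptr, val) \
            atomic_store_explicit((ptr), (val), memory_order_relaxed)

    /* Example: publishing a state change with no implied ordering. */
    static _Atomic int state;

    static void set_ready(void)
    {
            atomic_set_unordered(&state, 1);
    }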
  • kernel/generic/src/proc/task.c

    r86f862c rf2cb80a  
    158158                return rc;
    159159
    160         atomic_store(&task->refcount, 0);
    161160        atomic_store(&task->lifecount, 0);
    162161
     
    201200        if (!task)
    202201                return NULL;
     202
     203        refcount_init(&task->refcount);
    203204
    204205        task_create_arch(task);
     
    268269 *
    269270 */
    270 void task_destroy(task_t *task)
     271static void task_destroy(task_t *task)
    271272{
    272273        /*
     
    299300void task_hold(task_t *task)
    300301{
    301         atomic_inc(&task->refcount);
     302        refcount_up(&task->refcount);
    302303}
    303304
     
    311312void task_release(task_t *task)
    312313{
    313         if ((atomic_predec(&task->refcount)) == 0)
     314        if (refcount_down(&task->refcount))
    314315                task_destroy(task);
    315316}
     
    416417/** Find task structure corresponding to task ID.
    417418 *
    418  * The tasks_lock must be already held by the caller of this function and
    419  * interrupts must be disabled.
    420  *
    421419 * @param id Task ID.
    422420 *
    423  * @return Task structure address or NULL if there is no such task ID.
     421 * @return Task reference or NULL if there is no such task ID.
    424422 *
    425423 */
    426424task_t *task_find_by_id(task_id_t id)
    427425{
    428         assert(interrupts_disabled());
    429         assert(irq_spinlock_locked(&tasks_lock));
     426        task_t *task = NULL;
     427
     428        irq_spinlock_lock(&tasks_lock, true);
    430429
    431430        odlink_t *odlink = odict_find_eq(&tasks, &id, NULL);
    432         if (odlink != NULL)
    433                 return odict_get_instance(odlink, task_t, ltasks);
    434 
    435         return NULL;
     431        if (odlink != NULL) {
     432                task = odict_get_instance(odlink, task_t, ltasks);
     433
     434                /*
     435                 * The directory of tasks can't hold a reference, since that would
     436                 * prevent task from ever being destroyed. That means we have to
     437                 * check for the case where the task is already being destroyed, but
     438                 * not yet removed from the directory.
     439                 */
     440                if (!refcount_try_up(&task->refcount))
     441                        task = NULL;
     442        }
     443
     444        irq_spinlock_unlock(&tasks_lock, true);
     445
     446        return task;
    436447}
    437448
     
    506517        /* Current values of threads */
    507518        list_foreach(task->threads, th_link, thread_t, thread) {
    508                 irq_spinlock_lock(&thread->lock, false);
    509 
    510519                /* Process only counted threads */
    511520                if (!thread->uncounted) {
     
    515524                        }
    516525
    517                         uret += thread->ucycles;
    518                         kret += thread->kcycles;
     526                        uret += atomic_time_read(&thread->ucycles);
     527                        kret += atomic_time_read(&thread->kcycles);
    519528                }
    520 
    521                 irq_spinlock_unlock(&thread->lock, false);
    522529        }
    523530
     
    528535static void task_kill_internal(task_t *task)
    529536{
    530         irq_spinlock_lock(&task->lock, false);
     537        irq_spinlock_lock(&task->lock, true);
    531538
    532539        /*
     
    538545        }
    539546
    540         irq_spinlock_unlock(&task->lock, false);
     547        irq_spinlock_unlock(&task->lock, true);
    541548}
    542549
     
    556563                return EPERM;
    557564
    558         irq_spinlock_lock(&tasks_lock, true);
    559 
    560565        task_t *task = task_find_by_id(id);
    561         if (!task) {
    562                 irq_spinlock_unlock(&tasks_lock, true);
     566        if (!task)
    563567                return ENOENT;
    564         }
    565568
    566569        task_kill_internal(task);
    567         irq_spinlock_unlock(&tasks_lock, true);
    568 
     570        task_release(task);
    569571        return EOK;
    570572}
     
    596598        }
    597599
    598         irq_spinlock_lock(&tasks_lock, true);
    599600        task_kill_internal(TASK);
    600         irq_spinlock_unlock(&tasks_lock, true);
    601 
    602601        thread_exit();
    603602}
     
    628627        if (additional)
    629628                printf("%-8" PRIu64 " %9zu", task->taskid,
    630                     atomic_load(&task->refcount));
     629                    atomic_load(&task->lifecount));
    631630        else
    632631                printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
     
    640639                printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
    641640                    "%9zu\n", task->taskid, ucycles, usuffix, kcycles,
    642                     ksuffix, atomic_load(&task->refcount));
     641                    ksuffix, atomic_load(&task->lifecount));
    643642        else
    644643                printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
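task_find_by_id() is the subtle case of the new reference counting: the tasks directory intentionally holds no reference of its own, so a lookup can find a task whose last reference is already gone. refcount_try_up() only succeeds while the count is still positive, which makes such a dying task look absent. A sketch of the assumed semantics with a conventional 1-based counter (HelenOS's refcount_t may be represented differently internally):

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool refcount_try_up(atomic_int *rc)
    {
            int old = atomic_load_explicit(rc, memory_order_relaxed);

            while (old > 0) {
                    if (atomic_compare_exchange_weak_explicit(rc, &old,
                        old + 1, memory_order_acquire, memory_order_relaxed))
                            return true;   /* gained a reference */
            }

            return false;   /* already dying; report "not found" */
    }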
  • kernel/generic/src/proc/thread.c

    r86f862c rf2cb80a  
    6060#include <arch/interrupt.h>
    6161#include <smp/ipi.h>
    62 #include <arch/faddr.h>
    6362#include <atomic.h>
    6463#include <memw.h>
     
    8281};
    8382
    84 enum sleep_state {
    85         SLEEP_INITIAL,
    86         SLEEP_ASLEEP,
    87         SLEEP_WOKE,
    88 };
    89 
    9083/** Lock protecting the @c threads ordered dictionary .
    9184 *
     
    115108static int threads_cmp(void *, void *);
    116109
    117 /** Thread wrapper.
    118  *
    119  * This wrapper is provided to ensure that every thread makes a call to
    120  * thread_exit() when its implementing function returns.
    121  *
    122  * interrupts_disable() is assumed.
    123  *
    124  */
    125 static void cushion(void)
    126 {
    127         void (*f)(void *) = THREAD->thread_code;
    128         void *arg = THREAD->thread_arg;
    129         THREAD->last_cycle = get_cycle();
    130 
    131         /* This is where each thread wakes up after its creation */
    132         irq_spinlock_unlock(&THREAD->lock, false);
    133         interrupts_enable();
    134 
    135         f(arg);
    136 
    137         thread_exit();
    138 
    139         /* Not reached */
    140 }
    141 
    142110/** Initialization and allocation for thread_t structure
    143111 *
     
    147115        thread_t *thread = (thread_t *) obj;
    148116
    149         irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    150117        link_initialize(&thread->rq_link);
    151118        link_initialize(&thread->wq_link);
     
    229196void thread_wire(thread_t *thread, cpu_t *cpu)
    230197{
    231         irq_spinlock_lock(&thread->lock, true);
    232         thread->cpu = cpu;
     198        ipl_t ipl = interrupts_disable();
     199        atomic_set_unordered(&thread->cpu, cpu);
    233200        thread->nomigrate++;
    234         irq_spinlock_unlock(&thread->lock, true);
    235 }
    236 
    237 /** Invoked right before thread_ready() readies the thread. thread is locked. */
    238 static void before_thread_is_ready(thread_t *thread)
    239 {
    240         assert(irq_spinlock_locked(&thread->lock));
    241 }
    242 
    243 /** Make thread ready
    244  *
    245  * Switch thread to the ready state. Consumes reference passed by the caller.
    246  *
    247  * @param thread Thread to make ready.
    248  *
    249  */
    250 void thread_ready(thread_t *thread)
    251 {
    252         irq_spinlock_lock(&thread->lock, true);
    253 
    254         assert(thread->state != Ready);
    255 
    256         before_thread_is_ready(thread);
    257 
    258         int i = (thread->priority < RQ_COUNT - 1) ?
    259             ++thread->priority : thread->priority;
    260 
    261         /* Prefer the CPU on which the thread ran last */
    262         cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
    263 
    264         thread->state = Ready;
    265 
    266         irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
    267 
    268         /*
    269          * Append thread to respective ready queue
    270          * on respective processor.
    271          */
    272 
    273         list_append(&thread->rq_link, &cpu->rq[i].rq);
    274         cpu->rq[i].n++;
    275         irq_spinlock_unlock(&(cpu->rq[i].lock), true);
    276 
    277         atomic_inc(&nrdy);
    278         atomic_inc(&cpu->nrdy);
     201        interrupts_restore(ipl);
     202}
     203
     204/** Start a thread that wasn't started yet since it was created.
     205 *
     206 * @param thread A reference to the newly created thread.
     207 */
     208void thread_start(thread_t *thread)
     209{
     210        assert(atomic_get_unordered(&thread->state) == Entering);
     211        thread_requeue_sleeping(thread_ref(thread));
    279212}
    280213
     
    315248        irq_spinlock_unlock(&tidlock, true);
    316249
    317         memset(&thread->saved_context, 0, sizeof(thread->saved_context));
    318         context_set(&thread->saved_context, FADDR(cushion),
    319             (uintptr_t) thread->kstack, STACK_SIZE);
     250        context_create(&thread->saved_context, thread_main_func,
     251            thread->kstack, STACK_SIZE);
    320252
    321253        current_initialize((current_t *) thread->kstack);
    322 
    323         ipl_t ipl = interrupts_disable();
    324         thread->saved_ipl = interrupts_read();
    325         interrupts_restore(ipl);
    326254
    327255        str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
     
    329257        thread->thread_code = func;
    330258        thread->thread_arg = arg;
    331         thread->ucycles = 0;
    332         thread->kcycles = 0;
     259        thread->ucycles = ATOMIC_TIME_INITIALIZER();
     260        thread->kcycles = ATOMIC_TIME_INITIALIZER();
    333261        thread->uncounted =
    334262            ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    335         thread->priority = -1;          /* Start in rq[0] */
    336         thread->cpu = NULL;
     263        atomic_init(&thread->priority, 0);
     264        atomic_init(&thread->cpu, NULL);
    337265        thread->stolen = false;
    338266        thread->uspace =
     
    340268
    341269        thread->nomigrate = 0;
    342         thread->state = Entering;
     270        atomic_init(&thread->state, Entering);
    343271
    344272        atomic_init(&thread->sleep_queue, NULL);
     
    360288#ifdef CONFIG_UDEBUG
    361289        /* Initialize debugging stuff */
    362         thread->btrace = false;
     290        atomic_init(&thread->btrace, false);
    363291        udebug_thread_initialize(&thread->udebug);
    364292#endif
     
    404332
    405333        if (!thread->uncounted) {
    406                 thread->task->ucycles += thread->ucycles;
    407                 thread->task->kcycles += thread->kcycles;
     334                thread->task->ucycles += atomic_time_read(&thread->ucycles);
     335                thread->task->kcycles += atomic_time_read(&thread->kcycles);
    408336        }
    409337
    410338        irq_spinlock_unlock(&thread->task->lock, false);
    411339
    412         assert((thread->state == Exiting) || (thread->state == Lingering));
     340        assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
    413341
    414342        /* Clear cpu->fpu_owner if set to this thread. */
    415343#ifdef CONFIG_FPU_LAZY
    416         if (thread->cpu) {
     344        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     345        if (cpu) {
    417346                /*
    418347                 * We need to lock for this because the old CPU can concurrently try
     
    420349                 * it to finish. An atomic compare-and-swap wouldn't be enough.
    421350                 */
    422                 irq_spinlock_lock(&thread->cpu->fpu_lock, false);
    423 
    424                 thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
    425                     memory_order_relaxed);
    426 
    427                 if (owner == thread) {
    428                         atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
    429                             memory_order_relaxed);
    430                 }
    431 
    432                 irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
     351                irq_spinlock_lock(&cpu->fpu_lock, false);
     352
     353                if (atomic_get_unordered(&cpu->fpu_owner) == thread)
     354                        atomic_set_unordered(&cpu->fpu_owner, NULL);
     355
     356                irq_spinlock_unlock(&cpu->fpu_lock, false);
    433357        }
    434358#endif
     
    525449        }
    526450
    527         irq_spinlock_lock(&THREAD->lock, true);
    528         THREAD->state = Exiting;
    529         irq_spinlock_unlock(&THREAD->lock, true);
    530 
    531         scheduler();
    532 
    533         panic("should never be reached");
     451        scheduler_enter(Exiting);
     452        unreachable();
    534453}
    535454
     
    579498}
    580499
    581 static void thread_wait_internal(void)
    582 {
    583         assert(THREAD != NULL);
    584 
    585         ipl_t ipl = interrupts_disable();
    586 
    587         if (atomic_load(&haltstate))
    588                 halt();
    589 
    590         /*
    591          * Lock here to prevent a race between entering the scheduler and another
    592          * thread rescheduling this thread.
    593          */
    594         irq_spinlock_lock(&THREAD->lock, false);
    595 
    596         int expected = SLEEP_INITIAL;
    597 
    598         /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
    599         if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
    600             SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
    601                 THREAD->state = Sleeping;
    602                 scheduler_locked(ipl);
    603         } else {
    604                 assert(expected == SLEEP_WOKE);
    605                 /* Return immediately. */
    606                 irq_spinlock_unlock(&THREAD->lock, false);
    607                 interrupts_restore(ipl);
    608         }
    609 }
    610 
    611500static void thread_wait_timeout_callback(void *arg)
    612501{
     
    649538        timeout_t timeout;
    650539
     540        /* Extra check to avoid going to scheduler if we don't need to. */
     541        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
     542            SLEEP_INITIAL)
     543                return THREAD_WAIT_SUCCESS;
     544
    651545        if (deadline != DEADLINE_NEVER) {
    652                 /* Extra check to avoid setting up a deadline if we don't need to. */
    653                 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
    654                     SLEEP_INITIAL)
    655                         return THREAD_WAIT_SUCCESS;
    656 
    657546                timeout_initialize(&timeout);
    658547                timeout_register_deadline(&timeout, deadline,
     
    660549        }
    661550
    662         thread_wait_internal();
     551        scheduler_enter(Sleeping);
    663552
    664553        if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
     
    674563
    675564        int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
    676             memory_order_release);
     565            memory_order_acq_rel);
    677566
    678567        if (state == SLEEP_ASLEEP) {
     
    682571                 * the waking thread by the sleeper in thread_wait_finish().
    683572                 */
    684                 thread_ready(thread);
     573                thread_requeue_sleeping(thread);
    685574        }
    686575}
     
    689578void thread_migration_disable(void)
    690579{
     580        ipl_t ipl = interrupts_disable();
     581
    691582        assert(THREAD);
    692 
    693583        THREAD->nomigrate++;
     584
     585        interrupts_restore(ipl);
    694586}
    695587
     
    697589void thread_migration_enable(void)
    698590{
     591        ipl_t ipl = interrupts_disable();
     592
    699593        assert(THREAD);
    700594        assert(THREAD->nomigrate > 0);
     
    702596        if (THREAD->nomigrate > 0)
    703597                THREAD->nomigrate--;
     598
     599        interrupts_restore(ipl);
    704600}
    705601
     
    731627
    732628/** Wait for another thread to exit.
    733  * This function does not destroy the thread. Reference counting handles that.
     629 * After successful wait, the thread reference is destroyed.
    734630 *
    735631 * @param thread Thread to join on exit.
     
    742638errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
    743639{
     640        assert(thread != NULL);
     641
    744642        if (thread == THREAD)
    745643                return EINVAL;
    746644
    747         irq_spinlock_lock(&thread->lock, true);
    748         state_t state = thread->state;
    749         irq_spinlock_unlock(&thread->lock, true);
    750 
    751         if (state == Exiting) {
    752                 return EOK;
    753         } else {
    754                 return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
    755         }
     645        errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
     646
     647        if (rc == EOK)
     648                thread_put(thread);
     649
     650        return rc;
     651}
     652
     653void thread_detach(thread_t *thread)
     654{
     655        thread_put(thread);
    756656}
    757657
     
    770670
    771671        (void) waitq_sleep_timeout(&wq, usec);
     672}
     673
     674/** Allow other threads to run. */
     675void thread_yield(void)
     676{
     677        assert(THREAD != NULL);
     678        scheduler_enter(Running);
    772679}
    773680
     
    776683        uint64_t ucycles, kcycles;
    777684        char usuffix, ksuffix;
    778         order_suffix(thread->ucycles, &ucycles, &usuffix);
    779         order_suffix(thread->kcycles, &kcycles, &ksuffix);
     685        order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
     686        order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
     687
     688        state_t state = atomic_get_unordered(&thread->state);
    780689
    781690        char *name;
     
    791700        else
    792701                printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
    793                     thread->tid, name, thread, thread_states[thread->state],
     702                    thread->tid, name, thread, thread_states[state],
    794703                    thread->task, thread->task->container);
    795704
    796705        if (additional) {
    797                 if (thread->cpu)
    798                         printf("%-5u", thread->cpu->id);
     706                cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     707                if (cpu)
     708                        printf("%-5u", cpu->id);
    799709                else
    800710                        printf("none ");
    801711
    802                 if (thread->state == Sleeping) {
     712                if (state == Sleeping) {
    803713                        printf(" %p", thread->sleep_queue);
    804714                }
     
    879789void thread_update_accounting(bool user)
    880790{
     791        assert(interrupts_disabled());
     792
    881793        uint64_t time = get_cycle();
    882794
    883         assert(interrupts_disabled());
    884         assert(irq_spinlock_locked(&THREAD->lock));
    885 
    886795        if (user)
    887                 THREAD->ucycles += time - THREAD->last_cycle;
     796                atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
    888797        else
    889                 THREAD->kcycles += time - THREAD->last_cycle;
     798                atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
    890799
    891800        THREAD->last_cycle = time;
     
    998907         */
    999908
    1000         irq_spinlock_lock(&thread->lock, true);
    1001 
    1002         bool sleeping = false;
    1003         istate_t *istate = thread->udebug.uspace_state;
    1004         if (istate != NULL) {
    1005                 printf("Scheduling thread stack trace.\n");
    1006                 thread->btrace = true;
    1007                 if (thread->state == Sleeping)
    1008                         sleeping = true;
    1009         } else
    1010                 printf("Thread interrupt state not available.\n");
    1011 
    1012         irq_spinlock_unlock(&thread->lock, true);
    1013 
    1014         if (sleeping)
    1015                 thread_wakeup(thread);
    1016 
     909        printf("Scheduling thread stack trace.\n");
     910        atomic_set_unordered(&thread->btrace, true);
     911
     912        thread_wakeup(thread);
    1017913        thread_put(thread);
    1018914}
     
    11151011                thread_attach(thread, TASK);
    11161012#endif
    1117                 thread_ready(thread);
     1013                thread_start(thread);
     1014                thread_put(thread);
    11181015
    11191016                return 0;
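Taken together, the thread.c changes give thread references a simple discipline: thread_create() returns a reference, thread_start() begins execution (internally handing the run queue its own reference via thread_ref()), and the creator must then either thread_detach() or successfully join, since thread_join_timeout() now consumes the reference on EOK. A condensed sketch of the resulting usage, built from the functions shown in the hunks above and assuming thread_join() is the usual no-timeout wrapper around thread_join_timeout() (the worker function name is illustrative):

    /* Fire-and-forget: */
    thread_t *thread = thread_create(worker, NULL, TASK,
        THREAD_FLAG_NONE, "worker");
    if (thread != NULL) {
            thread_start(thread);
            thread_detach(thread);   /* drop the creator's reference */
    }

    /* Or wait for completion: */
    thread_t *joined = thread_create(worker, NULL, TASK,
        THREAD_FLAG_NONE, "worker");
    if (joined != NULL) {
            thread_start(joined);
            if (thread_join(joined) != EOK)
                    thread_detach(joined);  /* still ours on failure */
    }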
  • kernel/generic/src/security/perm.c

    r86f862c rf2cb80a  
    8989                return EPERM;
    9090
    91         irq_spinlock_lock(&tasks_lock, true);
    9291        task_t *task = task_find_by_id(taskid);
    93 
    94         if ((!task) || (!container_check(CONTAINER, task->container))) {
    95                 irq_spinlock_unlock(&tasks_lock, true);
     92        if (!task)
    9693                return ENOENT;
     94
     95        errno_t rc = ENOENT;
     96
     97        irq_spinlock_lock(&task->lock, true);
     98        if (container_check(CONTAINER, task->container)) {
     99                task->perms |= perms;
     100                rc = EOK;
    97101        }
    98 
    99         irq_spinlock_lock(&task->lock, false);
    100         task->perms |= perms;
    101         irq_spinlock_unlock(&task->lock, false);
    102 
    103         irq_spinlock_unlock(&tasks_lock, true);
    104         return EOK;
     102        irq_spinlock_unlock(&task->lock, true);
     103
     104        task_release(task);
     105        return rc;
    105106}
    106107
     
    118119static errno_t perm_revoke(task_id_t taskid, perm_t perms)
    119120{
    120         irq_spinlock_lock(&tasks_lock, true);
    121 
    122121        task_t *task = task_find_by_id(taskid);
    123         if ((!task) || (!container_check(CONTAINER, task->container))) {
    124                 irq_spinlock_unlock(&tasks_lock, true);
     122        if (!task)
    125123                return ENOENT;
    126         }
    127124
    128125        /*
     
    131128         * doesn't have PERM_PERM.
    132129         */
    133         irq_spinlock_lock(&TASK->lock, false);
    134 
    135         if ((!(TASK->perms & PERM_PERM)) || (task != TASK)) {
    136                 irq_spinlock_unlock(&TASK->lock, false);
    137                 irq_spinlock_unlock(&tasks_lock, true);
     130        if (task != TASK && !(perm_get(TASK) & PERM_PERM)) {
     131                task_release(task);
    138132                return EPERM;
    139133        }
    140134
    141         task->perms &= ~perms;
    142         irq_spinlock_unlock(&TASK->lock, false);
    143 
    144         irq_spinlock_unlock(&tasks_lock, true);
    145         return EOK;
     135        errno_t rc = ENOENT;
     136
     137        irq_spinlock_lock(&task->lock, true);
     138        if (container_check(CONTAINER, task->container)) {
     139                task->perms &= ~perms;
     140                rc = EOK;
     141        }
     142        irq_spinlock_unlock(&task->lock, true);
     143
     144        task_release(task);
     145        return rc;
    146146}
    147147
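Both perm_grant() and perm_revoke() now follow the same shape: take a counted reference via task_find_by_id(), lock only that task while flipping the permission bits, and drop the reference on every exit path; the global tasks_lock is never held across the update anymore. The skeleton common to both, extracted for clarity (perm_update is a hypothetical helper; the calls are the same ones used in the hunks above):

    static errno_t perm_update(task_id_t taskid, perm_t set, perm_t clear)
    {
            task_t *task = task_find_by_id(taskid);  /* + reference */
            if (!task)
                    return ENOENT;

            errno_t rc = ENOENT;

            irq_spinlock_lock(&task->lock, true);
            if (container_check(CONTAINER, task->container)) {
                    task->perms = (task->perms | set) & ~clear;
                    rc = EOK;
            }
            irq_spinlock_unlock(&task->lock, true);

            task_release(task);   /* drop the lookup's reference */
            return rc;
    }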
  • kernel/generic/src/syscall/syscall.c

    r86f862c rf2cb80a  
    141141{
    142142        /* Do userpace accounting */
    143         irq_spinlock_lock(&THREAD->lock, true);
     143        ipl_t ipl = interrupts_disable();
    144144        thread_update_accounting(true);
    145         irq_spinlock_unlock(&THREAD->lock, true);
     145        interrupts_restore(ipl);
    146146
    147147#ifdef CONFIG_UDEBUG
     
    191191
    192192        /* Do kernel accounting */
    193         irq_spinlock_lock(&THREAD->lock, true);
     193        ipl = interrupts_disable();
    194194        thread_update_accounting(false);
    195         irq_spinlock_unlock(&THREAD->lock, true);
     195        interrupts_restore(ipl);
    196196
    197197        return rc;
  • kernel/generic/src/sysinfo/stats.c

    r86f862c rf2cb80a  
    221221        stats_task->virtmem = get_task_virtmem(task->as);
    222222        stats_task->resmem = get_task_resmem(task->as);
    223         stats_task->threads = atomic_load(&task->refcount);
     223        stats_task->threads = atomic_load(&task->lifecount);
    224224        task_get_accounting(task, &(stats_task->ucycles),
    225225            &(stats_task->kcycles));
     
    299299{
    300300        assert(interrupts_disabled());
    301         assert(irq_spinlock_locked(&thread->lock));
    302301
    303302        stats_thread->thread_id = thread->tid;
    304303        stats_thread->task_id = thread->task->taskid;
    305         stats_thread->state = thread->state;
    306         stats_thread->priority = thread->priority;
    307         stats_thread->ucycles = thread->ucycles;
    308         stats_thread->kcycles = thread->kcycles;
    309 
    310         if (thread->cpu != NULL) {
     304        stats_thread->state = atomic_get_unordered(&thread->state);
     305        stats_thread->priority = atomic_get_unordered(&thread->priority);
     306        stats_thread->ucycles = atomic_time_read(&thread->ucycles);
     307        stats_thread->kcycles = atomic_time_read(&thread->kcycles);
     308
     309        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     310
     311        if (cpu != NULL) {
    311312                stats_thread->on_cpu = true;
    312                 stats_thread->cpu = thread->cpu->id;
     313                stats_thread->cpu = cpu->id;
    313314        } else
    314315                stats_thread->on_cpu = false;
     
    361362        thread_t *thread = thread_first();
    362363        while (thread != NULL) {
    363                 /* Interrupts are already disabled */
    364                 irq_spinlock_lock(&thread->lock, false);
    365 
    366364                /* Record the statistics and increment the index */
    367365                produce_stats_thread(thread, &stats_threads[i]);
    368366                i++;
    369 
    370                 irq_spinlock_unlock(&thread->lock, false);
    371367
    372368                thread = thread_next(thread);
     
    515511{
    516512        /* Initially no return value */
    517         sysinfo_return_t ret;
    518         ret.tag = SYSINFO_VAL_UNDEFINED;
     513        sysinfo_return_t ret = {
     514                .tag = SYSINFO_VAL_UNDEFINED,
     515        };
    519516
    520517        /* Parse the task ID */
     
    523520                return ret;
    524521
    525         /* Messing with task structures, avoid deadlock */
    526         irq_spinlock_lock(&tasks_lock, true);
    527 
    528522        task_t *task = task_find_by_id(task_id);
    529         if (task == NULL) {
    530                 /* No task with this ID */
    531                 irq_spinlock_unlock(&tasks_lock, true);
     523        if (!task)
    532524                return ret;
    533         }
    534525
    535526        if (dry_run) {
     
    537528                ret.data.data = NULL;
    538529                ret.data.size = sizeof(stats_task_t);
    539 
    540                 irq_spinlock_unlock(&tasks_lock, true);
    541530        } else {
    542531                /* Allocate stats_task_t structure */
    543                 stats_task_t *stats_task =
    544                     (stats_task_t *) malloc(sizeof(stats_task_t));
    545                 if (stats_task == NULL) {
    546                         irq_spinlock_unlock(&tasks_lock, true);
    547                         return ret;
     532                stats_task_t *stats_task = malloc(sizeof(stats_task_t));
     533
     534                if (stats_task != NULL) {
     535                        /* Correct return value */
     536                        ret.tag = SYSINFO_VAL_FUNCTION_DATA;
     537                        ret.data.data = stats_task;
     538                        ret.data.size = sizeof(stats_task_t);
     539
     540                        irq_spinlock_lock(&task->lock, true);
     541                        produce_stats_task(task, stats_task);
     542                        irq_spinlock_unlock(&task->lock, true);
    548543                }
    549 
    550                 /* Correct return value */
    551                 ret.tag = SYSINFO_VAL_FUNCTION_DATA;
    552                 ret.data.data = (void *) stats_task;
    553                 ret.data.size = sizeof(stats_task_t);
    554 
    555                 /* Hand-over-hand locking */
    556                 irq_spinlock_exchange(&tasks_lock, &task->lock);
    557 
    558                 produce_stats_task(task, stats_task);
    559 
    560                 irq_spinlock_unlock(&task->lock, true);
    561         }
    562 
     544        }
     545
     546        task_release(task);
    563547        return ret;
    564548}
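
In the hunk above, get_stats_task no longer does hand-over-hand locking from tasks_lock into task->lock; instead, task_find_by_id() hands back a referenced task that stays valid until task_release(). A sketch of that lookup-with-reference pattern in portable C — the list layout, locking, and names are hypothetical, not the kernel's actual structures:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct task {
            uint64_t id;
            atomic_int refcount;
            struct task *next;
    } task_t;

    static pthread_mutex_t tasks_lock = PTHREAD_MUTEX_INITIALIZER;
    static task_t *tasks;   /* global list of live tasks */

    /*
     * The reference is taken while the list lock is held, so once this
     * returns, the caller may drop all list locking: the task cannot be
     * freed under it until it calls task_release().
     */
    static task_t *task_find_by_id(uint64_t id)
    {
            pthread_mutex_lock(&tasks_lock);
            task_t *t = tasks;
            while (t != NULL && t->id != id)
                    t = t->next;
            if (t != NULL)
                    atomic_fetch_add(&t->refcount, 1);
            pthread_mutex_unlock(&tasks_lock);
            return t;
    }

    static void task_release(task_t *t)
    {
            /*
             * Last reference out frees the structure (a full
             * implementation would also unlink it from the list first;
             * that is omitted here).
             */
            if (atomic_fetch_sub(&t->refcount, 1) == 1)
                    free(t);
    }
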
     
    624608                ret.data.size = sizeof(stats_thread_t);
    625609
    626                 /*
    627                  * Replaced hand-over-hand locking with regular nested sections
    628                  * to avoid weak reference leak issues.
    629                  */
    630                 irq_spinlock_lock(&thread->lock, false);
    631610                produce_stats_thread(thread, stats_thread);
    632                 irq_spinlock_unlock(&thread->lock, false);
    633611
    634612                irq_spinlock_unlock(&threads_lock, true);
  • kernel/generic/src/time/clock.c

    r86f862c rf2cb80a  
    123123static void cpu_update_accounting(void)
    124124{
     125        // FIXME: get_cycle() is unimplemented on several platforms
    125126        uint64_t now = get_cycle();
    126         atomic_time_increment(&CPU->busy_cycles, now - CPU->last_cycle);
    127         CPU->last_cycle = now;
     127        atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);
     128        CPU_LOCAL->last_cycle = now;
    128129}
    129130
     
    137138void clock(void)
    138139{
    139         size_t missed_clock_ticks = CPU->missed_clock_ticks;
    140         CPU->missed_clock_ticks = 0;
    141 
    142         CPU->current_clock_tick += missed_clock_ticks + 1;
    143         uint64_t current_clock_tick = CPU->current_clock_tick;
     140        size_t missed_clock_ticks = CPU_LOCAL->missed_clock_ticks;
     141        CPU_LOCAL->missed_clock_ticks = 0;
     142
     143        CPU_LOCAL->current_clock_tick += missed_clock_ticks + 1;
     144        uint64_t current_clock_tick = CPU_LOCAL->current_clock_tick;
    144145        clock_update_counters(current_clock_tick);
    145146
     
    186187
    187188        if (THREAD) {
    188                 if (current_clock_tick >= CPU->preempt_deadline && PREEMPTION_ENABLED) {
    189                         scheduler();
     189                if (current_clock_tick >= CPU_LOCAL->preempt_deadline && PREEMPTION_ENABLED) {
     190                        thread_yield();
    190191#ifdef CONFIG_UDEBUG
    191192                        /*
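
Throughout clock.c (and in timeout.c below), state that only the owning processor ever touches is now reached through CPU_LOCAL rather than the general CPU pointer, which makes the no-locking contract explicit. A rough userspace analogue with C11 thread-local storage — per-thread standing in for per-CPU, all names invented for illustration:

    #include <stddef.h>
    #include <stdint.h>
    #include <threads.h>

    /*
     * Data private to its owner needs no lock; the kernel keeps one
     * instance per processor, modelled here with one per thread.
     */
    static thread_local struct {
            uint64_t last_cycle;
            uint64_t current_clock_tick;
            size_t missed_clock_ticks;
    } cpu_local;

    static void clock_tick(void)
    {
            size_t missed = cpu_local.missed_clock_ticks;
            cpu_local.missed_clock_ticks = 0;

            /* Catch up on ticks that fired while interrupts were off. */
            cpu_local.current_clock_tick += missed + 1;
    }
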
  • kernel/generic/src/time/timeout.c

    r86f862c rf2cb80a  
    7777                return 0;
    7878
    79         return CPU->current_clock_tick + us2ticks(usec);
     79        return CPU_LOCAL->current_clock_tick + us2ticks(usec);
    8080}
    8181
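
The timeout.c helper turns a relative microsecond delay into an absolute expiration tick against the per-CPU clock. A self-contained sketch of that conversion under an assumed tick rate; us2ticks here is a stand-in, not the kernel's definition:

    #include <stdint.h>

    enum { TICKS_PER_SEC = 100 };   /* assumed HZ, for illustration only */

    static uint64_t us2ticks(uint64_t usec)
    {
            /* Round up so a short timeout can never expire early. */
            return (usec * TICKS_PER_SEC + 999999) / 1000000;
    }

    /*
     * By convention 0 means "no deadline", matching the early return in
     * the hunk above; otherwise the result is an absolute tick count.
     */
    static uint64_t deadline_ticks(uint64_t current_tick, uint64_t usec)
    {
            if (usec == 0)
                    return 0;
            return current_tick + us2ticks(usec);
    }
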
  • kernel/generic/src/udebug/udebug_ops.c

    r86f862c rf2cb80a  
    9090        }
    9191
    92         irq_spinlock_lock(&thread->lock, true);
    93 
    9492        /* Verify that 'thread' is a userspace thread. */
    9593        if (!thread->uspace) {
    96                 /* It's not, deny its existence */
    97                 irq_spinlock_unlock(&thread->lock, true);
    9894                mutex_unlock(&TASK->udebug.lock);
    9995                return ENOENT;
    10096        }
    101 
    102         /* Verify debugging state. */
    103         if (thread->udebug.active != true) {
    104                 /* Not in debugging session or undesired GO state */
    105                 irq_spinlock_unlock(&thread->lock, true);
    106                 mutex_unlock(&TASK->udebug.lock);
    107                 return ENOENT;
    108         }
    109 
    110         /* Now verify that the thread belongs to the current task. */
    111         if (thread->task != TASK) {
    112                 /* No such thread belonging to this task */
    113                 irq_spinlock_unlock(&thread->lock, true);
    114                 mutex_unlock(&TASK->udebug.lock);
    115                 return ENOENT;
    116         }
    117 
    118         irq_spinlock_unlock(&thread->lock, true);
    119 
    120         /* Only mutex TASK->udebug.lock left. */
    12197
    12298        /*
     
    126102         */
    127103        mutex_lock(&thread->udebug.lock);
     104
     105        /* Verify debugging state. */
     106        if (thread->udebug.active != true) {
     107                /* Not in debugging session or undesired GO state */
     108                mutex_unlock(&thread->udebug.lock);
     109                mutex_unlock(&TASK->udebug.lock);
     110                return ENOENT;
     111        }
     112
     113        /* Now verify that the thread belongs to the current task. */
     114        if (thread->task != TASK) {
     115                /* No such thread belonging to this task */
     116                mutex_unlock(&thread->udebug.lock);
     117                mutex_unlock(&TASK->udebug.lock);
     118                return ENOENT;
     119        }
    128120
    129121        /* The big task mutex is no longer needed. */
     
    388380        /* FIXME: make sure the thread isn't past debug shutdown... */
    389381        list_foreach(TASK->threads, th_link, thread_t, thread) {
    390                 irq_spinlock_lock(&thread->lock, false);
    391382                bool uspace = thread->uspace;
    392                 irq_spinlock_unlock(&thread->lock, false);
    393383
    394384                /* Not interested in kernel threads. */
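
The udebug_ops.c reshuffle above moves the active-session and task-membership checks out from under the thread's irq spinlock and under thread->udebug.lock, the mutex that actually guards that state, while the immutable uspace flag is now read with no lock at all. A condensed sketch of the resulting discipline, with pthread mutexes standing in for the kernel primitives and all names illustrative:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
            bool uspace;    /* set once at creation, never changes */
            struct {
                    pthread_mutex_t lock;   /* guards 'active' */
                    bool active;
            } udebug;
    } thread_t;

    /*
     * Immutable fields may be tested without locking; mutable debug
     * state must be tested under the lock that guards it, and that lock
     * is then held across the operation so the check cannot go stale.
     */
    static int verify_and_lock(thread_t *t)
    {
            if (!t->uspace)
                    return ENOENT;

            pthread_mutex_lock(&t->udebug.lock);
            if (!t->udebug.active) {
                    pthread_mutex_unlock(&t->udebug.lock);
                    return ENOENT;
            }
            return 0;   /* caller proceeds, then unlocks t->udebug.lock */
    }
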
  • kernel/test/mm/falloc2.c

    r86f862c rf2cb80a  
    4343#define THREADS      8
    4444
    45 static atomic_size_t thread_cnt;
    4645static atomic_size_t thread_fail;
    4746
     
    5655                    "Unable to allocate frames\n", THREAD->tid, CPU->id);
    5756                atomic_inc(&thread_fail);
    58                 atomic_dec(&thread_cnt);
    5957                return;
    6058        }
     
    108106        TPRINTF("Thread #%" PRIu64 " (cpu%u): Exiting\n",
    109107            THREAD->tid, CPU->id);
    110         atomic_dec(&thread_cnt);
    111108}
    112109
    113110const char *test_falloc2(void)
    114111{
    115         atomic_store(&thread_cnt, THREADS);
    116112        atomic_store(&thread_fail, 0);
     113
     114        thread_t *threads[THREADS] = { };
    117115
    118116        for (unsigned int i = 0; i < THREADS; i++) {
     
    123121                        break;
    124122                }
    125                 thread_ready(thrd);
     123                thread_start(thrd);
     124                threads[i] = thrd;
    126125        }
    127126
    128         while (atomic_load(&thread_cnt) > 0) {
    129                 TPRINTF("Threads left: %zu\n",
    130                     atomic_load(&thread_cnt));
    131                 thread_sleep(1);
     127        for (unsigned int i = 0; i < THREADS; i++) {
     128                if (threads[i] != NULL)
     129                        thread_join(threads[i]);
     130
     131                TPRINTF("Threads left: %u\n", THREADS - i - 1);
    132132        }
    133133
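
The falloc2 test now records the threads it creates and joins each one, instead of having workers decrement a shared counter that the main thread polls once a second. The same shape in POSIX threads, as a minimal sketch with a placeholder worker body:

    #include <pthread.h>
    #include <stdbool.h>

    enum { THREADS = 8 };

    static void *worker(void *arg)
    {
            (void) arg;
            /* ... the actual test body would run here ... */
            return NULL;
    }

    int main(void)
    {
            pthread_t threads[THREADS];
            bool started[THREADS] = { false };

            for (int i = 0; i < THREADS; i++)
                    started[i] =
                        (pthread_create(&threads[i], NULL, worker, NULL) == 0);

            /*
             * Joining blocks until each worker has exited, so no
             * completion counter and no sleep-and-poll loop are needed.
             */
            for (int i = 0; i < THREADS; i++)
                    if (started[i])
                            pthread_join(threads[i], NULL);

            return 0;
    }
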
  • kernel/test/mm/slab1.c

    r86f862c rf2cb80a  
    121121static void *thr_data[THREADS][THR_MEM_COUNT];
    122122static slab_cache_t *thr_cache;
    123 static semaphore_t thr_sem;
    124123
    125124static void slabtest(void *data)
     
    142141
    143142        TPRINTF("Thread #%" PRIu64 " finished\n", THREAD->tid);
    144 
    145         semaphore_up(&thr_sem);
    146143}
    147144
    148145static void testthreads(void)
    149146{
    150         thread_t *t;
    151         int i;
    152 
    153147        thr_cache = slab_cache_create("thread_cache", THR_MEM_SIZE, 0, NULL, NULL,
    154148            SLAB_CACHE_NOMAGAZINE);
    155149
    156         semaphore_initialize(&thr_sem, 0);
    157         for (i = 0; i < THREADS; i++) {
    158                 if (!(t = thread_create(slabtest, (void *) (sysarg_t) i, TASK, THREAD_FLAG_NONE, "slabtest"))) {
     150        thread_t *threads[THREADS] = { };
     151
     152        for (int i = 0; i < THREADS; i++) {
     153                threads[i] = thread_create(slabtest, (void *) (sysarg_t) i,
     154                    TASK, THREAD_FLAG_NONE, "slabtest");
     155                if (threads[i]) {
     156                        thread_start(threads[i]);
     157                } else {
    159158                        TPRINTF("Could not create thread %d\n", i);
    160                 } else
    161                         thread_ready(t);
     159                }
    162160        }
    163161
    164         for (i = 0; i < THREADS; i++)
    165                 semaphore_down(&thr_sem);
     162        for (int i = 0; i < THREADS; i++) {
     163                if (threads[i] != NULL)
     164                        thread_join(threads[i]);
     165        }
    166166
    167167        slab_cache_destroy(thr_cache);
  • kernel/test/mm/slab2.c

    r86f862c rf2cb80a  
    127127
    128128static slab_cache_t *thr_cache;
    129 static semaphore_t thr_sem;
    130129static condvar_t thread_starter;
    131130static mutex_t starter_mutex;
     
    188187        if (!test_quiet)
    189188                slab_print_list();
    190 
    191         semaphore_up(&thr_sem);
    192189}
    193190
     
    198195         * then release everything, then again allocate, then release
    199196         */
    200         thread_t *t;
    201         int i;
    202197
    203198        TPRINTF("Running stress test with size %d\n", size);
     
    207202
    208203        thr_cache = slab_cache_create("thread_cache", size, 0, NULL, NULL, 0);
    209         semaphore_initialize(&thr_sem, 0);
    210         for (i = 0; i < THREADS; i++) {
    211                 if (!(t = thread_create(slabtest, NULL, TASK, THREAD_FLAG_NONE, "slabtest"))) {
     204
     205        thread_t *threads[THREADS] = { };
     206
     207        for (int i = 0; i < THREADS; i++) {
     208                threads[i] = thread_create(slabtest, NULL,
     209                    TASK, THREAD_FLAG_NONE, "slabtest");
     210                if (threads[i]) {
     211                        thread_start(threads[i]);
     212                } else {
    212213                        TPRINTF("Could not create thread %d\n", i);
    213                 } else
    214                         thread_ready(t);
    215         }
     214                }
     215        }
     216
    216217        thread_sleep(1);
    217218        condvar_broadcast(&thread_starter);
    218219
    219         for (i = 0; i < THREADS; i++)
    220                 semaphore_down(&thr_sem);
     220        for (int i = 0; i < THREADS; i++) {
     221                if (threads[i] != NULL)
     222                        thread_join(threads[i]);
     223        }
    221224
    222225        slab_cache_destroy(thr_cache);
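
slab2.c keeps its condvar-based starter so that all workers begin stressing the allocator at the same moment. The customary userspace form of that gate pairs the condition variable with a boolean predicate, which makes a broadcast that fires before a worker blocks harmless (the kernel test instead sleeps a second before broadcasting); names below are illustrative:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t starter_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t thread_starter = PTHREAD_COND_INITIALIZER;
    static bool start_signalled = false;

    /*
     * Workers park here until the test fires the starting gun, so all
     * of them hit the allocator at once.
     */
    static void wait_for_start(void)
    {
            pthread_mutex_lock(&starter_mutex);
            while (!start_signalled)
                    pthread_cond_wait(&thread_starter, &starter_mutex);
            pthread_mutex_unlock(&starter_mutex);
    }

    /*
     * The predicate makes the broadcast safe even if it happens before
     * some worker has reached pthread_cond_wait().
     */
    static void release_all_threads(void)
    {
            pthread_mutex_lock(&starter_mutex);
            start_signalled = true;
            pthread_cond_broadcast(&thread_starter);
            pthread_mutex_unlock(&starter_mutex);
    }
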
  • kernel/test/synch/semaphore1.c

    r86f862c rf2cb80a  
    8989                                thrd = thread_create(consumer, NULL, TASK,
    9090                                    THREAD_FLAG_NONE, "consumer");
    91                                 if (thrd)
    92                                         thread_ready(thrd);
    93                                 else
     91                                if (thrd) {
     92                                        thread_start(thrd);
     93                                        thread_detach(thrd);
     94                                } else {
    9495                                        TPRINTF("could not create consumer %d\n", i);
     96                                }
    9597                        }
    9698                        for (k = 0; k < (4 - i); k++) {
    9799                                thrd = thread_create(producer, NULL, TASK,
    98100                                    THREAD_FLAG_NONE, "producer");
    99                                 if (thrd)
    100                                         thread_ready(thrd);
    101                                 else
     101                                if (thrd) {
     102                                        thread_start(thrd);
     103                                        thread_detach(thrd);
     104                                } else {
    102105                                        TPRINTF("could not create producer %d\n", i);
     106                                }
    103107                        }
    104108                }
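
semaphore1.c (and semaphore2.c below) now follows thread_start() with thread_detach() for threads that are never joined, so their resources are reclaimed automatically when they exit. The POSIX rendering of that fire-and-forget idiom, with a hypothetical worker:

    #include <pthread.h>

    static void *producer(void *arg)
    {
            (void) arg;
            /* ... worker body ... */
            return NULL;
    }

    /*
     * Detaching tells the system nobody will ever join this thread, so
     * its bookkeeping is released as soon as it returns.
     */
    static int spawn_detached(void)
    {
            pthread_t t;
            int rc = pthread_create(&t, NULL, producer, NULL);
            if (rc == 0)
                    pthread_detach(t);
            return rc;
    }
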
  • kernel/test/synch/semaphore2.c

    r86f862c rf2cb80a  
    9292                thrd = thread_create(consumer, NULL, TASK,
    9393                    THREAD_FLAG_NONE, "consumer");
    94                 if (thrd)
    95                         thread_ready(thrd);
    96                 else
     94                if (thrd) {
     95                        thread_start(thrd);
     96                        thread_detach(thrd);
     97                } else {
    9798                        TPRINTF("Error creating thread\n");
     99                }
    98100        }
    99101
  • kernel/test/thread/thread1.c

    r86f862c rf2cb80a  
    3838
    3939static atomic_bool finish;
    40 static atomic_size_t threads_finished;
    4140
    4241static void threadtest(void *data)
     
    4645                thread_usleep(100000);
    4746        }
    48         atomic_inc(&threads_finished);
    4947}
    5048
    5149const char *test_thread1(void)
    5250{
    53         unsigned int i;
    54         size_t total = 0;
     51        atomic_store(&finish, true);
    5552
    56         atomic_store(&finish, true);
    57         atomic_store(&threads_finished, 0);
     53        thread_t *threads[THREADS] = { };
    5854
    59         for (i = 0; i < THREADS; i++) {
    60                 thread_t *t;
    61                 if (!(t = thread_create(threadtest, NULL, TASK,
    62                     THREAD_FLAG_NONE, "threadtest"))) {
     55        for (int i = 0; i < THREADS; i++) {
     56                threads[i] = thread_create(threadtest, NULL,
     57                    TASK, THREAD_FLAG_NONE, "threadtest");
     58
     59                if (threads[i]) {
     60                        thread_start(threads[i]);
     61                } else {
    6362                        TPRINTF("Could not create thread %d\n", i);
    6463                        break;
    6564                }
    66                 thread_ready(t);
    67                 total++;
    6865        }
    6966
     
    7269
    7370        atomic_store(&finish, false);
    74         while (atomic_load(&threads_finished) < total) {
    75                 TPRINTF("Threads left: %zu\n", total - atomic_load(&threads_finished));
    76                 thread_sleep(1);
     71
     72        for (int i = 0; i < THREADS; i++) {
     73                if (threads[i] != NULL)
     74                        thread_join(threads[i]);
     75
     76                TPRINTF("Threads left: %d\n", THREADS - i - 1);
    7777        }
    7878
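
thread1.c keeps its atomic finish flag but now also joins its workers, giving a clean two-step shutdown: flip the flag the workers poll, then wait for each of them to exit. A compact pthread sketch of that protocol; names and timings are illustrative:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <unistd.h>

    static atomic_bool keep_running;

    static void *worker(void *arg)
    {
            (void) arg;
            /*
             * Poll the stop flag; a lone boolean that flips once needs
             * no lock, only atomicity.
             */
            while (atomic_load(&keep_running))
                    usleep(100000);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            atomic_init(&keep_running, true);
            if (pthread_create(&t, NULL, worker, NULL) != 0)
                    return 1;

            sleep(1);
            atomic_store(&keep_running, false); /* ask the worker to stop */
            pthread_join(t, NULL);              /* then wait for its exit */
            return 0;
    }
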