Changeset 4760793 in mainline for kernel/generic/src


Timestamp:
2024-01-14T18:23:40Z
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master
Children:
5663872, c7ceacf
Parents:
3b68542
Message:

Add CPU_LOCAL alongside CPU and segregate fields that are only used locally

This makes it clearer which fields can be used without synchronization
and which need more care.

Location:
kernel/generic/src
Files:
6 edited
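
For context, the struct definitions themselves live in kernel/generic/include/cpu.h, which is not part of this changeset. Below is a minimal sketch of what the split might look like, inferred purely from the usage sites in the diffs; the field types, the exact layout, and the CPU_LOCAL definition are assumptions, not the actual header:

    /* Hypothetical sketch; names and types inferred from this changeset. */
    typedef struct cpu_local {
        /* Only ever touched by the owning CPU, so no synchronization needed. */
        uint8_t *stack;
        bool idle;
        uint64_t last_cycle;
        uint64_t current_clock_tick;
        uint64_t preempt_deadline;
        uint64_t relink_deadline;
        size_t missed_clock_ticks;
    } cpu_local_t;

    typedef struct cpu {
        /* Potentially read by other CPUs; access needs more care. */
        unsigned int id;
        bool tlb_active;
        atomic_time_stat_t idle_cycles;
        atomic_time_stat_t busy_cycles;

        /* The CPU-private part, visible as cpus[i].local in cpu.c below. */
        cpu_local_t local;
    } cpu_t;

    /* Assuming CPU expands to the current CPU's cpu_t, CPU_LOCAL would be: */
    #define CPU_LOCAL (&CPU->local)

With a layout like this, every CPU_LOCAL access in the hunks below touches only the current CPU's private fields, while the remaining CPU-> accesses (id, tlb_active, and the atomic cycle counters) stay in the shared part of the structure.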

  • kernel/generic/src/cpu/cpu.c

    --- kernel/generic/src/cpu/cpu.c    (r3b68542)
    +++ kernel/generic/src/cpu/cpu.c    (r4760793)
    @@ -81,5 +81,5 @@
                 panic("Cannot allocate CPU stack.");
     
    -        cpus[i].stack = (uint8_t *) PA2KA(stack_phys);
    +        cpus[i].local.stack = (uint8_t *) PA2KA(stack_phys);
             cpus[i].id = i;
     
    @@ -104,6 +104,6 @@
         CPU->tlb_active = true;
     
    -    CPU->idle = false;
    -    CPU->last_cycle = get_cycle();
    +    CPU_LOCAL->idle = false;
    +    CPU_LOCAL->last_cycle = get_cycle();
         CPU->idle_cycles = ATOMIC_TIME_INITIALIZER();
         CPU->busy_cycles = ATOMIC_TIME_INITIALIZER();
  • kernel/generic/src/interrupt/interrupt.c

    --- kernel/generic/src/interrupt/interrupt.c    (r3b68542)
    +++ kernel/generic/src/interrupt/interrupt.c    (r4760793)
    @@ -121,9 +121,9 @@
     
         /* Account CPU usage if it woke up from sleep */
    -    if (CPU && CPU->idle) {
    +    if (CPU && CPU_LOCAL->idle) {
             uint64_t now = get_cycle();
    -        atomic_time_increment(&CPU->idle_cycles, now - CPU->last_cycle);
    -        CPU->last_cycle = now;
    -        CPU->idle = false;
    +        atomic_time_increment(&CPU->idle_cycles, now - CPU_LOCAL->last_cycle);
    +        CPU_LOCAL->last_cycle = now;
    +        CPU_LOCAL->idle = false;
         }
     
  • kernel/generic/src/main/main.c

    --- kernel/generic/src/main/main.c    (r3b68542)
    +++ kernel/generic/src/main/main.c    (r4760793)
    @@ -328,5 +328,5 @@
         ARCH_OP(post_cpu_init);
     
    -    current_copy(CURRENT, (current_t *) CPU->stack);
    +    current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     
         /*
    @@ -338,5 +338,5 @@
         context_save(&ctx);
         context_set(&ctx, FADDR(main_ap_separated_stack),
    -        (uintptr_t) CPU->stack, STACK_SIZE);
    +        (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
         context_restore(&ctx);
         /* not reached */
  • kernel/generic/src/proc/scheduler.c

    --- kernel/generic/src/proc/scheduler.c    (r3b68542)
    +++ kernel/generic/src/proc/scheduler.c    (r4760793)
    @@ -216,5 +216,6 @@
     
             /* This is safe because interrupts are disabled. */
    -        CPU->preempt_deadline = CPU->current_clock_tick + us2ticks(time_to_run);
    +        CPU_LOCAL->preempt_deadline =
    +            CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
     
             /*
    @@ -257,5 +258,5 @@
              * This improves energy saving and hyperthreading.
              */
    -        CPU->idle = true;
    +        CPU_LOCAL->idle = true;
     
             /*
    @@ -305,8 +306,8 @@
     static void relink_rq(int start)
     {
    -    if (CPU->current_clock_tick < CPU->relink_deadline)
    +    if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
             return;
     
    -    CPU->relink_deadline = CPU->current_clock_tick + NEEDS_RELINK_MAX;
    +    CPU_LOCAL->relink_deadline = CPU_LOCAL->current_clock_tick + NEEDS_RELINK_MAX;
     
         /* Temporary cache for lists we are moving. */
    @@ -401,5 +402,5 @@
          *
          */
    -    current_copy(CURRENT, (current_t *) CPU->stack);
    +    current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     
         /*
    @@ -419,5 +420,5 @@
         context_save(&ctx);
         context_set(&ctx, FADDR(scheduler_separated_stack),
    -        (uintptr_t) CPU->stack, STACK_SIZE);
    +        (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
         context_restore(&ctx);
     
  • kernel/generic/src/time/clock.c

    --- kernel/generic/src/time/clock.c    (r3b68542)
    +++ kernel/generic/src/time/clock.c    (r4760793)
    @@ -124,6 +124,6 @@
     {
         uint64_t now = get_cycle();
    -    atomic_time_increment(&CPU->busy_cycles, now - CPU->last_cycle);
    -    CPU->last_cycle = now;
    +    atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);
    +    CPU_LOCAL->last_cycle = now;
     }
     
    @@ -137,9 +137,9 @@
     void clock(void)
     {
    -    size_t missed_clock_ticks = CPU->missed_clock_ticks;
    -    CPU->missed_clock_ticks = 0;
    -
    -    CPU->current_clock_tick += missed_clock_ticks + 1;
    -    uint64_t current_clock_tick = CPU->current_clock_tick;
    +    size_t missed_clock_ticks = CPU_LOCAL->missed_clock_ticks;
    +    CPU_LOCAL->missed_clock_ticks = 0;
    +
    +    CPU_LOCAL->current_clock_tick += missed_clock_ticks + 1;
    +    uint64_t current_clock_tick = CPU_LOCAL->current_clock_tick;
         clock_update_counters(current_clock_tick);
     
    @@ -186,5 +186,5 @@
     
         if (THREAD) {
    -        if (current_clock_tick >= CPU->preempt_deadline && PREEMPTION_ENABLED) {
    +        if (current_clock_tick >= CPU_LOCAL->preempt_deadline && PREEMPTION_ENABLED) {
                 scheduler();
     #ifdef CONFIG_UDEBUG
  • kernel/generic/src/time/timeout.c

    --- kernel/generic/src/time/timeout.c    (r3b68542)
    +++ kernel/generic/src/time/timeout.c    (r4760793)
    @@ -77,5 +77,5 @@
             return 0;
     
    -    return CPU->current_clock_tick + us2ticks(usec);
    +    return CPU_LOCAL->current_clock_tick + us2ticks(usec);
     }
     