Changeset ed7e057 in mainline


Ignore:
Timestamp:
2024-01-16T15:46:47Z (4 months ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master
Children:
29029ac0, a5b5f17
Parents:
4ed7870
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-15 20:09:30)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-16 15:46:47)
Message:

Add functions context_create(), context_replace() and context_swap()

and use them where appropriate, removing context_save() in the process.
Much like in userspace, context_swap() maintains natural control flow
as opposed to context_save()'s return-twice mechanic.

Beyond that, in the future, context_replace() and context_swap()
can be implemented more efficiently than the context_save()/
context_restore() pair. As of now, the original implementation is
retained.

Location:
kernel/generic
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/include/context.h

    r4ed7870 red7e057  
    3636#define KERN_CONTEXT_H_
    3737
     38#include <panic.h>
    3839#include <trace.h>
    3940#include <arch/context.h>
     41#include <arch/faddr.h>
    4042
    4143#define context_set_generic(ctx, _pc, stack, size) \
     
    4749extern int context_save_arch(context_t *ctx) __attribute__((returns_twice));
    4850extern void context_restore_arch(context_t *ctx) __attribute__((noreturn));
    49 
    50 /** Save register context.
    51  *
    52  * Save the current register context (including stack pointer) to a context
    53  * structure. A subsequent call to context_restore() will return to the same
    54  * address as the corresponding call to context_save().
    55  *
    56  * Note that context_save_arch() must reuse the stack frame of the function
    57  * which called context_save(). We guarantee this by:
    58  *
    59  *   a) implementing context_save_arch() in assembly so that it does not create
    60  *      its own stack frame, and by
    61  *   b) defining context_save() as a macro because the inline keyword is just a
    62  *      hint for the compiler, not a real constraint; the application of a macro
    63  *      will definitely not create a stack frame either.
    64  *
    65  * To imagine what could happen if there were some extra stack frames created
    66  * either by context_save() or context_save_arch(), we need to realize that the
    67  * sp saved in the context_t structure points to the current stack frame as it
    68  * existed when context_save_arch() was executing. After the return from
    69  * context_save_arch() and context_save(), any extra stack frames created by
    70  * these functions will be destroyed and their contents sooner or later
    71  * overwritten by functions called next. Any attempt to restore to a context
    72  * saved like that would therefore lead to a disaster.
    73  *
    74  * @param ctx Context structure.
    75  *
    76  * @return context_save() returns 1, context_restore() returns 0.
    77  *
    78  */
    79 #define context_save(ctx)  context_save_arch(ctx)
    8051
    8152/** Restore register context.
     
    9162 *
    9263 */
    93 _NO_TRACE static inline void context_restore(context_t *ctx)
     64_NO_TRACE __attribute__((noreturn))
     65    static inline void context_restore(context_t *ctx)
    9466{
    9567        context_restore_arch(ctx);
     68}
     69
     70/**
     71 * Saves current context to the variable pointed to by `self`,
     72 * and restores the context denoted by `other`.
     73 *
     74 * When the `self` context is later restored by another call to
     75 * `context_swap()`, the control flow behaves as if the earlier call to
     76 * `context_swap()` just returned.
     77 */
     78_NO_TRACE static inline void context_swap(context_t *self, context_t *other)
     79{
     80        if (context_save_arch(self))
     81                context_restore_arch(other);
     82}
     83
     84_NO_TRACE static inline void context_create(context_t *context,
     85    void (*fn)(void), void *stack_base, size_t stack_size)
     86{
     87        *context = (context_t) { 0 };
     88        context_set(context, FADDR(fn), stack_base, stack_size);
     89}
     90
     91__attribute__((noreturn)) static inline void context_replace(void (*fn)(void),
     92    void *stack_base, size_t stack_size)
     93{
     94        context_t ctx;
     95        context_create(&ctx, fn, stack_base, stack_size);
     96        context_restore(&ctx);
    9697}
    9798
  • kernel/generic/src/main/main.c

    r4ed7870 red7e057  
    8080#include <arch/arch.h>
    8181#include <arch.h>
    82 #include <arch/faddr.h>
    8382#include <ipc/ipc.h>
    8483#include <macros.h>
     
    174173            ALIGN_UP((uintptr_t) kdata_end - config.base, PAGE_SIZE);
    175174
    176         context_save(&ctx);
    177         context_set(&ctx, FADDR(main_bsp_separated_stack),
     175        context_create(&ctx, main_bsp_separated_stack,
    178176            bootstrap_stack, bootstrap_stack_size);
    179177        context_restore(&ctx);
     
    336334         * switch to this cpu's private stack prior to waking kmp up.
    337335         */
    338         context_t ctx;
    339         context_save(&ctx);
    340         context_set(&ctx, FADDR(main_ap_separated_stack),
    341             (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
    342         context_restore(&ctx);
     336        context_replace(main_ap_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
    343337        /* not reached */
    344338}
  • kernel/generic/src/proc/scheduler.c

    r4ed7870 red7e057  
    309309
    310310        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    311 
    312         context_t ctx;
    313         context_save(&ctx);
    314         context_set(&ctx, FADDR(scheduler_separated_stack),
    315             (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
    316         context_restore(&ctx);
    317 
     311        context_replace(scheduler_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
    318312        unreachable();
    319313}
     
    452446                /* Prefer the thread after it's woken up. */
    453447                THREAD->priority = -1;
    454         }
    455 
    456         if (!context_save(&THREAD->saved_context)) {
    457                 /*
    458                  * This is the place where threads leave scheduler();
    459                  */
    460 
    461                 irq_spinlock_unlock(&THREAD->lock, false);
    462                 interrupts_restore(ipl);
    463                 return;
    464448        }
    465449
     
    486470         */
    487471        context_t ctx;
    488         context_save(&ctx);
    489         context_set(&ctx, FADDR(scheduler_separated_stack),
    490             (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
    491         context_restore(&ctx);
    492 
    493         /* Not reached */
     472        context_create(&ctx, scheduler_separated_stack,
     473            CPU_LOCAL->stack, STACK_SIZE);
     474
     475        /* Switch to scheduler context and store current thread's context. */
     476        context_swap(&THREAD->saved_context, &ctx);
     477
     478        /* Returned from scheduler. */
     479
     480        irq_spinlock_unlock(&THREAD->lock, false);
     481        interrupts_restore(ipl);
    494482}
    495483
  • kernel/generic/src/proc/thread.c

    r4ed7870 red7e057  
    6060#include <arch/interrupt.h>
    6161#include <smp/ipi.h>
    62 #include <arch/faddr.h>
    6362#include <atomic.h>
    6463#include <memw.h>
     
    310309        irq_spinlock_unlock(&tidlock, true);
    311310
    312         memset(&thread->saved_context, 0, sizeof(thread->saved_context));
    313         context_set(&thread->saved_context, FADDR(cushion),
    314             (uintptr_t) thread->kstack, STACK_SIZE);
     311        context_create(&thread->saved_context, cushion, thread->kstack, STACK_SIZE);
    315312
    316313        current_initialize((current_t *) thread->kstack);
Note: See TracChangeset for help on using the changeset viewer.