Timestamp:
2024-01-21T16:36:15Z
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master
Children:
1a1e124
Parents:
ed7e057 (diff), d23712e (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge scheduler refactoring to remove the need for thread structure lock

All necessary synchronization is already a product of other operations
that enforce ordering (that is, runqueue manipulation and thread_sleep()
/thread_wakeup()). Some fields formally become atomic, which is only
needed because they are read from other threads to print out statistics.
These atomic operations are limited to relaxed individual reads/writes
to native-sized fields, which should at least in theory be compiled
identically to regular volatile variable accesses, the only difference
being that concurrent accesses from different threads are not undefined
behavior by definition.
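
A minimal sketch of this pattern (the field and helper names below are
illustrative only, not the actual HelenOS thread_t layout): a statistics
field is declared atomic and accessed with memory_order_relaxed, so that
concurrent readers in other threads are well defined without introducing
any extra ordering.

    #include <stdatomic.h>
    #include <stdint.h>

    /* Illustrative bookkeeping struct; not the real thread structure. */
    typedef struct {
        /* Written only by the owning thread, read by others for stats. */
        atomic_uint_fast64_t ucycles;
    } thread_stats_t;

    /* Owner-side update: separate relaxed load and store are enough
     * here because only the owner ever writes the field. */
    static void stats_add_ucycles(thread_stats_t *st, uint64_t delta)
    {
        uint64_t old = atomic_load_explicit(&st->ucycles,
            memory_order_relaxed);
        atomic_store_explicit(&st->ucycles, old + delta,
            memory_order_relaxed);
    }

    /* Reader side (e.g. a statistics printout in another thread):
     * a relaxed load merely needs to be free of data races. */
    static uint64_t stats_read_ucycles(thread_stats_t *st)
    {
        return atomic_load_explicit(&st->ucycles, memory_order_relaxed);
    }

On common architectures such relaxed loads and stores of native-sized
fields compile to plain memory accesses, which matches the observation
above that the generated code should be equivalent to volatile accesses.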

Additionally, it is now possible to switch directly to the new
thread's context instead of going through a separate scheduler stack.
A separate context is only needed and used when no runnable thread is
immediately available, which means we optimize switching in the
limiting case where many threads are waiting for execution. Switching
is also avoided altogether when there is only one runnable thread and
it is being preempted. Originally, the scheduler would switch to a
separate stack, requeue the thread that was running, retrieve that
same thread from the queue, and switch to it again; all of that work
is now avoided.
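
A sketch of the control flow this describes (all names below are
illustrative, not the actual HelenOS scheduler API): the fast path hands
control directly from the outgoing thread's context to the next thread's
context, and the dedicated scheduler context is entered only when nothing
is runnable.

    /* Illustrative types and helpers; not the actual HelenOS interfaces. */
    typedef struct context { void *sp; /* saved registers, ... */ } context_t;
    typedef struct thread { context_t ctx; /* ... */ } thread_t;

    extern thread_t *runqueue_take(void);                  /* hypothetical */
    extern void runqueue_put(thread_t *t);                 /* hypothetical */
    extern void context_swap(context_t *save, context_t *restore);
    extern context_t scheduler_ctx;       /* per-CPU in a real kernel */

    /* Called when the current thread yields or is preempted. */
    static void reschedule(thread_t *current, bool still_runnable)
    {
        thread_t *next = runqueue_take();

        if (next == NULL && still_runnable) {
            /* The preempted thread is the only runnable one:
             * keep running it, no context switch at all. */
            return;
        }

        if (next != NULL) {
            /* Fast path: requeue the outgoing thread (if it can still
             * run) and switch straight to the next thread's context,
             * without passing through the scheduler's own stack. */
            if (still_runnable)
                runqueue_put(current);
            context_swap(&current->ctx, &next->ctx);
            return;
        }

        /* Slow path: nothing is runnable, so switch to the separate
         * scheduler context and pick up new work from there later. */
        context_swap(&current->ctx, &scheduler_ctx);
    }

This mirrors the two cases above: the separate context is reached only
on the empty-runqueue path, and preemption of the sole runnable thread
degenerates to doing nothing.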

File:
1 edited

Legend:

      '+'    Added (only in r a5b5f17)
      '-'    Removed (only in r ed7e057)
      ' '    Unmodified context
      [...]  Unchanged lines omitted
  • kernel/generic/src/udebug/udebug_ops.c

    --- kernel/generic/src/udebug/udebug_ops.c  (r ed7e057)
    +++ kernel/generic/src/udebug/udebug_ops.c  (r a5b5f17)
             }
     
    -        irq_spinlock_lock(&thread->lock, true);
    -
             /* Verify that 'thread' is a userspace thread. */
             if (!thread->uspace) {
    -                /* It's not, deny its existence */
    -                irq_spinlock_unlock(&thread->lock, true);
                     mutex_unlock(&TASK->udebug.lock);
                     return ENOENT;
             }
    -
    -        /* Verify debugging state. */
    -        if (thread->udebug.active != true) {
    -                /* Not in debugging session or undesired GO state */
    -                irq_spinlock_unlock(&thread->lock, true);
    -                mutex_unlock(&TASK->udebug.lock);
    -                return ENOENT;
    -        }
    -
    -        /* Now verify that the thread belongs to the current task. */
    -        if (thread->task != TASK) {
    -                /* No such thread belonging this task */
    -                irq_spinlock_unlock(&thread->lock, true);
    -                mutex_unlock(&TASK->udebug.lock);
    -                return ENOENT;
    -        }
    -
    -        irq_spinlock_unlock(&thread->lock, true);
    -
    -        /* Only mutex TASK->udebug.lock left. */
     
             /*
    [...]
              */
             mutex_lock(&thread->udebug.lock);
    +
    +        /* Verify debugging state. */
    +        if (thread->udebug.active != true) {
    +                /* Not in debugging session or undesired GO state */
    +                mutex_unlock(&thread->udebug.lock);
    +                mutex_unlock(&TASK->udebug.lock);
    +                return ENOENT;
    +        }
    +
    +        /* Now verify that the thread belongs to the current task. */
    +        if (thread->task != TASK) {
    +                /* No such thread belonging this task */
    +                mutex_unlock(&thread->udebug.lock);
    +                mutex_unlock(&TASK->udebug.lock);
    +                return ENOENT;
    +        }
     
             /* The big task mutex is no longer needed. */
    [...]
             /* FIXME: make sure the thread isn't past debug shutdown... */
             list_foreach(TASK->threads, th_link, thread_t, thread) {
    -                irq_spinlock_lock(&thread->lock, false);
                     bool uspace = thread->uspace;
    -                irq_spinlock_unlock(&thread->lock, false);
     
                     /* Not interested in kernel threads. */