Changeset b1c57a8 in mainline for kernel/generic/src


Timestamp:
2014-10-09T15:03:55Z
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
e367939c
Parents:
21799398 (diff), 207e8880 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge from lp:~adam-hraska+lp/helenos/rcu/.

Only merge from the feature branch and resolve all conflicts.

Location:
kernel/generic/src
Files:
6 added
30 edited

  • kernel/generic/src/adt/list.c

    r21799398 rb1c57a8  
    6868}
    6969
    70 /** Concatenate two lists
    71  *
    72  * Concatenate lists @a list1 and @a list2, producing a single
    73  * list @a list1 containing items from both (in @a list1, @a list2
    74  * order) and empty list @a list2.
    75  *
    76  * @param list1         First list and concatenated output
    77  * @param list2         Second list and empty output.
    78  *
     70/** Moves items of one list into another after the specified item.
     71 *
     72 * Inserts all items of @a list after item at @a pos in another list.
     73 * Both lists may be empty.
     74 *
     75 * @param list Source list to move after pos. Empty afterwards.
     76 * @param pos Source items will be placed after this item.
    7977 */
    80 void list_concat(list_t *list1, list_t *list2)
     78void list_splice(list_t *list, link_t *pos)
    8179{
    82         if (list_empty(list2))
     80        if (list_empty(list))
    8381                return;
    84 
    85         list2->head.next->prev = list1->head.prev;
    86         list2->head.prev->next = &list1->head;
    87         list1->head.prev->next = list2->head.next;
    88         list1->head.prev = list2->head.prev;
    89         list_initialize(list2);
     82       
     83        /* Attach list to destination. */
     84        list->head.next->prev = pos;
     85        list->head.prev->next = pos->next;
     86       
     87        /* Link destination list to the added list. */
     88        pos->next->prev = list->head.prev;
     89        pos->next = list->head.next;
     90       
     91        list_initialize(list);
    9092}
    9193
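For orientation, a minimal usage sketch of the new list_splice() (illustrative only, not part of the changeset). Passing the last item of the destination list as @a pos reproduces the behaviour of the removed list_concat():

    list_t src, dst;
    
    list_initialize(&src);
    list_initialize(&dst);
    /* ... append link_t items to both lists ... */
    
    /*
     * Equivalent of the removed list_concat(&dst, &src): insert all
     * items of src after the last item of dst. Passing &dst.head
     * instead would prepend them. src is emptied either way.
     */
    list_splice(&src, dst.head.prev);
    
    ASSERT(list_empty(&src));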
  • kernel/generic/src/console/chardev.c

    r21799398 rb1c57a8  
    3939#include <print.h>
    4040#include <func.h>
    41 #include <arch.h>
     41#include <cpu.h>
    4242
    4343/** Initialize input character device.
  • kernel/generic/src/console/cmd.c

    r21799398 rb1c57a8  
    7070#include <sysinfo/sysinfo.h>
    7171#include <symtab.h>
     72#include <synch/workqueue.h>
     73#include <synch/rcu.h>
    7274#include <errno.h>
    7375
     
    526528};
    527529
     530/* Data and methods for the 'workq' command */
     531static int cmd_workq(cmd_arg_t *argv);
     532static cmd_info_t workq_info = {
     533        .name = "workq",
     534        .description = "Show global workq information.",
     535        .func = cmd_workq,
     536        .argc = 0
     537};
     538
      539/* Data and methods for the 'rcu' command */
     540static int cmd_rcu(cmd_arg_t *argv);
     541static cmd_info_t rcu_info = {
     542        .name = "rcu",
     543        .description = "Show RCU run-time statistics.",
     544        .func = cmd_rcu,
     545        .argc = 0
     546};
     547
    528548/* Data and methods for 'ipc' command */
    529549static int cmd_ipc(cmd_arg_t *argv);
     
    589609        &physmem_info,
    590610        &reboot_info,
     611        &rcu_info,
    591612        &sched_info,
    592613        &set4_info,
     
    599620        &uptime_info,
    600621        &version_info,
     622        &workq_info,
    601623        &zones_info,
    602624        &zone_info,
     
    12701292{
    12711293        sched_print_list();
     1294        return 1;
     1295}
     1296
     1297/** Prints information about the global work queue.
     1298 *
      1299 * @param argv Ignored
     1300 *
     1301 * @return Always 1
     1302 */
     1303int cmd_workq(cmd_arg_t *argv)
     1304{
     1305        workq_global_print_info();
     1306        return 1;
     1307}
     1308
     1309/** Prints RCU statistics.
     1310 *
      1311 * @param argv Ignored
     1312 *
     1313 * @return Always 1
     1314 */
     1315int cmd_rcu(cmd_arg_t *argv)
     1316{
     1317        rcu_print_stat();
    12721318        return 1;
    12731319}
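Both new commands follow the registration pattern used throughout cmd.c: a handler, a static cmd_info_t descriptor, and an entry in the command array. As a purely hypothetical sketch of that pattern (the 'foo' command below does not exist in the changeset):

    /* Data and methods for a hypothetical 'foo' command. */
    static int cmd_foo(cmd_arg_t *argv);
    static cmd_info_t foo_info = {
            .name = "foo",
            .description = "Illustrative example command.",
            .func = cmd_foo,
            .argc = 0
    };
    
    /** Handler; like cmd_rcu() it ignores argv and always returns 1. */
    static int cmd_foo(cmd_arg_t *argv)
    {
            /* Print or compute something here. */
            return 1;
    }
    
    /* Plus an entry (&foo_info) in the command array, as with &rcu_info. */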
  • kernel/generic/src/console/console.c

    r21799398 rb1c57a8  
    5353#include <str.h>
    5454#include <abi/kio.h>
     55#include <mm/frame.h> /* SIZE2FRAMES */
     56#include <mm/slab.h>  /* malloc */
    5557
    5658#define KIO_PAGES    8
  • kernel/generic/src/console/kconsole.c

    r21799398 rb1c57a8  
    5959#include <putchar.h>
    6060#include <str.h>
     61#include <mm/slab.h>
    6162
    6263/** Simple kernel console.
  • kernel/generic/src/cpu/cpu.c

    r21799398 rb1c57a8  
    5050#include <sysinfo/sysinfo.h>
    5151#include <arch/cycle.h>
     52#include <synch/rcu.h>
    5253
    5354cpu_t *cpus;
     
    105106        cpu_identify();
    106107        cpu_arch_init();
     108        rcu_cpu_init();
    107109}
    108110
  • kernel/generic/src/debug/panic.c

    r21799398 rb1c57a8  
    9696        printf("THE=%p: ", THE);
    9797        if (THE != NULL) {
    98                 printf("pd=%" PRIun " thread=%p task=%p cpu=%p as=%p"
    99                     " magic=%#" PRIx32 "\n", THE->preemption_disabled,
     98                printf("pe=%" PRIun " thread=%p task=%p cpu=%p as=%p"
     99                    " magic=%#" PRIx32 "\n", THE->preemption,
    100100                    THE->thread, THE->task, THE->cpu, THE->as, THE->magic);
    101101               
  • kernel/generic/src/interrupt/interrupt.c

    r21799398 rb1c57a8  
    112112        }
    113113       
    114         /* Account CPU usage if it has waked up from sleep */
    115         if (CPU) {
     114        /* Account CPU usage if it woke up from sleep */
     115        if (CPU && CPU->idle) {
    116116                irq_spinlock_lock(&CPU->lock, false);
    117                 if (CPU->idle) {
    118                         uint64_t now = get_cycle();
    119                         CPU->idle_cycles += now - CPU->last_cycle;
    120                         CPU->last_cycle = now;
    121                         CPU->idle = false;
    122                 }
     117                uint64_t now = get_cycle();
     118                CPU->idle_cycles += now - CPU->last_cycle;
     119                CPU->last_cycle = now;
     120                CPU->idle = false;
    123121                irq_spinlock_unlock(&CPU->lock, false);
    124122        }
  • kernel/generic/src/ipc/kbox.c

    r21799398 rb1c57a8  
    4444#include <ipc/kbox.h>
    4545#include <print.h>
     46#include <proc/thread.h>
    4647
    4748void ipc_kbox_cleanup(void)
  • kernel/generic/src/lib/str.c

    r21799398 rb1c57a8  
    111111#include <debug.h>
    112112#include <macros.h>
     113#include <mm/slab.h>
    113114
    114115/** Check the condition if wchar_t is signed */
     
    567568        /* There must be space for a null terminator in the buffer. */
    568569        ASSERT(size > 0);
     570        ASSERT(src != NULL);
    569571       
    570572        size_t src_off = 0;
  • kernel/generic/src/main/kinit.c

    r21799398 rb1c57a8  
    7979#include <synch/waitq.h>
    8080#include <synch/spinlock.h>
     81#include <synch/workqueue.h>
     82#include <synch/rcu.h>
    8183
    8284#define ALIVE_CHARS  4
     
    105107         */
    106108        thread_detach(THREAD);
    107        
     109
    108110        interrupts_disable();
     111       
     112        /* Start processing RCU callbacks. RCU is fully functional afterwards. */
     113        rcu_kinit_init();
     114       
     115        /*
     116         * Start processing work queue items. Some may have been queued during boot.
     117         */
     118        workq_global_worker_init();
    109119       
    110120#ifdef CONFIG_SMP
  • kernel/generic/src/main/main.c

    r21799398 rb1c57a8  
    7676#include <synch/waitq.h>
    7777#include <synch/futex.h>
     78#include <synch/workqueue.h>
     79#include <smp/smp_call.h>
    7880#include <arch/arch.h>
    7981#include <arch.h>
     
    263265       
    264266        cpu_init();
    265        
    266267        calibrate_delay_loop();
     268        arch_post_cpu_init();
     269
     270        smp_call_init();
     271        workq_global_init();
    267272        clock_counter_init();
    268273        timeout_init();
     
    367372void main_ap_separated_stack(void)
    368373{
     374        smp_call_init();
     375       
    369376        /*
    370377         * Configure timeouts for this cpu.
  • kernel/generic/src/main/shutdown.c

    r21799398 rb1c57a8  
    3737
    3838#include <arch.h>
     39#include <proc/task.h>
    3940#include <func.h>
    4041#include <print.h>
  • kernel/generic/src/mm/frame.c

    r21799398 rb1c57a8  
    6161#include <config.h>
    6262#include <str.h>
     63#include <proc/thread.h> /* THREAD */
    6364
    6465zones_t zones;
  • kernel/generic/src/mm/km.c

    r21799398 rb1c57a8  
    4949#include <macros.h>
    5050#include <bitops.h>
     51#include <proc/thread.h>
    5152
    5253static ra_arena_t *km_ni_arena;
  • kernel/generic/src/mm/slab.c

    r21799398 rb1c57a8  
    114114#include <bitops.h>
    115115#include <macros.h>
     116#include <cpu.h>
    116117
    117118IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);
  • kernel/generic/src/preempt/preemption.c

    r21799398 rb1c57a8  
    3737
    3838#include <preemption.h>
    39 #include <arch.h>
    40 #include <arch/asm.h>
    41 #include <arch/barrier.h>
    42 #include <debug.h>
    4339
    44 /** Increment preemption disabled counter. */
    45 void preemption_disable(void)
    46 {
    47         THE->preemption_disabled++;
    48         memory_barrier();
    49 }
    50 
    51 /** Decrement preemption disabled counter. */
    52 void preemption_enable(void)
    53 {
    54         ASSERT(PREEMPTION_DISABLED);
    55         memory_barrier();
    56         THE->preemption_disabled--;
    57 }
    5840
    5941/** @}
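The removed pair does not disappear from the kernel: elsewhere in this merge THE->preemption_disabled is renamed to THE->preemption (see proc/the.c below) and clock.c switches to a PREEMPTION_ENABLED test, so the counter logic evidently moves out of this file. For reference, a sketch of the equivalent counter operations under that assumption:

    /* Illustrative equivalent of the removed functions; the real
     * post-merge implementation presumably lives in a header
     * (preemption.h) rather than here. */
    void preemption_disable(void)
    {
            THE->preemption++;
            memory_barrier();
    }
    
    void preemption_enable(void)
    {
            ASSERT(PREEMPTION_DISABLED);
            memory_barrier();
            THE->preemption--;
    }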
  • kernel/generic/src/proc/scheduler.c

    r21799398 rb1c57a8  
    5252#include <atomic.h>
    5353#include <synch/spinlock.h>
     54#include <synch/workqueue.h>
     55#include <synch/rcu.h>
    5456#include <config.h>
    5557#include <context.h>
     
    6466#include <debug.h>
    6567#include <stacktrace.h>
     68#include <cpu.h>
    6669
    6770static void scheduler_separated_stack(void);
     
    8790{
    8891        before_thread_runs_arch();
     92        rcu_before_thread_runs();
    8993       
    9094#ifdef CONFIG_FPU_LAZY
     
    127131static void after_thread_ran(void)
    128132{
     133        workq_after_thread_ran();
     134        rcu_after_thread_ran();
    129135        after_thread_ran_arch();
    130136}
     
    219225                goto loop;
    220226        }
     227
     228        ASSERT(!CPU->idle);
    221229       
    222230        unsigned int i;
     
    398406        ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
    399407        ASSERT(CPU != NULL);
     408        ASSERT(interrupts_disabled());
    400409       
    401410        /*
     
    421430               
    422431                case Exiting:
     432                        rcu_thread_exiting();
    423433repeat:
    424434                        if (THREAD->detached) {
  • kernel/generic/src/proc/task.c

    r21799398 rb1c57a8  
    4141#include <mm/slab.h>
    4242#include <atomic.h>
     43#include <synch/futex.h>
    4344#include <synch/spinlock.h>
    4445#include <synch/waitq.h>
     
    163164       
    164165        irq_spinlock_initialize(&task->lock, "task_t_lock");
    165         mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);
    166166       
    167167        list_initialize(&task->threads);
     
    175175        spinlock_initialize(&task->active_calls_lock, "active_calls_lock");
    176176        list_initialize(&task->active_calls);
    177        
     177               
    178178#ifdef CONFIG_UDEBUG
    179179        /* Init kbox stuff */
     
    231231                (void) ipc_phone_connect(&task->phones[0], ipc_phone_0);
    232232       
    233         btree_create(&task->futexes);
     233        futex_task_init(task);
    234234       
    235235        /*
     
    272272         * Free up dynamically allocated state.
    273273         */
    274         btree_destroy(&task->futexes);
     274        futex_task_deinit(task);
    275275       
    276276        /*
  • kernel/generic/src/proc/the.c

    r21799398 rb1c57a8  
    4343
    4444#include <arch.h>
     45#include <debug.h>
    4546
    4647/** Initialize THE structure
     
    5354void the_initialize(the_t *the)
    5455{
    55         the->preemption_disabled = 0;
     56        the->preemption = 0;
    5657        the->cpu = NULL;
    5758        the->thread = NULL;
     
    5960        the->as = NULL;
    6061        the->magic = MAGIC;
     62#ifdef RCU_PREEMPT_A   
     63        the->rcu_nesting = 0;
     64#endif
    6165}
    6266
  • kernel/generic/src/proc/thread.c

    r21799398 rb1c57a8  
    4646#include <synch/spinlock.h>
    4747#include <synch/waitq.h>
     48#include <synch/workqueue.h>
     49#include <synch/rcu.h>
    4850#include <cpu.h>
    4951#include <str.h>
     
    263265}
    264266
     267/** Invoked right before thread_ready() readies the thread. thread is locked. */
     268static void before_thread_is_ready(thread_t *thread)
     269{
     270        ASSERT(irq_spinlock_locked(&thread->lock));
     271        workq_before_thread_is_ready(thread);
     272}
     273
    265274/** Make thread ready
    266275 *
     
    275284       
    276285        ASSERT(thread->state != Ready);
     286
     287        before_thread_is_ready(thread);
    277288       
    278289        int i = (thread->priority < RQ_COUNT - 1) ?
    279290            ++thread->priority : thread->priority;
    280        
    281         cpu_t *cpu;
    282         if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
    283                 ASSERT(thread->cpu != NULL);
    284                 cpu = thread->cpu;
    285         } else
     291
     292        /* Check that thread->cpu is set whenever it needs to be. */
     293        ASSERT(thread->cpu != NULL ||
     294                (!thread->wired && !thread->nomigrate && !thread->fpu_context_engaged));
     295
     296        /*
     297         * Prefer to run on the same cpu as the last time. Used by wired
     298         * threads as well as threads with disabled migration.
     299         */
     300        cpu_t *cpu = thread->cpu;
     301        if (cpu == NULL)
    286302                cpu = CPU;
    287303       
     
    377393        thread->task = task;
    378394       
     395        thread->workq = NULL;
     396       
    379397        thread->fpu_context_exists = false;
    380398        thread->fpu_context_engaged = false;
     
    391409        /* Might depend on previous initialization */
    392410        thread_create_arch(thread);
     411       
     412        rcu_thread_init(thread);
    393413       
    394414        if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
     
    501521                         */
    502522                        ipc_cleanup();
    503                         futex_cleanup();
     523                        futex_task_cleanup();
    504524                        LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid);
    505525                }
     
    521541        /* Not reached */
    522542        while (true);
     543}
     544
     545/** Interrupts an existing thread so that it may exit as soon as possible.
     546 *
     547 * Threads that are blocked waiting for a synchronization primitive
     548 * are woken up with a return code of ESYNCH_INTERRUPTED if the
      549 * blocking call was interruptible. See waitq_sleep_timeout().
     550 *
     551 * The caller must guarantee the thread object is valid during the entire
      552 * function, e.g. by holding the threads_lock lock.
     553 *
     554 * Interrupted threads automatically exit when returning back to user space.
     555 *
     556 * @param thread A valid thread object. The caller must guarantee it
     557 *               will remain valid until thread_interrupt() exits.
     558 */
     559void thread_interrupt(thread_t *thread)
     560{
     561        ASSERT(thread != NULL);
     562       
     563        irq_spinlock_lock(&thread->lock, true);
     564       
     565        thread->interrupted = true;
     566        bool sleeping = (thread->state == Sleeping);
     567       
     568        irq_spinlock_unlock(&thread->lock, true);
     569       
     570        if (sleeping)
     571                waitq_interrupt_sleep(thread);
     572}
     573
     574/** Returns true if the thread was interrupted.
     575 *
     576 * @param thread A valid thread object. User must guarantee it will
     577 *               be alive during the entire call.
     578 * @return true if the thread was already interrupted via thread_interrupt().
     579 */
     580bool thread_interrupted(thread_t *thread)
     581{
     582        ASSERT(thread != NULL);
     583       
     584        bool interrupted;
     585       
     586        irq_spinlock_lock(&thread->lock, true);
     587        interrupted = thread->interrupted;
     588        irq_spinlock_unlock(&thread->lock, true);
     589       
     590        return interrupted;
    523591}
    524592
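A minimal usage sketch of the new interruption API (illustrative; thread_find_by_id() and the locking follow the conventions used elsewhere in thread.c, with threads_lock held to keep the thread object valid as the doc comment requires):

    /* Requesting side: interrupt the thread with the given id. */
    void interrupt_thread_by_id(thread_id_t tid)
    {
            irq_spinlock_lock(&threads_lock, true);
            thread_t *thread = thread_find_by_id(tid);
            if (thread)
                    thread_interrupt(thread);
            irq_spinlock_unlock(&threads_lock, true);
    }
    
    /* Target side: a kernel thread periodically polls for the request. */
    while (!thread_interrupted(THREAD)) {
            /* ... do a unit of work, possibly sleeping interruptibly ... */
    }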
  • kernel/generic/src/synch/condvar.c

    r21799398 rb1c57a8  
    3838#include <synch/condvar.h>
    3939#include <synch/mutex.h>
     40#include <synch/spinlock.h>
    4041#include <synch/waitq.h>
    4142#include <arch.h>
     
    9091
    9192        ipl = waitq_sleep_prepare(&cv->wq);
     93        /* Unlock only after the waitq is locked so we don't miss a wakeup. */
    9294        mutex_unlock(mtx);
    9395
     
    9597        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
    9698
     99        waitq_sleep_finish(&cv->wq, rc, ipl);
     100        /* Lock only after releasing the waitq to avoid a possible deadlock. */
    97101        mutex_lock(mtx);
    98         waitq_sleep_finish(&cv->wq, rc, ipl);
    99102
    100103        return rc;
    101104}
    102105
     106/** Wait for the condition to become true with a locked spinlock.
     107 *
     108 * The function is not aware of irq_spinlock. Therefore do not even
     109 * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
     110 * instead.
     111 *
     112 * @param cv            Condition variable.
     113 * @param lock          Locked spinlock.
     114 * @param usec          Timeout value in microseconds.
     115 * @param flags         Select mode of operation.
     116 *
     117 * For exact description of meaning of possible combinations of usec and flags,
     118 * see comment for waitq_sleep_timeout().  Note that when
     119 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
     120 * returned.
     121 *
     122 * @return See comment for waitq_sleep_timeout().
     123 */
     124int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
     125        uint32_t usec, int flags)
     126{
     127        int rc;
     128        ipl_t ipl;
     129       
     130        ipl = waitq_sleep_prepare(&cv->wq);
     131
     132        /* Unlock only after the waitq is locked so we don't miss a wakeup. */
     133        spinlock_unlock(lock);
     134
     135        cv->wq.missed_wakeups = 0;      /* Enforce blocking. */
     136        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
     137
     138        waitq_sleep_finish(&cv->wq, rc, ipl);
     139        /* Lock only after releasing the waitq to avoid a possible deadlock. */
     140        spinlock_lock(lock);
     141       
     142        return rc;
     143}
     144
     145/** Wait for the condition to become true with a locked irq spinlock.
     146 *
     147 * @param cv            Condition variable.
     148 * @param lock          Locked irq spinlock.
     149 * @param usec          Timeout value in microseconds.
     150 * @param flags         Select mode of operation.
     151 *
     152 * For exact description of meaning of possible combinations of usec and flags,
     153 * see comment for waitq_sleep_timeout().  Note that when
     154 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
     155 * returned.
     156 *
     157 * @return See comment for waitq_sleep_timeout().
     158 */
     159int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
     160        uint32_t usec, int flags)
     161{
     162        int rc;
     163        /* Save spinlock's state so we can restore it correctly later on. */
     164        ipl_t ipl = irq_lock->ipl;
     165        bool guard = irq_lock->guard;
     166       
     167        irq_lock->guard = false;
     168       
     169        /*
      170         * waitq_sleep_prepare() restores interrupts to the current state,
      171         * i.e. disabled. Therefore, interrupts will remain disabled while
     172         * it spins waiting for a pending timeout handler to complete.
     173         * Although it spins with interrupts disabled there can only
     174         * be a pending timeout if we failed to cancel an imminent
     175         * timeout (on another cpu) during a wakeup. As a result the
     176         * timeout handler is guaranteed to run (it is most likely already
     177         * running) and there is no danger of a deadlock.
     178         */
     179        rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);
     180       
     181        irq_lock->guard = guard;
     182        irq_lock->ipl = ipl;
     183       
     184        return rc;
     185}
     186
     187
    103188/** @}
    104189 */
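A minimal sketch of the new irq spinlock variant in use (all names, the timeout, and the flags below are illustrative, not taken from the changeset):

    IRQ_SPINLOCK_STATIC_INITIALIZE(example_lock);
    static condvar_t example_cv;    /* condvar_initialize()d at boot */
    static bool ready = false;
    
    static void waiter(void)
    {
            irq_spinlock_lock(&example_lock, true);
            while (!ready) {
                    _condvar_wait_timeout_irq_spinlock(&example_cv,
                        &example_lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
            }
            irq_spinlock_unlock(&example_lock, true);
    }
    
    static void signaler(void)
    {
            irq_spinlock_lock(&example_lock, true);
            ready = true;
            condvar_signal(&example_cv);
            irq_spinlock_unlock(&example_lock, true);
    }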
  • kernel/generic/src/synch/futex.c

    r21799398 rb1c57a8  
    11/*
    22 * Copyright (c) 2006 Jakub Jermar
     3 * Copyright (c) 2012 Adam Hraska
    34 * All rights reserved.
    45 *
     
    3435 * @file
    3536 * @brief       Kernel backend for futexes.
     37 *
     38 * Kernel futex objects are stored in a global hash table futex_ht
     39 * where the physical address of the futex variable (futex_t.paddr)
     40 * is used as the lookup key. As a result multiple address spaces
     41 * may share the same futex variable.
     42 *
     43 * A kernel futex object is created the first time a task accesses
     44 * the futex (having a futex variable at a physical address not
     45 * encountered before). Futex object's lifetime is governed by
     46 * a reference count that represents the number of all the different
     47 * user space virtual addresses from all tasks that map to the
     48 * physical address of the futex variable. A futex object is freed
     49 * when the last task having accessed the futex exits.
     50 *
     51 * Each task keeps track of the futex objects it accessed in a list
     52 * of pointers (futex_ptr_t, task->futex_list) to the different futex
     53 * objects.
     54 *
     55 * To speed up translation of futex variables' virtual addresses
     56 * to their physical addresses, futex pointers accessed by the
     57 * task are furthermore stored in a concurrent hash table (CHT,
     58 * task->futexes->ht). A single lookup without locks or accesses
     59 * to the page table translates a futex variable's virtual address
     60 * into its futex kernel object.
    3661 */
    3762
     
    3964#include <synch/mutex.h>
    4065#include <synch/spinlock.h>
     66#include <synch/rcu.h>
    4167#include <mm/frame.h>
    4268#include <mm/page.h>
     
    4672#include <genarch/mm/page_pt.h>
    4773#include <genarch/mm/page_ht.h>
     74#include <adt/cht.h>
    4875#include <adt/hash_table.h>
    4976#include <adt/list.h>
     
    5279#include <panic.h>
    5380#include <errno.h>
    54 #include <print.h>
    5581
    5682#define FUTEX_HT_SIZE   1024    /* keep it a power of 2 */
    5783
    58 static void futex_initialize(futex_t *futex);
    59 
    60 static futex_t *futex_find(uintptr_t paddr);
     84/** Task specific pointer to a global kernel futex object. */
     85typedef struct futex_ptr {
     86        /** CHT link. */
     87        cht_link_t cht_link;
     88        /** List of all futex pointers used by the task. */
     89        link_t all_link;
     90        /** Kernel futex object. */
     91        futex_t *futex;
     92        /** User space virtual address of the futex variable in the task. */
     93        uintptr_t uaddr;
     94} futex_ptr_t;
     95
     96
     97static void destroy_task_cache(work_t *work);
     98
     99static void futex_initialize(futex_t *futex, uintptr_t paddr);
     100static void futex_add_ref(futex_t *futex);
     101static void futex_release_ref(futex_t *futex);
     102static void futex_release_ref_locked(futex_t *futex);
     103
     104static futex_t *get_futex(uintptr_t uaddr);
     105static futex_t *find_cached_futex(uintptr_t uaddr);
     106static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr);
     107static bool find_futex_paddr(uintptr_t uaddr, uintptr_t *phys_addr);
     108
    61109static size_t futex_ht_hash(sysarg_t *key);
    62110static bool futex_ht_compare(sysarg_t *key, size_t keys, link_t *item);
    63111static void futex_ht_remove_callback(link_t *item);
    64112
    65 /**
    66  * Mutex protecting global futex hash table.
    67  * It is also used to serialize access to all futex_t structures.
    68  * Must be acquired before the task futex B+tree lock.
    69  */
    70 static mutex_t futex_ht_lock;
    71 
    72 /** Futex hash table. */
     113static size_t task_fut_ht_hash(const cht_link_t *link);
     114static size_t task_fut_ht_key_hash(void *key);
     115static bool task_fut_ht_equal(const cht_link_t *item1, const cht_link_t *item2);
     116static bool task_fut_ht_key_equal(void *key, const cht_link_t *item);
     117
     118
      119/** Spinlock protecting the global futex hash table.
      120 *
      121 * Acquire the task-specific TASK->futexes->list_lock before this lock.
     122 */
     123SPINLOCK_STATIC_INITIALIZE_NAME(futex_ht_lock, "futex-ht-lock");
     124
     125/** Global kernel futex hash table. Lock futex_ht_lock before accessing.
     126 *
     127 * Physical address of the futex variable is the lookup key.
     128 */
    73129static hash_table_t futex_ht;
    74130
    75 /** Futex hash table operations. */
     131/** Global kernel futex hash table operations. */
    76132static hash_table_operations_t futex_ht_ops = {
    77133        .hash = futex_ht_hash,
     
    80136};
    81137
     138/** Task futex cache CHT operations. */
     139static cht_ops_t task_futex_ht_ops = {
     140        .hash = task_fut_ht_hash,
     141        .key_hash = task_fut_ht_key_hash,
     142        .equal = task_fut_ht_equal,
     143        .key_equal = task_fut_ht_key_equal,
     144        .remove_callback = NULL
     145};
     146
    82147/** Initialize futex subsystem. */
    83148void futex_init(void)
    84149{
    85         mutex_initialize(&futex_ht_lock, MUTEX_PASSIVE);
    86150        hash_table_create(&futex_ht, FUTEX_HT_SIZE, 1, &futex_ht_ops);
    87151}
    88152
    89 /** Initialize kernel futex structure.
    90  *
    91  * @param futex         Kernel futex structure.
    92  */
    93 void futex_initialize(futex_t *futex)
     153/** Initializes the futex structures for the new task. */
     154void futex_task_init(struct task *task)
     155{
     156        task->futexes = malloc(sizeof(struct futex_cache), 0);
     157       
     158        cht_create(&task->futexes->ht, 0, 0, 0, true, &task_futex_ht_ops);
     159       
     160        list_initialize(&task->futexes->list);
     161        spinlock_initialize(&task->futexes->list_lock, "futex-list-lock");
     162}
     163
     164/** Destroys the futex structures for the dying task. */
     165void futex_task_deinit(task_t *task)
     166{
     167        /* Interrupts are disabled so we must not block (cannot run cht_destroy). */
     168        if (interrupts_disabled()) {
     169                /* Invoke the blocking cht_destroy in the background. */
     170                workq_global_enqueue_noblock(&task->futexes->destroy_work,
     171                        destroy_task_cache);
     172        } else {
     173                /* We can block. Invoke cht_destroy in this thread. */
     174                destroy_task_cache(&task->futexes->destroy_work);
     175        }
     176}
     177
     178/** Deallocates a task's CHT futex cache (must already be empty). */
     179static void destroy_task_cache(work_t *work)
     180{
     181        struct futex_cache *cache =
     182                member_to_inst(work, struct futex_cache, destroy_work);
     183       
     184        /*
     185         * Destroy the cache before manually freeing items of the cache in case
     186         * table resize is in progress.
     187         */
     188        cht_destroy_unsafe(&cache->ht);
     189       
     190        /* Manually free futex_ptr cache items. */
     191        list_foreach_safe(cache->list, cur_link, next_link) {
     192                futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link);
     193
     194                list_remove(cur_link);
     195                free(fut_ptr);
     196        }
     197       
     198        free(cache);
     199}
     200
     201/** Remove references from futexes known to the current task. */
     202void futex_task_cleanup(void)
     203{
     204        struct futex_cache *futexes = TASK->futexes;
     205       
     206        /* All threads of this task have terminated. This is the last thread. */
     207        spinlock_lock(&futexes->list_lock);
     208       
     209        list_foreach_safe(futexes->list, cur_link, next_link) {
     210                futex_ptr_t *fut_ptr = member_to_inst(cur_link, futex_ptr_t, all_link);
     211
     212                /*
      213                 * The call below may free the futex. All other threads of this
     214                 * task have already terminated, so they have also definitely
     215                 * exited their CHT futex cache protecting rcu reader sections.
     216                 * Moreover release_ref() only frees the futex if this is the
     217                 * last task referencing the futex. Therefore, only threads
     218                 * of this task may have referenced the futex if it is to be freed.
     219                 */
     220                futex_release_ref_locked(fut_ptr->futex);
     221        }
     222       
     223        spinlock_unlock(&futexes->list_lock);
     224}
     225
     226
     227/** Initialize the kernel futex structure.
     228 *
     229 * @param futex Kernel futex structure.
     230 * @param paddr Physical address of the futex variable.
     231 */
     232static void futex_initialize(futex_t *futex, uintptr_t paddr)
    94233{
    95234        waitq_initialize(&futex->wq);
    96235        link_initialize(&futex->ht_link);
    97         futex->paddr = 0;
     236        futex->paddr = paddr;
    98237        futex->refcount = 1;
     238}
     239
     240/** Increments the counter of tasks referencing the futex. */
     241static void futex_add_ref(futex_t *futex)
     242{
     243        ASSERT(spinlock_locked(&futex_ht_lock));
     244        ASSERT(0 < futex->refcount);
     245        ++futex->refcount;
     246}
     247
     248/** Decrements the counter of tasks referencing the futex. May free the futex.*/
     249static void futex_release_ref(futex_t *futex)
     250{
     251        ASSERT(spinlock_locked(&futex_ht_lock));
     252        ASSERT(0 < futex->refcount);
     253       
     254        --futex->refcount;
     255       
     256        if (0 == futex->refcount) {
     257                hash_table_remove(&futex_ht, &futex->paddr, 1);
     258        }
     259}
     260
     261/** Decrements the counter of tasks referencing the futex. May free the futex.*/
     262static void futex_release_ref_locked(futex_t *futex)
     263{
     264        spinlock_lock(&futex_ht_lock);
     265        futex_release_ref(futex);
     266        spinlock_unlock(&futex_ht_lock);
     267}
     268
     269/** Returns a futex for the virtual address @a uaddr (or creates one). */
     270static futex_t *get_futex(uintptr_t uaddr)
     271{
     272        futex_t *futex = find_cached_futex(uaddr);
     273       
     274        if (futex)
     275                return futex;
     276
     277        uintptr_t paddr;
     278
     279        if (!find_futex_paddr(uaddr, &paddr)) {
     280                return 0;
     281        }
     282
     283        return get_and_cache_futex(paddr, uaddr);
     284}
     285
     286
     287/** Finds the physical address of the futex variable. */
     288static bool find_futex_paddr(uintptr_t uaddr, uintptr_t *paddr)
     289{
     290        spinlock_lock(&futex_ht_lock);
     291        page_table_lock(AS, false);
     292
     293        bool found = false;
     294        pte_t *t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), true);
     295       
     296        if (t && PTE_VALID(t) && PTE_PRESENT(t)) {
     297                found = true;
     298                *paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE));
     299        }
     300       
     301        page_table_unlock(AS, false);
     302        spinlock_unlock(&futex_ht_lock);
     303       
     304        return found;
     305}
     306
     307/** Returns the futex cached in this task with the virtual address uaddr. */
     308static futex_t *find_cached_futex(uintptr_t uaddr)
     309{
     310        cht_read_lock();
     311       
     312        futex_t *futex;
     313        cht_link_t *futex_ptr_link = cht_find_lazy(&TASK->futexes->ht, &uaddr);
     314
     315        if (futex_ptr_link) {
     316                futex_ptr_t *futex_ptr
     317                        = member_to_inst(futex_ptr_link, futex_ptr_t, cht_link);
     318               
     319                futex = futex_ptr->futex;
     320        } else {
     321                futex = NULL;
     322        }
     323       
     324        cht_read_unlock();
     325       
     326        return futex;
     327}
     328
     329
     330/**
     331 * Returns a kernel futex for the physical address @a phys_addr and caches
     332 * it in this task under the virtual address @a uaddr (if not already cached).
     333 */
     334static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr)
     335{
     336        futex_t *futex = malloc(sizeof(futex_t), 0);
     337       
     338        /*
     339         * Find the futex object in the global futex table (or insert it
     340         * if it is not present).
     341         */
     342        spinlock_lock(&futex_ht_lock);
     343       
     344        link_t *fut_link = hash_table_find(&futex_ht, &phys_addr);
     345       
     346        if (fut_link) {
     347                free(futex);
     348                futex = member_to_inst(fut_link, futex_t, ht_link);
     349                futex_add_ref(futex);
     350        } else {
     351                futex_initialize(futex, phys_addr);
     352                hash_table_insert(&futex_ht, &phys_addr, &futex->ht_link);
     353        }
     354       
     355        spinlock_unlock(&futex_ht_lock);
     356       
     357        /*
     358         * Cache the link to the futex object for this task.
     359         */
     360        futex_ptr_t *fut_ptr = malloc(sizeof(futex_ptr_t), 0);
     361        cht_link_t *dup_link;
     362       
     363        fut_ptr->futex = futex;
     364        fut_ptr->uaddr = uaddr;
     365       
     366        cht_read_lock();
     367       
     368        /* Cache the mapping from the virtual address to the futex for this task. */
     369        if (cht_insert_unique(&TASK->futexes->ht, &fut_ptr->cht_link, &dup_link)) {
     370                spinlock_lock(&TASK->futexes->list_lock);
     371                list_append(&fut_ptr->all_link, &TASK->futexes->list);
     372                spinlock_unlock(&TASK->futexes->list_lock);
     373        } else {
     374                /* Another thread of this task beat us to it. Use that mapping instead.*/
     375                free(fut_ptr);
     376                futex_release_ref_locked(futex);
     377               
     378                futex_ptr_t *dup = member_to_inst(dup_link, futex_ptr_t, cht_link);
     379                futex = dup->futex;             
     380        }
     381
     382        cht_read_unlock();
     383       
     384        return futex;
    99385}
    100386
     
    109395sysarg_t sys_futex_sleep(uintptr_t uaddr)
    110396{
    111         futex_t *futex;
    112         uintptr_t paddr;
    113         pte_t *t;
    114         int rc;
    115        
    116         /*
    117          * Find physical address of futex counter.
    118          */
    119         page_table_lock(AS, true);
    120         t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false);
    121         if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
    122                 page_table_unlock(AS, true);
     397        futex_t *futex = get_futex(uaddr);
     398       
     399        if (!futex)
    123400                return (sysarg_t) ENOENT;
    124         }
    125         paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE));
    126         page_table_unlock(AS, true);
    127        
    128         futex = futex_find(paddr);
    129 
    130 #ifdef CONFIG_UDEBUG
    131         udebug_stoppable_begin();
    132 #endif
    133         rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE);
    134 #ifdef CONFIG_UDEBUG
    135         udebug_stoppable_end();
    136 #endif
     401
     402        int rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE);
     403
    137404        return (sysarg_t) rc;
    138405}
     
    146413sysarg_t sys_futex_wakeup(uintptr_t uaddr)
    147414{
    148         futex_t *futex;
    149         uintptr_t paddr;
    150         pte_t *t;
    151        
    152         /*
    153          * Find physical address of futex counter.
    154          */
    155         page_table_lock(AS, true);
    156         t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false);
    157         if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
    158                 page_table_unlock(AS, true);
     415        futex_t *futex = get_futex(uaddr);
     416       
     417        if (futex) {
     418                waitq_wakeup(&futex->wq, WAKEUP_FIRST);
     419                return 0;
     420        } else {
    159421                return (sysarg_t) ENOENT;
    160422        }
    161         paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE));
    162         page_table_unlock(AS, true);
    163        
    164         futex = futex_find(paddr);
    165                
    166         waitq_wakeup(&futex->wq, WAKEUP_FIRST);
    167        
    168         return 0;
    169 }
    170 
    171 /** Find kernel address of the futex structure corresponding to paddr.
    172  *
    173  * If the structure does not exist already, a new one is created.
    174  *
    175  * @param paddr         Physical address of the userspace futex counter.
    176  *
    177  * @return              Address of the kernel futex structure.
    178  */
    179 futex_t *futex_find(uintptr_t paddr)
    180 {
    181         link_t *item;
    182         futex_t *futex;
    183         btree_node_t *leaf;
    184        
    185         /*
    186          * Find the respective futex structure
    187          * or allocate new one if it does not exist already.
    188          */
    189         mutex_lock(&futex_ht_lock);
    190         item = hash_table_find(&futex_ht, &paddr);
    191         if (item) {
    192                 futex = hash_table_get_instance(item, futex_t, ht_link);
    193 
    194                 /*
    195                  * See if the current task knows this futex.
    196                  */
    197                 mutex_lock(&TASK->futexes_lock);
    198                 if (!btree_search(&TASK->futexes, paddr, &leaf)) {
    199                         /*
    200                          * The futex is new to the current task.
    201                          * Upgrade its reference count and put it to the
    202                          * current task's B+tree of known futexes.
    203                          */
    204                         futex->refcount++;
    205                         btree_insert(&TASK->futexes, paddr, futex, leaf);
    206                 }
    207                 mutex_unlock(&TASK->futexes_lock);
    208         } else {
    209                 futex = (futex_t *) malloc(sizeof(futex_t), 0);
    210                 futex_initialize(futex);
    211                 futex->paddr = paddr;
    212                 hash_table_insert(&futex_ht, &paddr, &futex->ht_link);
    213                        
    214                 /*
    215                  * This is the first task referencing the futex.
    216                  * It can be directly inserted into its
    217                  * B+tree of known futexes.
    218                  */
    219                 mutex_lock(&TASK->futexes_lock);
    220                 btree_insert(&TASK->futexes, paddr, futex, NULL);
    221                 mutex_unlock(&TASK->futexes_lock);
    222                
    223         }
    224         mutex_unlock(&futex_ht_lock);
    225        
    226         return futex;
    227 }
     423}
     424
    228425
    229426/** Compute hash index into futex hash table.
     
    268465}
    269466
    270 /** Remove references from futexes known to the current task. */
    271 void futex_cleanup(void)
    272 {
    273         mutex_lock(&futex_ht_lock);
    274         mutex_lock(&TASK->futexes_lock);
    275 
    276         list_foreach(TASK->futexes.leaf_list, leaf_link, btree_node_t, node) {
    277                 unsigned int i;
    278                
    279                 for (i = 0; i < node->keys; i++) {
    280                         futex_t *ftx;
    281                         uintptr_t paddr = node->key[i];
    282                        
    283                         ftx = (futex_t *) node->value[i];
    284                         if (--ftx->refcount == 0)
    285                                 hash_table_remove(&futex_ht, &paddr, 1);
    286                 }
    287         }
    288        
    289         mutex_unlock(&TASK->futexes_lock);
    290         mutex_unlock(&futex_ht_lock);
     467/*
     468 * Operations of a task's CHT that caches mappings of futex user space
     469 * virtual addresses to kernel futex objects.
     470 */
     471
     472static size_t task_fut_ht_hash(const cht_link_t *link)
     473{
     474        const futex_ptr_t *fut_ptr = member_to_inst(link, futex_ptr_t, cht_link);
     475        return fut_ptr->uaddr;
     476}
     477
     478static size_t task_fut_ht_key_hash(void *key)
     479{
     480        return *(uintptr_t*)key;
     481}
     482
     483static bool task_fut_ht_equal(const cht_link_t *item1, const cht_link_t *item2)
     484{
     485        const futex_ptr_t *fut_ptr1 = member_to_inst(item1, futex_ptr_t, cht_link);
     486        const futex_ptr_t *fut_ptr2 = member_to_inst(item2, futex_ptr_t, cht_link);
     487       
     488        return fut_ptr1->uaddr == fut_ptr2->uaddr;
     489}
     490
     491static bool task_fut_ht_key_equal(void *key, const cht_link_t *item)
     492{
     493        const futex_ptr_t *fut_ptr = member_to_inst(item, futex_ptr_t, cht_link);
     494        uintptr_t uaddr = *(uintptr_t*)key;
     495       
     496        return fut_ptr->uaddr == uaddr;
    291497}
    292498
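For context, the kernel side above only provides sleep/wakeup queues keyed by the futex variable's physical address; the counting protocol lives in user space. A hedged sketch of that protocol, along the lines of libc's futex.h (treat the exact names and types as assumptions, not this changeset's API):

    /* User-space futex object: just an atomic counter, starting at 1. */
    typedef struct {
            atomic_t val;
    } uspace_futex_t;
    
    static inline void example_futex_down(uspace_futex_t *futex)
    {
            /* Counter dropped below zero: the futex is held, go sleep. */
            if ((atomic_signed_t) atomic_predec(&futex->val) < 0)
                    __SYSCALL1(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count);
    }
    
    static inline void example_futex_up(uspace_futex_t *futex)
    {
            /* Someone is sleeping (or about to): wake one waiter. */
            if ((atomic_signed_t) atomic_postinc(&futex->val) < 0)
                    __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &futex->val.count);
    }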
  • kernel/generic/src/synch/mutex.c

    r21799398 rb1c57a8  
    4141#include <arch.h>
    4242#include <stacktrace.h>
     43#include <cpu.h>
     44#include <proc/thread.h>
    4345
    4446/** Initialize mutex.
  • kernel/generic/src/synch/smc.c

    r21799398 rb1c57a8  
    4141#include <arch/barrier.h>
    4242#include <synch/smc.h>
     43#include <mm/as.h>
    4344
    4445sysarg_t sys_smc_coherence(uintptr_t va, size_t size)
  • kernel/generic/src/synch/spinlock.c

    r21799398 rb1c57a8  
    4545#include <symtab.h>
    4646#include <stacktrace.h>
     47#include <cpu.h>
    4748
    4849#ifdef CONFIG_SMP
     
    198199 *
    199200 * @param lock    IRQ spinlock to be locked.
    200  * @param irq_dis If true, interrupts are actually disabled
    201  *                prior locking the spinlock. If false, interrupts
    202  *                are expected to be already disabled.
     201 * @param irq_dis If true, disables interrupts before locking the spinlock.
     202 *                If false, interrupts are expected to be already disabled.
    203203 *
    204204 */
  • kernel/generic/src/synch/waitq.c

    r21799398 rb1c57a8  
    5757
    5858static void waitq_sleep_timed_out(void *);
     59static void waitq_complete_wakeup(waitq_t *);
     60
    5961
    6062/** Initialize wait queue
     
    330332                break;
    331333        default:
     334                /*
     335                 * Wait for a waitq_wakeup() or waitq_unsleep() to complete
     336                 * before returning from waitq_sleep() to the caller. Otherwise
     337                 * the caller might expect that the wait queue is no longer used
      338                 * and deallocate it (although the wakeup on another cpu has
     339                 * not yet completed and is using the wait queue).
     340                 *
     341                 * Note that we have to do this for ESYNCH_OK_BLOCKED and
     342                 * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT
     343                 * where the timeout handler stops using the waitq before waking
     344                 * us up. To be on the safe side, ensure the waitq is not in use
     345                 * anymore in this case as well.
     346                 */
     347                waitq_complete_wakeup(wq);
    332348                break;
    333349        }
     
    357373        } else {
    358374                if (PARAM_NON_BLOCKING(flags, usec)) {
    359                         /* Return immediatelly instead of going to sleep */
     375                        /* Return immediately instead of going to sleep */
    360376                        return ESYNCH_WOULD_BLOCK;
    361377                }
     
    442458        irq_spinlock_unlock(&wq->lock, true);
    443459}
     460
      461/** If there is a wakeup in progress, actively waits for it to complete.
     462 *
     463 * The function returns once the concurrently running waitq_wakeup()
     464 * exits. It returns immediately if there are no concurrent wakeups
     465 * at the time.
     466 *
     467 * Interrupts must be disabled.
     468 *
     469 * Example usage:
     470 * @code
      471 * void callback(waitq_t *wq)
     472 * {
     473 *     // Do something and notify wait_for_completion() that we're done.
     474 *     waitq_wakeup(wq);
     475 * }
     476 * void wait_for_completion(void)
     477 * {
      478 *     waitq_t wq;
     479 *     waitq_initialize(&wq);
     480 *     // Run callback() in the background, pass it wq.
     481 *     do_asynchronously(callback, &wq);
     482 *     // Wait for callback() to complete its work.
     483 *     waitq_sleep(&wq);
     484 *     // callback() completed its work, but it may still be accessing
     485 *     // wq in waitq_wakeup(). Therefore it is not yet safe to return
      486 *     // from waitq_sleep() or it would clobber our stack (where wq
     487 *     // is stored). waitq_sleep() ensures the wait queue is no longer
     488 *     // in use by invoking waitq_complete_wakeup() internally.
     489 *     
     490 *     // waitq_sleep() returned, it is safe to free wq.
     491 * }
     492 * @endcode
     493 *
     494 * @param wq  Pointer to a wait queue.
     495 */
     496static void waitq_complete_wakeup(waitq_t *wq)
     497{
     498        ASSERT(interrupts_disabled());
     499       
     500        irq_spinlock_lock(&wq->lock, false);
     501        irq_spinlock_unlock(&wq->lock, false);
     502}
     503
    444504
    445505/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
  • kernel/generic/src/syscall/syscall.c

    r21799398 rb1c57a8  
    5050#include <synch/futex.h>
    5151#include <synch/smc.h>
     52#include <synch/smp_memory_barrier.h>
    5253#include <ddi/ddi.h>
    5354#include <ipc/event.h>
     
    142143        (syshandler_t) sys_futex_wakeup,
    143144        (syshandler_t) sys_smc_coherence,
     145        (syshandler_t) sys_smp_memory_barrier,
     146       
    144147       
    145148        /* Address space related syscalls. */
  • kernel/generic/src/time/clock.c

    r21799398 rb1c57a8  
    212212                irq_spinlock_unlock(&THREAD->lock, false);
    213213               
    214                 if ((!ticks) && (!PREEMPTION_DISABLED)) {
     214                if (ticks == 0 && PREEMPTION_ENABLED) {
    215215                        scheduler();
    216216#ifdef CONFIG_UDEBUG
  • kernel/generic/src/udebug/udebug.c

    r21799398 rb1c57a8  
    4444#include <print.h>
    4545#include <arch.h>
     46#include <proc/task.h>
     47#include <proc/thread.h>
    4648
    4749/** Initialize udebug part of task structure.