Changeset 1b20da0 in mainline for kernel/generic/src/synch/workqueue.c


Timestamp:
2018-02-28T17:52:03Z
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
3061bc1
Parents:
df6ded8
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:26:03)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:52:03)
Message:

style: Remove trailing whitespace on non-empty lines, in certain file types.

Command used: tools/srepl '\([^[:space:]]\)\s\+$' '\1' -- *.c *.h *.py *.sh *.s *.S *.ag
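
The expression is a basic-regex substitution: \([^[:space:]]\) captures the last non-whitespace character on a line, \s\+$ matches the run of trailing whitespace after it, and the replacement \1 keeps only the captured character. Whitespace-only lines are untouched because the pattern requires a non-whitespace character before the trailing run. Assuming tools/srepl simply applies the substitution in place to every file matching the given patterns (the script itself is not part of this changeset), a roughly equivalent standalone command using find and GNU sed would be:

    # Hypothetical stand-in for tools/srepl; its actual option
    # handling and file discovery are assumptions here.
    find . -type f \( -name '*.c' -o -name '*.h' -o -name '*.py' \
            -o -name '*.sh' -o -name '*.s' -o -name '*.S' -o -name '*.ag' \) \
        -exec sed -i 's/\([^[:space:]]\)\s\+$/\1/' {} +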

File:
1 edited

  • kernel/generic/src/synch/workqueue.c

--- kernel/generic/src/synch/workqueue.c (rdf6ded8)
+++ kernel/generic/src/synch/workqueue.c (r1b20da0)
@@ -54,6 +54,6 @@
 
 struct work_queue {
-    /* 
-     * Protects everything except activate_worker. 
+    /*
+     * Protects everything except activate_worker.
      * Must be acquired after any thread->locks.
      */
@@ -93,5 +93,5 @@
     /* Magic cookie for integrity checks. Immutable. Accessed without lock. */
     uint32_t cookie;
-#endif 
+#endif
 };
 
@@ -132,5 +132,5 @@
 static void interrupt_workers(struct work_queue *workq);
 static void wait_for_workers(struct work_queue *workq);
-static int _workq_enqueue(struct work_queue *workq, work_t *work_item, 
+static int _workq_enqueue(struct work_queue *workq, work_t *work_item,
     work_func_t func, bool can_block);
 static void init_work_item(work_t *work_item, work_func_t func);
@@ -150,6 +150,6 @@
 void workq_global_worker_init(void)
 {
-    /* 
-     * No need for additional synchronization. Stores to word-sized 
+    /*
+     * No need for additional synchronization. Stores to word-sized
      * variables are atomic and the change will eventually propagate.
      * Moreover add_worker() includes the necessary memory barriers
@@ -221,5 +221,5 @@
 #ifdef CONFIG_DEBUG
     workq->cookie = 0;
-#endif 
+#endif
 
     free(workq);
@@ -231,5 +231,5 @@
 #ifdef CONFIG_DEBUG
     workq->cookie = WORKQ_MAGIC;
-#endif 
+#endif
 
     irq_spinlock_initialize(&workq->lock, name);
@@ -252,6 +252,6 @@
 }
 
-/** Initializes a work queue. Returns true if successful. 
- * 
+/** Initializes a work queue. Returns true if successful.
+ *
  * Before destroying a work queue it must be stopped via
  * workq_stop().
@@ -268,5 +268,5 @@
     assert(!workq_corrupted(workq));
 
-    thread_t *thread = thread_create(worker_thread, workq, TASK, 
+    thread_t *thread = thread_create(worker_thread, workq, TASK,
         THREAD_FLAG_NONE, workq->name);
 
@@ -297,5 +297,5 @@
             cpu_id = CPU->id;
 
-        thread->workq = workq; 
+        thread->workq = workq;
         thread->cpu = &cpus[cpu_id];
         thread->workq_blocked = false;
@@ -305,5 +305,5 @@
         list_append(&thread->workq_link, &workq->workers);
     } else {
-        /* 
+        /*
         * Work queue is shutting down - we must not add the worker
         * and we cannot destroy it without ready-ing it. Mark it
@@ -330,7 +330,7 @@
 }
 
-/** Shuts down the work queue. Waits for all pending work items to complete. 
- * 
- * workq_stop() may only be run once. 
+/** Shuts down the work queue. Waits for all pending work items to complete.
+ *
+ * workq_stop() may only be run once.
  */
 void workq_stop(struct work_queue *workq)
@@ -391,6 +391,6 @@
 }
 
-/** Queues a function into the global wait queue without blocking. 
- * 
+/** Queues a function into the global wait queue without blocking.
+ *
  * See workq_enqueue_noblock() for more details.
  */
@@ -400,6 +400,6 @@
 }
 
-/** Queues a function into the global wait queue; may block. 
- * 
+/** Queues a function into the global wait queue; may block.
+ *
  * See workq_enqueue() for more details.
  */
@@ -409,13 +409,13 @@
 }
 
-/** Adds a function to be invoked in a separate thread without blocking. 
- * 
- * workq_enqueue_noblock() is guaranteed not to block. It is safe 
+/** Adds a function to be invoked in a separate thread without blocking.
+ *
+ * workq_enqueue_noblock() is guaranteed not to block. It is safe
  * to invoke from interrupt handlers.
- * 
+ *
  * Consider using workq_enqueue() instead if at all possible. Otherwise,
- * your work item may have to wait for previously enqueued sleeping 
+ * your work item may have to wait for previously enqueued sleeping
  * work items to complete if you are unlucky.
- * 
+ *
  * @param workq     Work queue where to queue the work item.
  * @param work_item Work item bookkeeping structure. Must be valid
@@ -423,9 +423,9 @@
  * @param func      User supplied function to invoke in a worker thread.
 
- * @return false if work queue is shutting down; function is not 
- *               queued for further processing. 
+ * @return false if work queue is shutting down; function is not
+ *               queued for further processing.
  * @return true  Otherwise. func() will be invoked in a separate thread.
  */
-bool workq_enqueue_noblock(struct work_queue *workq, work_t *work_item, 
+bool workq_enqueue_noblock(struct work_queue *workq, work_t *work_item,
     work_func_t func)
 {
@@ -433,9 +433,9 @@
 }
 
-/** Adds a function to be invoked in a separate thread; may block. 
- * 
- * While the workq_enqueue() is unlikely to block, it may do so if too 
+/** Adds a function to be invoked in a separate thread; may block.
+ *
+ * While the workq_enqueue() is unlikely to block, it may do so if too
  * many previous work items blocked sleeping.
- * 
+ *
  * @param workq     Work queue where to queue the work item.
  * @param work_item Work item bookkeeping structure. Must be valid
@@ -443,6 +443,6 @@
  * @param func      User supplied function to invoke in a worker thread.
 
- * @return false if work queue is shutting down; function is not 
- *               queued for further processing. 
+ * @return false if work queue is shutting down; function is not
+ *               queued for further processing.
  * @return true  Otherwise. func() will be invoked in a separate thread.
  */
@@ -453,7 +453,7 @@
 
 /** Adds a work item that will be processed by a separate worker thread.
- * 
- * func() will be invoked in another kernel thread and may block. 
- * 
+ *
+ * func() will be invoked in another kernel thread and may block.
+ *
  * Prefer to call _workq_enqueue() with can_block set. Otherwise
  * your work item may have to wait for sleeping work items to complete.
@@ -461,5 +461,5 @@
  * be create without can_block set because creating a thread might
  * block due to low memory conditions.
- * 
+ *
  * @param workq     Work queue where to queue the work item.
  * @param work_item Work item bookkeeping structure. Must be valid
@@ -468,9 +468,9 @@
  * @param can_block May adding this work item block?
 
- * @return false if work queue is shutting down; function is not 
- *               queued for further processing. 
+ * @return false if work queue is shutting down; function is not
+ *               queued for further processing.
  * @return true  Otherwise.
  */
-static int _workq_enqueue(struct work_queue *workq, work_t *work_item, 
+static int _workq_enqueue(struct work_queue *workq, work_t *work_item,
     work_func_t func, bool can_block)
 {
@@ -493,6 +493,6 @@
             signal_op = signal_worker_logic(workq, can_block);
         } else {
-            /* 
-             * During boot there are no workers to signal. Just queue 
+            /*
+             * During boot there are no workers to signal. Just queue
              * the work and let future workers take care of it.
              */
@@ -514,5 +514,5 @@
 #ifdef CONFIG_DEBUG
     work_item->cookie = WORK_ITEM_MAGIC;
-#endif 
+#endif
 
     link_initialize(&work_item->queue_link);
@@ -537,8 +537,8 @@
     assert(workq->activate_pending <= workq->idle_worker_cnt);
 
-    /* 
-     * Workers actively running the work func() this very moment and 
-     * are neither blocked nor idle. Exclude ->activate_pending workers 
-     * since they will run their work func() once they get a time slice 
+    /*
+     * Workers actively running the work func() this very moment and
+     * are neither blocked nor idle. Exclude ->activate_pending workers
+     * since they will run their work func() once they get a time slice
      * and are not running it right now.
      */
@@ -546,7 +546,7 @@
 }
 
-/** 
- * Returns the number of workers that are running or are about to run work 
- * func() and that are not blocked. 
+/**
+ * Returns the number of workers that are running or are about to run work
+ * func() and that are not blocked.
  */
 static size_t active_workers(struct work_queue *workq)
@@ -554,6 +554,6 @@
     assert(irq_spinlock_locked(&workq->lock));
 
-    /* 
-     * Workers actively running the work func() and are neither blocked nor 
+    /*
+     * Workers actively running the work func() and are neither blocked nor
      * idle. ->activate_pending workers will run their work func() once they
      * get a time slice after waking from a condvar wait, so count them
@@ -586,9 +586,9 @@
 
 /** Determines how to signal workers if at all.
- * 
+ *
  * @param workq     Work queue where a new work item was queued.
- * @param can_block True if we may block while signaling a worker or creating 
+ * @param can_block True if we may block while signaling a worker or creating
  *                  a new worker.
- * 
+ *
  * @return Function that will notify workers or NULL if no action is needed.
  */
@@ -601,7 +601,7 @@
     signal_op_t signal_op = NULL;
 
-    /* 
-     * Workers actively running the work func() and neither blocked nor idle. 
-     * Including ->activate_pending workers that will run their work func() 
+    /*
+     * Workers actively running the work func() and neither blocked nor idle.
+     * Including ->activate_pending workers that will run their work func()
      * once they get a time slice.
      */
@@ -613,12 +613,12 @@
     if (max_load < workq->item_cnt) {
 
-        size_t remaining_idle = 
+        size_t remaining_idle =
             workq->idle_worker_cnt - workq->activate_pending;
 
         /* Idle workers still exist - activate one. */
         if (remaining_idle > 0) {
-            /* 
+            /*
             * Directly changing idle_worker_cnt here would not allow
-             * workers to recognize spurious wake-ups. Change 
+             * workers to recognize spurious wake-ups. Change
             * activate_pending instead.
             */
@@ -633,5 +633,5 @@
             if (need_worker && can_block) {
                 signal_op = add_worker_op;
-                /* 
+                /*
                 * It may take some time to actually create the worker.
                 * We don't want to swamp the thread pool with superfluous
@@ -642,5 +642,5 @@
             }
 
-            /* 
+            /*
             * We cannot create a new worker but we need one desperately
             * because all workers are blocked in their work functions.
@@ -661,6 +661,6 @@
         }
     } else {
-        /* 
-         * There are enough active/running workers to process the queue. 
+        /*
+         * There are enough active/running workers to process the queue.
         * No need to signal/activate any new workers.
         */
@@ -674,7 +674,7 @@
 static void worker_thread(void *arg)
 {
-    /* 
-     * The thread has been created after the work queue was ordered to stop. 
-     * Do not access the work queue and return immediately. 
+    /*
+     * The thread has been created after the work queue was ordered to stop.
+     * Do not access the work queue and return immediately.
      */
     if (thread_interrupted(THREAD)) {
@@ -760,5 +760,5 @@
         return (min_worker_cnt <= workq->idle_worker_cnt);
     } else {
-        /* 
+        /*
         * There is work but we are swamped with too many active workers
         * that were woken up from sleep at around the same time. We
@@ -854,5 +854,5 @@
     bool stopping = workq->stopping;
     bool worker_surplus = worker_unnecessary(workq);
-    const char *load_str = worker_surplus ? "decreasing" : 
+    const char *load_str = worker_surplus ? "decreasing" :
         (0 < workq->activate_pending) ? "increasing" : "stable";
 
@@ -869,5 +869,5 @@
         "Stopping: %d\n"
         "Load: %s\n",
-        max_worker_cnt, min_worker_cnt, 
+        max_worker_cnt, min_worker_cnt,
         max_concurrent_workers, max_items_per_worker,
         total,
@@ -895,5 +895,5 @@
 
     while (list_empty(&info->work_queues) && !stop) {
-        errno_t ret = _condvar_wait_timeout_irq_spinlock(&info->req_cv, 
+        errno_t ret = _condvar_wait_timeout_irq_spinlock(&info->req_cv,
             &info->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
 
@@ -902,5 +902,5 @@
 
     if (!stop) {
-        *pworkq = list_get_instance(list_first(&info->work_queues), 
+        *pworkq = list_get_instance(list_first(&info->work_queues),
             struct work_queue, nb_link);
 
@@ -932,5 +932,5 @@
     list_initialize(&nonblock_adder.work_queues);
 
-    nonblock_adder.thread = thread_create(thr_nonblock_add_worker, 
+    nonblock_adder.thread = thread_create(thr_nonblock_add_worker,
         &nonblock_adder, TASK, THREAD_FLAG_NONE, "kworkq-nb");
 
@@ -938,5 +938,5 @@
         thread_ready(nonblock_adder.thread);
     } else {
-        /* 
+        /*
         * We won't be able to add workers without blocking if all workers
        * sleep, but at least boot the system.
@@ -947,11 +947,11 @@
 
 #ifdef CONFIG_DEBUG
-/** Returns true if the workq is definitely corrupted; false if not sure. 
- * 
+/** Returns true if the workq is definitely corrupted; false if not sure.
+ *
  * Can be used outside of any locks.
  */
 static bool workq_corrupted(struct work_queue *workq)
 {
-    /* 
+    /*
     * Needed to make the most current cookie value set by workq_preinit()
     * visible even if we access the workq right after it is created but
@@ -963,6 +963,6 @@
 }
 
-/** Returns true if the work_item is definitely corrupted; false if not sure. 
- * 
+/** Returns true if the work_item is definitely corrupted; false if not sure.
+ *
  * Must be used with the work queue protecting spinlock locked.
  */
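
For context, the doc comments touched above describe the workqueue enqueue API: workq_enqueue() is preferred and may block, workq_enqueue_noblock() never blocks and is safe in interrupt handlers, and both return false once the queue is stopping. A minimal caller sketch, assuming the conventional work_func_t signature void (*)(work_t *) and the synch/workqueue.h header (neither is spelled out in this diff):

    #include <synch/workqueue.h>

    /* Work item bookkeeping; must remain valid until flush_func() has run. */
    static work_t flush_work;

    static void flush_func(work_t *work_item)
    {
        /* Runs in a worker thread and may block freely. */
        (void) work_item;
    }

    static void schedule_flush(struct work_queue *workq)
    {
        /*
         * Preferred, possibly blocking path; use workq_enqueue_noblock()
         * only where blocking is not an option (e.g. interrupt handlers).
         */
        if (!workq_enqueue(workq, &flush_work, flush_func)) {
            /* Queue is shutting down; flush_func() will not be invoked. */
        }
    }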