Changeset 0d56712 in mainline for kernel/generic/src/synch/workqueue.c
- Timestamp:
- 2012-07-07T00:27:01Z (11 years ago)
- Branches:
- lfn, master, serial
- Children:
- ff90f5f
- Parents:
- 8a64e81e
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/synch/workqueue.c
r8a64e81e r0d56712 9 9 #include <macros.h> 10 10 11 #define WORKQ_MAGIC 0xf00c1333U 12 #define WORK_ITEM_MAGIC 0xfeec1777U 13 11 14 12 15 struct work_queue { … … 46 49 47 50 link_t nb_link; 51 52 #ifdef CONFIG_DEBUG 53 /* Magic cookie for integrity checks. Immutable. Accessed without lock. */ 54 uint32_t cookie; 55 #endif 48 56 }; 49 57 … … 79 87 80 88 81 82 89 /* Fwd decl. */ 83 90 static void workq_preinit(struct work_queue *workq, const char *name); … … 87 94 static int _workq_enqueue(struct work_queue *workq, work_t *work_item, 88 95 work_func_t func, bool can_block); 96 static void init_work_item(work_t *work_item, work_func_t func); 89 97 static signal_op_t signal_worker_logic(struct work_queue *workq, bool can_block); 90 98 static void worker_thread(void *arg); … … 93 101 static void cv_wait(struct work_queue *workq); 94 102 static void nonblock_init(void); 103 static bool workq_corrupted(struct work_queue *workq); 104 static bool work_item_corrupted(work_t *work_item); 105 95 106 96 107 /** Creates worker thread for the system-wide worker queue. 
*/ … … 138 149 if (workq) { 139 150 if (workq_init(workq, name)) { 151 ASSERT(!workq_corrupted(workq)); 140 152 return workq; 141 153 } … … 150 162 void workq_destroy(struct work_queue *workq) 151 163 { 152 ASSERT( workq);164 ASSERT(!workq_corrupted(workq)); 153 165 154 166 irq_spinlock_lock(&workq->lock, true); … … 163 175 } 164 176 177 #ifdef CONFIG_DEBUG 178 workq->cookie = 0; 179 #endif 180 165 181 free(workq); 166 182 } … … 169 185 static void workq_preinit(struct work_queue *workq, const char *name) 170 186 { 187 #ifdef CONFIG_DEBUG 188 workq->cookie = WORKQ_MAGIC; 189 #endif 190 171 191 irq_spinlock_initialize(&workq->lock, name); 172 192 condvar_initialize(&workq->activate_worker); … … 202 222 static bool add_worker(struct work_queue *workq) 203 223 { 224 ASSERT(!workq_corrupted(workq)); 225 204 226 thread_t *thread = thread_create(worker_thread, workq, TASK, 205 227 THREAD_FLAG_NONE, workq->name); … … 270 292 void workq_stop(struct work_queue *workq) 271 293 { 294 ASSERT(!workq_corrupted(workq)); 295 272 296 interrupt_workers(workq); 273 297 wait_for_workers(workq); … … 285 309 /* Respect lock ordering - do not hold workq->lock during broadcast. */ 286 310 irq_spinlock_unlock(&workq->lock, true); 287 288 311 289 312 condvar_broadcast(&workq->activate_worker); … … 408 431 work_func_t func, bool can_block) 409 432 { 433 ASSERT(!workq_corrupted(workq)); 434 410 435 bool success = true; 411 436 signal_op_t signal_op = NULL; 412 413 link_initialize(&work_item->queue_link);414 work_item->func = func;415 437 416 438 irq_spinlock_lock(&workq->lock, true); … … 419 441 success = false; 420 442 } else { 443 init_work_item(work_item, func); 421 444 list_append(&work_item->queue_link, &workq->queue); 422 445 ++workq->item_cnt; … … 442 465 } 443 466 467 /** Prepare an item to be added to the work item queue. 
*/ 468 static void init_work_item(work_t *work_item, work_func_t func) 469 { 470 #ifdef CONFIG_DEBUG 471 work_item->cookie = WORK_ITEM_MAGIC; 472 #endif 473 474 link_initialize(&work_item->queue_link); 475 work_item->func = func; 476 } 477 444 478 /** Returns the number of workers running work func() that are not blocked. */ 445 479 static size_t active_workers_now(struct work_queue *workq) … … 497 531 static void signal_worker_op(struct work_queue *workq) 498 532 { 533 ASSERT(!workq_corrupted(workq)); 534 499 535 condvar_signal(&workq->activate_worker); 500 536 … … 515 551 static signal_op_t signal_worker_logic(struct work_queue *workq, bool can_block) 516 552 { 553 ASSERT(!workq_corrupted(workq)); 517 554 ASSERT(irq_spinlock_locked(&workq->lock)); 518 555 … … 610 647 /* Copy the func field so func() can safely free work_item. */ 611 648 work_func_t func = work_item->func; 612 649 613 650 func(work_item); 614 651 } … … 618 655 static bool dequeue_work(struct work_queue *workq, work_t **pwork_item) 619 656 { 657 ASSERT(!workq_corrupted(workq)); 658 620 659 irq_spinlock_lock(&workq->lock, true); 621 660 … … 646 685 link_t *work_link = list_first(&workq->queue); 647 686 *pwork_item = list_get_instance(work_link, work_t, queue_link); 687 688 #ifdef CONFIG_DEBUG 689 ASSERT(!work_item_corrupted(*pwork_item)); 690 (*pwork_item)->cookie = 0; 691 #endif 648 692 list_remove(work_link); 649 693 --workq->item_cnt; … … 694 738 &workq->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); 695 739 740 ASSERT(!workq_corrupted(workq)); 696 741 ASSERT(irq_spinlock_locked(&workq->lock)); 697 742 … … 711 756 /* Must be blocked in user work func() and not be waiting for work. 
*/ 712 757 ASSERT(!thread->workq_idling); 758 ASSERT(thread->state == Sleeping); 713 759 ASSERT(THREAD != thread); 714 ASSERT( thread->state == Sleeping);760 ASSERT(!workq_corrupted(thread->workq)); 715 761 716 762 /* Protected by thread->lock */ … … 732 778 if (THREAD->workq && THREAD->state == Sleeping && !THREAD->workq_idling) { 733 779 ASSERT(!THREAD->workq_blocked); 780 ASSERT(!workq_corrupted(THREAD->workq)); 781 734 782 THREAD->workq_blocked = true; 735 783 … … 765 813 (0 < workq->activate_pending) ? "increasing" : "stable"; 766 814 767 768 815 irq_spinlock_unlock(&workq->lock, true); 769 816 770 817 printf( 771 "Configur ed with: max_worker_cnt=%zu, min_worker_cnt=%zu,\n"772 " 818 "Configuration: max_worker_cnt=%zu, min_worker_cnt=%zu,\n" 819 " max_concurrent_workers=%zu, max_items_per_worker=%zu\n" 773 820 "Workers: %zu\n" 774 821 "Active: %zu (workers currently processing work)\n" … … 813 860 *pworkq = list_get_instance(list_first(&info->work_queues), 814 861 struct work_queue, nb_link); 862 863 ASSERT(!workq_corrupted(*pworkq)); 815 864 816 865 list_remove(&(*pworkq)->nb_link); … … 835 884 static void nonblock_init(void) 836 885 { 837 irq_spinlock_initialize(&nonblock_adder.lock, " workq:nb.lock");886 irq_spinlock_initialize(&nonblock_adder.lock, "kworkq-nb.lock"); 838 887 condvar_initialize(&nonblock_adder.req_cv); 839 888 list_initialize(&nonblock_adder.work_queues); 840 889 841 890 nonblock_adder.thread = thread_create(thr_nonblock_add_worker, 842 &nonblock_adder, TASK, THREAD_FLAG_NONE, " workq:nb");891 &nonblock_adder, TASK, THREAD_FLAG_NONE, "kworkq-nb"); 843 892 844 893 if (nonblock_adder.thread) { … … 849 898 * sleep, but at least boot the system. 850 899 */ 851 printf("Failed to create workq:nb. Sleeping work may stall the workq.\n"); 852 } 853 } 854 855 900 printf("Failed to create kworkq-nb. Sleeping work may stall the workq.\n"); 901 } 902 } 903 904 /** Returns true if the workq is definitely corrupted; false if not sure. 
905 * 906 * Can be used outside of any locks. 907 */ 908 static bool workq_corrupted(struct work_queue *workq) 909 { 910 #ifdef CONFIG_DEBUG 911 /* 912 * Needed to make the most current cookie value set by workq_preinit() 913 * visible even if we access the workq right after it is created but 914 * on a different cpu. Otherwise, workq_corrupted() would not work 915 * outside a lock. 916 */ 917 memory_barrier(); 918 return NULL == workq || workq->cookie != WORKQ_MAGIC; 919 #else 920 return false; 921 #endif 922 } 923 924 /** Returns true if the work_item is definitely corrupted; false if not sure. 925 * 926 * Must be used with the work queue protecting spinlock locked. 927 */ 928 static bool work_item_corrupted(work_t *work_item) 929 { 930 #ifdef CONFIG_DEBUG 931 return NULL == work_item || work_item->cookie != WORK_ITEM_MAGIC; 932 #else 933 return false; 934 #endif 935 }
Note: See TracChangeset for help on using the changeset viewer.