Changeset 205832b in mainline for kernel/generic/src/synch/rcu.c
- Timestamp: 2012-11-05T15:37:39Z (13 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f048658
- Parents: 6b99156
- File: 1 edited (kernel/generic/src/synch/rcu.c)
Legend:
- Unmodified: context lines (no prefix)
- Added: lines prefixed with '+' (r205832b)
- Removed: lines prefixed with '-' (r6b99156)
kernel/generic/src/synch/rcu.c
--- kernel/generic/src/synch/rcu.c (r6b99156)
+++ kernel/generic/src/synch/rcu.c (r205832b)
@@ -237,5 +237,5 @@
 #endif
 
-	rcu.detector_thr = 0;
+	rcu.detector_thr = NULL;
 
 	rcu.stat_expedited_cnt = 0;
@@ -260,9 +260,9 @@
 #endif
 
-	CPU->rcu.cur_cbs = 0;
+	CPU->rcu.cur_cbs = NULL;
 	CPU->rcu.cur_cbs_cnt = 0;
-	CPU->rcu.next_cbs = 0;
+	CPU->rcu.next_cbs = NULL;
 	CPU->rcu.next_cbs_cnt = 0;
-	CPU->rcu.arriving_cbs = 0;
+	CPU->rcu.arriving_cbs = NULL;
 	CPU->rcu.parriving_cbs_tail = &CPU->rcu.arriving_cbs;
 	CPU->rcu.arriving_cbs_cnt = 0;
@@ -275,5 +275,5 @@
 	/* BSP creates reclaimer threads before AP's rcu_cpu_init() runs. */
 	if (config.cpu_active == 1)
-		CPU->rcu.reclaimer_thr = 0;
+		CPU->rcu.reclaimer_thr = NULL;
 
 	CPU->rcu.stat_max_cbs = 0;
@@ -317,5 +317,5 @@
 	/* Stop and wait for reclaimers. */
 	for (unsigned int cpu_id = 0; cpu_id < config.cpu_active; ++cpu_id) {
-		ASSERT(cpus[cpu_id].rcu.reclaimer_thr != 0);
+		ASSERT(cpus[cpu_id].rcu.reclaimer_thr != NULL);
 
 		if (cpus[cpu_id].rcu.reclaimer_thr) {
@@ -323,5 +323,5 @@
 			thread_join(cpus[cpu_id].rcu.reclaimer_thr);
 			thread_detach(cpus[cpu_id].rcu.reclaimer_thr);
-			cpus[cpu_id].rcu.reclaimer_thr = 0;
+			cpus[cpu_id].rcu.reclaimer_thr = NULL;
 		}
 	}
@@ -333,5 +333,5 @@
 		thread_join(rcu.detector_thr);
 		thread_detach(rcu.detector_thr);
-		rcu.detector_thr = 0;
+		rcu.detector_thr = NULL;
 	}
 #endif
@@ -357,5 +357,5 @@
 
 	cpus[cpu_id].rcu.reclaimer_thr =
-		thread_create(reclaimer, 0, TASK, THREAD_FLAG_NONE, name);
+		thread_create(reclaimer, NULL, TASK, THREAD_FLAG_NONE, name);
 
 	if (!cpus[cpu_id].rcu.reclaimer_thr)
@@ -373,5 +373,5 @@
 {
 	rcu.detector_thr =
-		thread_create(detector, 0, TASK, THREAD_FLAG_NONE, "rcu-det");
+		thread_create(detector, NULL, TASK, THREAD_FLAG_NONE, "rcu-det");
 
 	if (!rcu.detector_thr)
@@ -410,5 +410,5 @@
 	 * to finish.
 	 *
-	 * Note that THREAD may be 0 in scheduler() and not just during boot.
+	 * Note that THREAD may be NULL in scheduler() and not just during boot.
 	 */
 	if ((THREAD && THREAD->rcu.was_preempted) || CPU->rcu.is_delaying_gp) {
@@ -522,5 +522,5 @@
 
 	cpu_mask_for_each(*cpu_mask, cpu_id) {
-		smp_call(cpu_id, add_barrier_cb, 0);
+		smp_call(cpu_id, add_barrier_cb, NULL);
 	}
 
@@ -583,5 +583,5 @@
 
 	rcu_item->func = func;
-	rcu_item->next = 0;
+	rcu_item->next = NULL;
 
 	preemption_disable();
@@ -611,5 +611,5 @@
 {
 	ASSERT(THREAD && THREAD->wired);
-	return 0 == CPU->rcu.cur_cbs;
+	return NULL == CPU->rcu.cur_cbs;
 }
 
@@ -617,5 +617,5 @@
 {
 	ASSERT(THREAD && THREAD->wired);
-	return 0 == CPU->rcu.next_cbs;
+	return NULL == CPU->rcu.next_cbs;
 }
 
@@ -628,5 +628,5 @@
 	 * a false negative if we race with a local interrupt handler.
 	 */
-	return 0 == CPU->rcu.arriving_cbs;
+	return NULL == CPU->rcu.arriving_cbs;
 }
 
@@ -741,5 +741,5 @@
 	}
 
-	*phead = 0;
+	*phead = NULL;
 }
 
@@ -779,5 +779,5 @@
 	CPU->rcu.next_cbs_cnt = CPU->rcu.arriving_cbs_cnt;
 
-	CPU->rcu.arriving_cbs = 0;
+	CPU->rcu.arriving_cbs = NULL;
 	CPU->rcu.parriving_cbs_tail = &CPU->rcu.arriving_cbs;
 	CPU->rcu.arriving_cbs_cnt = 0;
@@ -1289,5 +1289,5 @@
 	atomic_set(&rcu.delaying_cpu_cnt, 0);
 
-	sample_cpus(cpu_mask, 0);
+	sample_cpus(cpu_mask, NULL);
 }
 
@@ -1452,5 +1452,5 @@
 void rcu_thread_exiting(void)
 {
-	ASSERT(THREAD != 0);
+	ASSERT(THREAD != NULL);
 	ASSERT(THREAD->state == Exiting);
 	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
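Every hunk above makes the same mechanical substitution: pointer-typed fields, arguments, and comparisons that previously used the integer literal 0 now use NULL, so pointer values read distinctly from integer zero. The stand-alone C sketch below illustrates the resulting style; the struct and helper names are invented for illustration only and are not the HelenOS definitions.

#include <stdbool.h>
#include <stddef.h>        /* NULL */

/* Simplified stand-in for an RCU callback item (hypothetical names). */
typedef struct rcu_item {
	void (*func)(struct rcu_item *);   /* callback to run after a grace period */
	struct rcu_item *next;             /* singly linked callback list */
} rcu_item_t;

/* Before the change: item->next = 0;  after: item->next = NULL. */
static void rcu_item_init(rcu_item_t *item, void (*func)(rcu_item_t *))
{
	item->func = func;
	item->next = NULL;
}

/* Before the change: return 0 == head;  after: return NULL == head. */
static bool cb_list_empty(const rcu_item_t *head)
{
	return NULL == head;
}

int main(void)
{
	rcu_item_t item;
	rcu_item_init(&item, NULL);        /* no callback attached in this toy example */
	return cb_list_empty(item.next) ? 0 : 1;
}

The reversed NULL == ptr ordering is kept from the existing file style (see the hunks at lines 613, 619, and 630); writing the constant on the left means an accidental = instead of == fails to compile.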