Index: kernel/generic/src/synch/rcu.c
===================================================================
--- kernel/generic/src/synch/rcu.c	(revision bab75df6bdac0b39185034277416374a06d4b37f)
+++ kernel/generic/src/synch/rcu.c	(revision cdf6066f21385be78f0b5bbfbdde1d1c20cbed4a)
@@ -1026,5 +1026,5 @@
 	cpu_mask_t *reader_cpus = (cpu_mask_t *)arg;
 
-	bool locked = RCU_CNT_INC <= THE->rcu_nesting;
+	bool locked = RCU_CNT_INC <= CURRENT->rcu_nesting;
 	/* smp_call machinery makes the most current _rcu_cur_gp visible. */
 	bool passed_qs = (CPU->rcu.last_seen_gp == _rcu_cur_gp);
@@ -1054,5 +1054,5 @@
 	 * with a local copy.
 	 */
-	size_t nesting_cnt = local_atomic_exchange(&THE->rcu_nesting, 0);
+	size_t nesting_cnt = local_atomic_exchange(&CURRENT->rcu_nesting, 0);
 
 	/*
@@ -1113,5 +1113,5 @@
 
 	/* Load the thread's saved nesting count from before it was preempted. */
-	THE->rcu_nesting = THREAD->rcu.nesting_cnt;
+	CURRENT->rcu_nesting = THREAD->rcu.nesting_cnt;
 }
 
@@ -1123,5 +1123,5 @@
 void rcu_thread_exiting(void)
 {
-	assert(THE->rcu_nesting == 0);
+	assert(CURRENT->rcu_nesting == 0);
 
 	/*
@@ -1145,5 +1145,5 @@
 bool rcu_read_locked(void)
 {
-	return RCU_CNT_INC <= THE->rcu_nesting;
+	return RCU_CNT_INC <= CURRENT->rcu_nesting;
 }
 
@@ -1151,7 +1151,7 @@
 void _rcu_preempted_unlock(void)
 {
-	assert(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
-
-	size_t prev = local_atomic_exchange(&THE->rcu_nesting, 0);
+	assert(0 == CURRENT->rcu_nesting || RCU_WAS_PREEMPTED == CURRENT->rcu_nesting);
+
+	size_t prev = local_atomic_exchange(&CURRENT->rcu_nesting, 0);
 	if (prev == RCU_WAS_PREEMPTED) {
 		/*

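The hunks above are a mechanical rename of the "currently running" accessor from THE to CURRENT; the RCU nesting logic at each call site is unchanged. As a rough illustration of the nesting-counter pattern these call sites rely on, the following standalone sketch uses hypothetical stand-ins for CURRENT, RCU_CNT_INC, RCU_WAS_PREEMPTED and local_atomic_exchange(); the real kernel definitions differ (in particular, the kernel's exchange must be atomic with respect to local preemption/interrupts):

	/* Illustrative sketch only: models the CURRENT->rcu_nesting usage the
	 * patch renames. All names below are stand-ins, not the kernel's own. */
	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	#define RCU_CNT_INC       2   /* assumed: reader nesting counted in steps of 2 */
	#define RCU_WAS_PREEMPTED 1   /* assumed: low bit flags a preempted reader */

	typedef struct {
		size_t rcu_nesting;   /* reader section nesting of the running thread */
	} current_t;

	static current_t current_data;
	#define CURRENT (&current_data)

	/* Stand-in for local_atomic_exchange(): store a new value and return
	 * the previous one (no cross-CPU atomicity modeled here). */
	static size_t local_atomic_exchange(size_t *ptr, size_t new_val)
	{
		size_t old = *ptr;
		*ptr = new_val;
		return old;
	}

	static bool rcu_read_locked(void)
	{
		return RCU_CNT_INC <= CURRENT->rcu_nesting;
	}

	int main(void)
	{
		assert(!rcu_read_locked());

		CURRENT->rcu_nesting += RCU_CNT_INC;   /* enter a reader section */
		assert(rcu_read_locked());

		/* Mirrors the shape of _rcu_preempted_unlock(): clear the count
		 * and inspect what was there before. */
		size_t prev = local_atomic_exchange(&CURRENT->rcu_nesting, 0);
		printf("previous nesting: %zu\n", prev);
		return 0;
	}

As the patched _rcu_preempted_unlock() shows, exchanging the counter with zero both clears it and reports the prior value, so the preempted-reader slow path (the RCU_WAS_PREEMPTED case) can be taken at most once per unlock.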