Index: kernel/test/synch/rcu1.c
===================================================================
--- kernel/test/synch/rcu1.c	(revision 0594c7eacab2692a25c83b318859d6e25c186374)
+++ kernel/test/synch/rcu1.c	(revision 64be561fde4e66c925208ae11919aa6cd8b7e899)
@@ -50,5 +50,7 @@
 } exited_t;
 
+/* Callback raced with preexisting readers. */
 #define ERACE   123
+/* Waited for too long for the callback to exit; consider it lost. */
 #define ECBLOST 432
 
@@ -85,6 +87,6 @@
 		
 	if(thread[k]) {
-		/* Try to distribute evenly but allow migration. */
-		thread[k]->cpu = &cpus[k % config.cpu_active];
+		/* Distribute evenly. */
+		thread_wire(thread[k], &cpus[k % config.cpu_active]);
 		thread_ready(thread[k]);
 	}
@@ -208,5 +210,5 @@
 		
 		for (volatile size_t k = 0; k < nop_iters; ++k) {
-			// nop, but increment volatile k			
+			/* nop, but increment volatile k */
 		}
 		
@@ -338,7 +340,7 @@
 	join_one();
 	
-	TPRINTF("\nJoined one-cb reader, wait for cb.\n");
+	TPRINTF("\nJoined one-cb reader, wait for callback.\n");
 	size_t loop_cnt = 0;
-	size_t max_loops = 4; /* 200 ms */
+	size_t max_loops = 4; /* 200 ms total */
 	
 	while (!one_cb_is_done && loop_cnt < max_loops) {
@@ -526,5 +528,5 @@
 	
 	int result = EOK;
-	wait_for_cb_exit(2, p, &result);
+	wait_for_cb_exit(2 /* secs */, p, &result);
 	
 	if (result != EOK) {
@@ -689,4 +691,5 @@
 	join_one();
 	
+	/* Wait at most 4 secs. */
 	wait_for_cb_exit(4, &p->e, &p->result);
 	
@@ -703,5 +706,5 @@
 static bool do_reader_preempt(void)
 {
-	TPRINTF("\nReader preempts; after GP start, before GP, twice before GP\n");
+	TPRINTF("\nReaders will be preempted.\n");
 	
 	bool success = true;
@@ -744,5 +747,5 @@
 	rcu_read_lock();
 
-	/* Contain synch accessing after reader section beginning. */
+	/* Order accesses of synch after the reader section begins. */
 	memory_barrier();
 	
@@ -750,5 +753,5 @@
 	
 	while (!synch->synch_running) {
-		/* 0.5 sec*/
+		/* 0.5 sec */
 		delay(500 * 1000);
 	}
@@ -829,5 +832,5 @@
 {
 	/* 5 us * 1000 * 1000 iters == 5 sec per updater thread */
-	/*delay(5);*/
+	delay(5);
 	free(item);
 }
@@ -847,6 +850,6 @@
 		}
 		
-		/* Print a dot if we make progress of 1% */
-		if (s->master && 0 == (i % (s->iters/100 + 1)))
+		/* Print a dot if we make progress of 1% */
+		if (s->master && 0 == (i % (s->iters/100)))
 			TPRINTF(".");
 	}
@@ -855,5 +858,4 @@
 static bool do_stress(void)
 {
-	//size_t cb_per_thread = 1000 * 1000;
 	size_t cb_per_thread = 1000 * 1000;
 	bool done = false;
@@ -861,5 +863,5 @@
 	stress_t worker = { .iters = cb_per_thread, .master = false }; 
 	
-	size_t thread_cnt = min(MAX_THREADS, config.cpu_active);
+	size_t thread_cnt = min(MAX_THREADS / 2, config.cpu_active);
 	/* Each cpu has one reader and one updater. */
 	size_t reader_cnt = thread_cnt;
@@ -873,5 +875,5 @@
 	bin_order_suffix(max_used_mem, &mem_units, &mem_suffix, false);
 
-	TPRINTF("\nStress: Run %zu nop-readers and %zu updaters. %zu callbacks "
+	TPRINTF("\nStress: Run %zu nop-readers and %zu updaters. %zu callbacks"
 		" total (max %" PRIu64 " %s used). Be very patient.\n", 
 		reader_cnt, updater_cnt, exp_upd_calls, mem_units, mem_suffix);
@@ -914,5 +916,5 @@
 		--e->count_down;
 		
-		if (0 == (e->count_down % (e->total_cnt/100 + 1))) {
+		if (0 == (e->count_down % (e->total_cnt/100))) {
 			TPRINTF("*");
 		}
