Index: kernel/generic/src/proc/scheduler.c
===================================================================
--- kernel/generic/src/proc/scheduler.c	(revision a3d87b9d6ac087b0623ed2e3b6ac96f6515f4c5a)
+++ kernel/generic/src/proc/scheduler.c	(revision dfa4be625ce02c0a9d62197b2b62b4c8d6043bf3)
@@ -310,5 +310,4 @@
 	switch_task(THREAD->task);
 
-	irq_spinlock_lock(&THREAD->lock, false);
 	assert(atomic_get_unordered(&THREAD->cpu) == CPU);
 
@@ -364,6 +363,4 @@
 	/* Save current CPU cycle */
 	THREAD->last_cycle = get_cycle();
-
-	irq_spinlock_unlock(&THREAD->lock, false);
 }
 
@@ -386,6 +383,4 @@
 static void thread_requeue_preempted(thread_t *thread)
 {
-	irq_spinlock_lock(&thread->lock, false);
-
 	assert(atomic_get_unordered(&thread->state) == Running);
 	assert(atomic_get_unordered(&thread->cpu) == CPU);
@@ -400,6 +395,4 @@
 	atomic_set_unordered(&thread->state, Ready);
 
-	irq_spinlock_unlock(&thread->lock, false);
-
 	add_to_rq(thread, CPU, prio);
 }
@@ -408,6 +401,4 @@
 {
 	ipl_t ipl = interrupts_disable();
-
-	irq_spinlock_lock(&thread->lock, false);
 
 	assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);
@@ -423,6 +414,4 @@
 		atomic_set_unordered(&thread->cpu, CPU);
 	}
-
-	irq_spinlock_unlock(&thread->lock, false);
 
 	add_to_rq(thread, cpu, 0);
@@ -500,6 +489,4 @@
 	}
 
-	irq_spinlock_lock(&THREAD->lock, false);
-
 	atomic_set_unordered(&THREAD->state, new_state);
 
@@ -514,6 +501,4 @@
 	 */
 	after_thread_ran_arch();
-
-	irq_spinlock_unlock(&THREAD->lock, false);
 
 	CPU_LOCAL->exiting_state = new_state;
@@ -650,6 +635,4 @@
 	list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
 
-		irq_spinlock_lock(&thread->lock, false);
-
 		/*
 		 * Do not steal CPU-wired threads, threads
@@ -658,7 +641,5 @@
 		 * FPU context is still in the CPU.
 		 */
-		if (thread->stolen || thread->nomigrate ||
-		    thread == fpu_owner) {
-			irq_spinlock_unlock(&thread->lock, false);
+		if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
 			continue;
 		}
@@ -666,6 +647,4 @@
 		thread->stolen = true;
 		atomic_set_unordered(&thread->cpu, CPU);
-
-		irq_spinlock_unlock(&thread->lock, false);
 
 		/*
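
For readers skimming the scheduler.c hunks above: per-thread spinlock sections are dropped in favour of plain accesses through atomic_get_unordered()/atomic_set_unordered(), with interrupt-disable windows and same-CPU assertions where the surrounding code already provides them. Below is a minimal, self-contained C11 sketch of that accessor pattern; the HelenOS primitives are modeled as relaxed C11 atomics, and thread_model_t/requeue_preempted are illustrative names, not kernel code.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/*
 * Standalone model: the real atomic_get_unordered()/atomic_set_unordered()
 * are HelenOS kernel primitives; here they are mapped to relaxed C11
 * atomics only so the sketch compiles on its own.
 */
#define atomic_get_unordered(ptr) \
    atomic_load_explicit((ptr), memory_order_relaxed)
#define atomic_set_unordered(ptr, val) \
    atomic_store_explicit((ptr), (val), memory_order_relaxed)

typedef enum { Entering, Ready, Running, Sleeping } state_t;

typedef struct {
    _Atomic state_t state;  /* field previously guarded by thread->lock */
} thread_model_t;

/* Mirrors the shape of thread_requeue_preempted() after the patch:
 * no per-thread spinlock, only unordered (relaxed) field accesses. */
static void requeue_preempted(thread_model_t *thread)
{
    assert(atomic_get_unordered(&thread->state) == Running);
    atomic_set_unordered(&thread->state, Ready);
}

int main(void)
{
    thread_model_t t = { Running };
    requeue_preempted(&t);
    printf("state = %d\n", (int) atomic_get_unordered(&t.state));
    return 0;
}

The relaxed ordering is only a stand-in for "unordered" here; the hunks above pair these accesses with interrupt-disable windows and assertions that the thread runs on the local CPU, rather than relying on memory ordering.
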
Index: kernel/generic/src/proc/thread.c
===================================================================
--- kernel/generic/src/proc/thread.c	(revision a3d87b9d6ac087b0623ed2e3b6ac96f6515f4c5a)
+++ kernel/generic/src/proc/thread.c	(revision dfa4be625ce02c0a9d62197b2b62b4c8d6043bf3)
@@ -115,5 +115,4 @@
 	thread_t *thread = (thread_t *) obj;
 
-	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
 	link_initialize(&thread->rq_link);
 	link_initialize(&thread->wq_link);
@@ -197,8 +196,8 @@
 void thread_wire(thread_t *thread, cpu_t *cpu)
 {
-	irq_spinlock_lock(&thread->lock, true);
+	ipl_t ipl = interrupts_disable();
 	atomic_set_unordered(&thread->cpu, cpu);
 	thread->nomigrate++;
-	irq_spinlock_unlock(&thread->lock, true);
+	interrupts_restore(ipl);
 }
 
@@ -579,7 +578,10 @@
 void thread_migration_disable(void)
 {
+	ipl_t ipl = interrupts_disable();
+
 	assert(THREAD);
-
 	THREAD->nomigrate++;
+
+	interrupts_restore(ipl);
 }
 
@@ -587,4 +589,6 @@
 void thread_migration_enable(void)
 {
+	ipl_t ipl = interrupts_disable();
+
 	assert(THREAD);
 	assert(THREAD->nomigrate > 0);
@@ -592,4 +596,6 @@
 	if (THREAD->nomigrate > 0)
 		THREAD->nomigrate--;
+
+	interrupts_restore(ipl);
 }
 
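For the thread.c hunks above: irq_spinlock_lock(&thread->lock, true) around nomigrate/cpu updates is replaced by an explicit interrupts_disable()/interrupts_restore() window. A minimal standalone sketch of that shape follows; the interrupt primitives are stubbed purely so the example compiles outside the kernel, and the names migration_disable/migration_enable are illustrative.

#include <stdio.h>

/*
 * Standalone model of the thread.c change: the per-thread spinlock around
 * nomigrate updates becomes a plain interrupt-disable window.
 * interrupts_disable()/interrupts_restore() are HelenOS primitives; the
 * stubs below only make this sketch self-contained.
 */
typedef unsigned int ipl_t;

static ipl_t interrupts_disable(void) { return 0; }        /* stub */
static void interrupts_restore(ipl_t ipl) { (void) ipl; }  /* stub */

typedef struct {
    int nomigrate;  /* only updated by the thread itself on its CPU */
} thread_model_t;

/* Shape of thread_migration_disable()/thread_migration_enable() after
 * the patch: no lock, just a disabled-interrupts window. */
static void migration_disable(thread_model_t *thread)
{
    ipl_t ipl = interrupts_disable();
    thread->nomigrate++;
    interrupts_restore(ipl);
}

static void migration_enable(thread_model_t *thread)
{
    ipl_t ipl = interrupts_disable();
    if (thread->nomigrate > 0)
        thread->nomigrate--;
    interrupts_restore(ipl);
}

int main(void)
{
    thread_model_t t = { 0 };
    migration_disable(&t);
    migration_enable(&t);
    printf("nomigrate = %d\n", t.nomigrate);
    return 0;
}
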
Index: kernel/generic/src/sysinfo/stats.c
===================================================================
--- kernel/generic/src/sysinfo/stats.c	(revision a3d87b9d6ac087b0623ed2e3b6ac96f6515f4c5a)
+++ kernel/generic/src/sysinfo/stats.c	(revision dfa4be625ce02c0a9d62197b2b62b4c8d6043bf3)
@@ -362,12 +362,7 @@
 	thread_t *thread = thread_first();
 	while (thread != NULL) {
-		/* Interrupts are already disabled */
-		irq_spinlock_lock(&thread->lock, false);
-
 		/* Record the statistics and increment the index */
 		produce_stats_thread(thread, &stats_threads[i]);
 		i++;
-
-		irq_spinlock_unlock(&thread->lock, false);
 
 		thread = thread_next(thread);
@@ -625,11 +620,5 @@
 		ret.data.size = sizeof(stats_thread_t);
 
-		/*
-		 * Replaced hand-over-hand locking with regular nested sections
-		 * to avoid weak reference leak issues.
-		 */
-		irq_spinlock_lock(&thread->lock, false);
 		produce_stats_thread(thread, stats_thread);
-		irq_spinlock_unlock(&thread->lock, false);
 
 		irq_spinlock_unlock(&threads_lock, true);
Index: kernel/generic/src/udebug/udebug_ops.c
===================================================================
--- kernel/generic/src/udebug/udebug_ops.c	(revision a3d87b9d6ac087b0623ed2e3b6ac96f6515f4c5a)
+++ kernel/generic/src/udebug/udebug_ops.c	(revision dfa4be625ce02c0a9d62197b2b62b4c8d6043bf3)
@@ -90,33 +90,9 @@
 	}
 
-	irq_spinlock_lock(&thread->lock, true);
-
 	/* Verify that 'thread' is a userspace thread. */
 	if (!thread->uspace) {
-		/* It's not, deny its existence */
-		irq_spinlock_unlock(&thread->lock, true);
 		mutex_unlock(&TASK->udebug.lock);
 		return ENOENT;
 	}
-
-	/* Verify debugging state. */
-	if (thread->udebug.active != true) {
-		/* Not in debugging session or undesired GO state */
-		irq_spinlock_unlock(&thread->lock, true);
-		mutex_unlock(&TASK->udebug.lock);
-		return ENOENT;
-	}
-
-	/* Now verify that the thread belongs to the current task. */
-	if (thread->task != TASK) {
-		/* No such thread belonging this task */
-		irq_spinlock_unlock(&thread->lock, true);
-		mutex_unlock(&TASK->udebug.lock);
-		return ENOENT;
-	}
-
-	irq_spinlock_unlock(&thread->lock, true);
-
-	/* Only mutex TASK->udebug.lock left. */
 
 	/*
@@ -126,4 +102,20 @@
 	 */
 	mutex_lock(&thread->udebug.lock);
+
+	/* Verify debugging state. */
+	if (thread->udebug.active != true) {
+		/* Not in debugging session or undesired GO state */
+		mutex_unlock(&thread->udebug.lock);
+		mutex_unlock(&TASK->udebug.lock);
+		return ENOENT;
+	}
+
+	/* Now verify that the thread belongs to the current task. */
+	if (thread->task != TASK) {
+		/* No such thread belonging this task */
+		mutex_unlock(&thread->udebug.lock);
+		mutex_unlock(&TASK->udebug.lock);
+		return ENOENT;
+	}
 
 	/* The big task mutex is no longer needed. */
@@ -388,7 +380,5 @@
 	/* FIXME: make sure the thread isn't past debug shutdown... */
 	list_foreach(TASK->threads, th_link, thread_t, thread) {
-		irq_spinlock_lock(&thread->lock, false);
 		bool uspace = thread->uspace;
-		irq_spinlock_unlock(&thread->lock, false);
 
 		/* Not interested in kernel threads. */

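
For the udebug_ops.c hunks above: the uspace/active/ownership checks move out from under the removed thread spinlock and are redone after taking thread->udebug.lock, the mutex the caller keeps holding on success. A small standalone sketch of that "re-verify under the retained lock" shape, using pthread mutexes and illustrative names (debug_ctx_t, op_begin are not HelenOS APIs):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Standalone model: state checks are performed under the mutex that the
 * successful caller continues to hold, instead of under a separate
 * short-lived spinlock taken beforehand.
 */
typedef struct {
    pthread_mutex_t lock;
    bool active;
    void *task;
} debug_ctx_t;

static int op_begin(debug_ctx_t *ctx, void *current_task)
{
    pthread_mutex_lock(&ctx->lock);

    /* Re-verify state under the mutex that stays held on success. */
    if (!ctx->active || ctx->task != current_task) {
        pthread_mutex_unlock(&ctx->lock);
        return ENOENT;
    }

    return 0;  /* caller proceeds with ctx->lock held */
}

int main(void)
{
    int task;
    debug_ctx_t ctx = { PTHREAD_MUTEX_INITIALIZER, true, &task };

    if (op_begin(&ctx, &task) == 0) {
        printf("checks passed under the retained lock\n");
        pthread_mutex_unlock(&ctx.lock);
    }
    return 0;
}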