Index: uspace/srv/bd/hr/hr.c
===================================================================
--- uspace/srv/bd/hr/hr.c	(revision 58d82fa2798504f981fb5b24f7ed6ed42d2a9651)
+++ uspace/srv/bd/hr/hr.c	(revision 3a68baab70c3a34b7d36d95e7573dc8db1dcad3b)
@@ -260,4 +260,7 @@
 	fibril_mutex_initialize(&new_volume->range_lock_list_lock);
 
+	fibril_mutex_initialize(&new_volume->deferred_list_lock);
+	list_initialize(&new_volume->deferred_invalidations_list);
+
 	atomic_init(&new_volume->rebuild_blk, 0);
 
Index: uspace/srv/bd/hr/raid1.c
===================================================================
--- uspace/srv/bd/hr/raid1.c	(revision 58d82fa2798504f981fb5b24f7ed6ed42d2a9651)
+++ uspace/srv/bd/hr/raid1.c	(revision 3a68baab70c3a34b7d36d95e7573dc8db1dcad3b)
@@ -56,4 +56,5 @@
 extern loc_srv_t *hr_srv;
 
+static void process_deferred_invalidations(hr_volume_t *);
 static void hr_raid1_update_vol_status(hr_volume_t *);
 static void hr_raid1_ext_state_callback(hr_volume_t *, size_t, errno_t);
@@ -225,6 +226,60 @@
 }
 
+/*
+ * Harvest extent invalidations that worker fibrils deferred (they hold
+ * extents_lock only as readers and cannot invalidate in place): move each
+ * invalidated extent's device to the hotspare array and mark the extent
+ * MISSING, then clear the deferral slots.
+ *
+ * Caller holds vol->deferred_list_lock.
+ */
+static void process_deferred_invalidations(hr_volume_t *vol)
+{
+	HR_DEBUG("hr_raid1_update_vol_status(): deferred invalidations\n");
+
+	/* halt the volume and take every lock we mutate under, for writing */
+	fibril_mutex_lock(&vol->halt_lock);
+	vol->halt_please = true;
+	fibril_rwlock_write_lock(&vol->extents_lock);
+	fibril_rwlock_write_lock(&vol->states_lock);
+	fibril_mutex_lock(&vol->hotspare_lock);
+
+	list_foreach(vol->deferred_invalidations_list, link,
+	    hr_deferred_invalidation_t, di) {
+		assert(vol->extents[di->index].status == HR_EXT_INVALID);
+
+		/* di->index is size_t, so %zu (not %lu) */
+		HR_DEBUG("moving invalidated extent no. %zu to hotspares\n",
+		    di->index);
+
+		block_fini(di->svc_id);
+
+		/*
+		 * Bounds check must precede the write; equality after the
+		 * increment is legal (array exactly full), so asserting
+		 * "< size" afterwards would fire spuriously.
+		 */
+		assert(vol->hotspare_no < HR_MAX_HOTSPARES + HR_MAX_EXTENTS);
+
+		size_t hs_idx = vol->hotspare_no;
+
+		vol->hotspare_no++;
+
+		vol->hotspares[hs_idx].svc_id = di->svc_id;
+		hr_update_hotspare_status(vol, hs_idx, HR_EXT_HOTSPARE);
+
+		vol->extents[di->index].svc_id = 0;
+		hr_update_ext_status(vol, di->index, HR_EXT_MISSING);
+	}
+
+	/* unlink and free all processed deferral slots (svc_id == 0 = free) */
+	for (size_t i = 0; i < HR_MAX_EXTENTS; i++) {
+		hr_deferred_invalidation_t *di = &vol->deferred_inval[i];
+		if (di->svc_id != 0) {
+			list_remove(&di->link);
+			di->svc_id = 0;
+		}
+	}
+
+	fibril_mutex_unlock(&vol->hotspare_lock);
+	fibril_rwlock_write_unlock(&vol->states_lock);
+	fibril_rwlock_write_unlock(&vol->extents_lock);
+	vol->halt_please = false;
+	fibril_mutex_unlock(&vol->halt_lock);
+}
+
 static void hr_raid1_update_vol_status(hr_volume_t *vol)
 {
+	fibril_mutex_lock(&vol->deferred_list_lock);
+
+	if (list_count(&vol->deferred_invalidations_list) > 0)
+		process_deferred_invalidations(vol);
+
+	fibril_mutex_unlock(&vol->deferred_list_lock);
+
 	fibril_rwlock_read_lock(&vol->extents_lock);
 	fibril_rwlock_read_lock(&vol->states_lock);
@@ -279,4 +334,31 @@
 
 	switch (rc) {
+	case ENOMEM: {
+		/*
+		 * Cannot invalidate the extent in place here (callers are
+		 * readers of extents_lock), so record a deferred
+		 * invalidation; braces are required because a declaration
+		 * may not directly follow a case label in C11.
+		 */
+		fibril_mutex_lock(&vol->deferred_list_lock);
+
+		service_id_t invalid_svc_id = vol->extents[extent].svc_id;
+
+		/* already deferred? then it must already be INVALID */
+		list_foreach(vol->deferred_invalidations_list, link,
+		    hr_deferred_invalidation_t, di) {
+			if (di->svc_id == invalid_svc_id) {
+				assert(vol->extents[extent].status ==
+				    HR_EXT_INVALID);
+				goto done;
+			}
+		}
+
+		/* compare status (not svc_id) against the state enum */
+		assert(vol->extents[extent].status != HR_EXT_INVALID);
+
+		hr_update_ext_status(vol, extent, HR_EXT_INVALID);
+
+		/* slots are appended in order, so list count = next free */
+		size_t i = list_count(&vol->deferred_invalidations_list);
+
+		assert(i < HR_MAX_EXTENTS);
+
+		vol->deferred_inval[i].svc_id = invalid_svc_id;
+		vol->deferred_inval[i].index = extent;
+
+		list_append(&vol->deferred_inval[i].link,
+		    &vol->deferred_invalidations_list);
+	done:
+		fibril_mutex_unlock(&vol->deferred_list_lock);
+		break;
+	}
 	case ENOENT:
 		hr_update_ext_status(vol, extent, HR_EXT_MISSING);
Index: uspace/srv/bd/hr/var.h
===================================================================
--- uspace/srv/bd/hr/var.h	(revision 58d82fa2798504f981fb5b24f7ed6ed42d2a9651)
+++ uspace/srv/bd/hr/var.h	(revision 3a68baab70c3a34b7d36d95e7573dc8db1dcad3b)
@@ -59,4 +59,10 @@
 } hr_ops_t;
 
+typedef struct hr_deferred_invalidation {
+	link_t link;		/* membership in vol->deferred_invalidations_list */
+	size_t index;		/* index of the invalidated extent in vol->extents */
+	service_id_t svc_id;	/* invalidated extent's service id; 0 = slot free */
+} hr_deferred_invalidation_t;
+
 typedef struct hr_volume {
 	hr_ops_t hr_ops;
@@ -89,5 +95,5 @@
 
 	size_t hotspare_no;
-	hr_extent_t hotspares[HR_MAX_HOTSPARES];
+	hr_extent_t hotspares[HR_MAX_HOTSPARES + HR_MAX_EXTENTS];
 
 	/* protects hotspares (hotspares.{svc_id,status}, hotspare_no) */
@@ -103,4 +109,15 @@
 	bool halt_please;
 	fibril_mutex_t halt_lock;
+
+	/*
+	 * Deferred extent invalidations. When an extent has to be
+	 * invalidated (e.g. ENOMEM on a WRITE), worker fibrils -
+	 * and therefore state callbacks - hold extents_lock only as
+	 * readers and cannot take it for writing, so the invalidation
+	 * is recorded here and harvested later, once the write lock
+	 * can be taken.
+	 */
+	fibril_mutex_t deferred_list_lock;
+	list_t deferred_invalidations_list;
+	hr_deferred_invalidation_t deferred_inval[HR_MAX_EXTENTS];
 
 	_Atomic uint64_t rebuild_blk;
