source: mainline/uspace/srv/bd/hr/raid1.c@f0950d2

Last change on this file since f0950d2 was 234212a, checked in by Miroslav Cimerman <mc@…>, 8 weeks ago

hr: rename data_dirty to first_write

/*
 * Copyright (c) 2025 Miroslav Cimerman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup hr
 * @{
 */
/**
 * @file
 * RAID 1 (mirroring) driver.
 */

#include <abi/ipc/ipc.h>
#include <assert.h>
#include <bd_srv.h>
#include <block.h>
#include <errno.h>
#include <hr.h>
#include <inttypes.h>
#include <io/log.h>
#include <ipc/hr.h>
#include <ipc/services.h>
#include <loc.h>
#include <task.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <str_error.h>

#include "fge.h"
#include "io.h"
#include "superblock.h"
#include "util.h"
#include "var.h"

static void hr_raid1_vol_state_eval_forced(hr_volume_t *);
static size_t hr_raid1_count_good_extents(hr_volume_t *, uint64_t, size_t,
    uint64_t);
static errno_t hr_raid1_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
    void *, const void *, size_t);
static errno_t hr_raid1_rebuild(void *);
static errno_t init_rebuild(hr_volume_t *, size_t *);
static errno_t swap_hs(hr_volume_t *, size_t, size_t);
static errno_t hr_raid1_restore_blocks(hr_volume_t *, size_t, uint64_t, size_t,
    void *);

/* bdops */
static errno_t hr_raid1_bd_open(bd_srvs_t *, bd_srv_t *);
static errno_t hr_raid1_bd_close(bd_srv_t *);
static errno_t hr_raid1_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
    size_t);
static errno_t hr_raid1_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
static errno_t hr_raid1_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
    const void *, size_t);
static errno_t hr_raid1_bd_get_block_size(bd_srv_t *, size_t *);
static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *, aoff64_t *);

static bd_ops_t hr_raid1_bd_ops = {
	.open = hr_raid1_bd_open,
	.close = hr_raid1_bd_close,
	.sync_cache = hr_raid1_bd_sync_cache,
	.read_blocks = hr_raid1_bd_read_blocks,
	.write_blocks = hr_raid1_bd_write_blocks,
	.get_block_size = hr_raid1_bd_get_block_size,
	.get_num_blocks = hr_raid1_bd_get_num_blocks
};

extern loc_srv_t *hr_srv;

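/** Create a RAID 1 volume.
 *
 * Checks that the volume level and extent count are sane,
 * hooks up the block device server operations and evaluates
 * the initial volume state.
 */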
errno_t hr_raid1_create(hr_volume_t *new_volume)
{
	HR_DEBUG("%s()", __func__);

	if (new_volume->level != HR_LVL_1)
		return EINVAL;

	if (new_volume->extent_no < 2) {
		HR_ERROR("RAID 1 volume needs at least 2 devices\n");
		return EINVAL;
	}

	bd_srvs_init(&new_volume->hr_bds);
	new_volume->hr_bds.ops = &hr_raid1_bd_ops;
	new_volume->hr_bds.sarg = new_volume;

	hr_raid1_vol_state_eval_forced(new_volume);

	fibril_rwlock_read_lock(&new_volume->states_lock);
	hr_vol_state_t state = new_volume->state;
	fibril_rwlock_read_unlock(&new_volume->states_lock);
	if (state == HR_VOL_FAULTY || state == HR_VOL_NONE) {
		HR_NOTE("\"%s\": unusable state, not creating\n",
		    new_volume->devname);
		return EINVAL;
	}

	return EOK;
}

/*
 * Called only once in the volume's lifetime.
 */
errno_t hr_raid1_init(hr_volume_t *vol)
{
	HR_DEBUG("%s()", __func__);

	if (vol->level != HR_LVL_1)
		return EINVAL;

	vol->data_offset = vol->meta_ops->get_data_offset();
	vol->data_blkno = vol->truncated_blkno - vol->meta_ops->get_size();
	vol->strip_size = 0;

	return EOK;
}

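/** Add a hotspare to a RAID 1 volume.
 *
 * Re-evaluates the volume state afterwards, which may kick off
 * a rebuild onto the new hotspare.
 */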
errno_t hr_raid1_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
{
	HR_DEBUG("%s()", __func__);

	errno_t rc = hr_util_add_hotspare(vol, hotspare);

	hr_raid1_vol_state_eval(vol);

	return rc;
}

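/** Re-evaluate the volume state if it was marked dirty.
 *
 * Atomically consumes the state_dirty flag; when it was set,
 * bumps and saves the metadata counter and performs a forced
 * state evaluation.
 */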
void hr_raid1_vol_state_eval(hr_volume_t *vol)
{
	HR_DEBUG("%s()", __func__);

	bool exp = true;
	if (!atomic_compare_exchange_strong(&vol->state_dirty, &exp, false))
		return;

	vol->meta_ops->inc_counter(vol);
	(void)vol->meta_ops->save(vol, WITH_STATE_CALLBACK);

	hr_raid1_vol_state_eval_forced(vol);
}

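/** Extent state callback for I/O errors.
 *
 * Translates an I/O error code into a new extent state:
 * ENOMEM marks the extent INVALID, ENOENT marks it MISSING,
 * anything else marks it FAILED. Marks the volume state dirty.
 */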
void hr_raid1_ext_state_cb(hr_volume_t *vol, size_t extent, errno_t rc)
{
	HR_DEBUG("%s()", __func__);

	assert(fibril_rwlock_is_locked(&vol->extents_lock));

	if (rc == EOK)
		return;

	fibril_rwlock_write_lock(&vol->states_lock);

	switch (rc) {
	case ENOMEM:
		hr_update_ext_state(vol, extent, HR_EXT_INVALID);
		break;
	case ENOENT:
		hr_update_ext_state(vol, extent, HR_EXT_MISSING);
		break;
	default:
		hr_update_ext_state(vol, extent, HR_EXT_FAILED);
	}

	hr_mark_vol_state_dirty(vol);

	fibril_rwlock_write_unlock(&vol->states_lock);
}

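/** Unconditionally re-evaluate the volume state.
 *
 * Sets the volume FAULTY when no extent is healthy, DEGRADED
 * when only some are, and ONLINE otherwise. On a degraded
 * volume with hotspares available, a rebuild fibril is started.
 */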
static void hr_raid1_vol_state_eval_forced(hr_volume_t *vol)
{
	HR_DEBUG("%s()", __func__);

	fibril_rwlock_read_lock(&vol->extents_lock);
	fibril_rwlock_read_lock(&vol->states_lock);

	hr_vol_state_t old_state = vol->state;
	size_t healthy = hr_count_extents(vol, HR_EXT_ONLINE);

	fibril_rwlock_read_unlock(&vol->states_lock);
	fibril_rwlock_read_unlock(&vol->extents_lock);

	if (healthy == 0) {
		if (old_state != HR_VOL_FAULTY) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_state(vol, HR_VOL_FAULTY);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}
	} else if (healthy < vol->extent_no) {
		if (old_state != HR_VOL_REBUILD &&
		    old_state != HR_VOL_DEGRADED) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_state(vol, HR_VOL_DEGRADED);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}

		if (old_state != HR_VOL_REBUILD) {
			/* XXX: allow REBUILD on INVALID extents */
			fibril_mutex_lock(&vol->hotspare_lock);
			size_t hs_no = vol->hotspare_no;
			fibril_mutex_unlock(&vol->hotspare_lock);
			if (hs_no > 0) {
				fid_t fib = fibril_create(hr_raid1_rebuild,
				    vol);
				if (fib == 0)
					return;
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
	} else {
		if (old_state != HR_VOL_ONLINE) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_state(vol, HR_VOL_ONLINE);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}
	}
}

static errno_t hr_raid1_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
{
	HR_DEBUG("%s()", __func__);

	hr_volume_t *vol = bd->srvs->sarg;

	atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);

	return EOK;
}

static errno_t hr_raid1_bd_close(bd_srv_t *bd)
{
	HR_DEBUG("%s()", __func__);

	hr_volume_t *vol = bd->srvs->sarg;

	atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);

	return EOK;
}

static errno_t hr_raid1_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid1_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}

static errno_t hr_raid1_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid1_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}

static errno_t hr_raid1_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid1_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}

static errno_t hr_raid1_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rsize = vol->bsize;
	return EOK;
}

static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rnb = vol->data_blkno;
	return EOK;
}

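/** Count extents that can serve I/O for the given range.
 *
 * An extent qualifies when it is ONLINE, or when it is being
 * rebuilt and the range lies below the current rebuild position.
 */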
static size_t hr_raid1_count_good_extents(hr_volume_t *vol, uint64_t ba,
    size_t cnt, uint64_t rebuild_blk)
{
	assert(fibril_rwlock_is_locked(&vol->extents_lock));
	assert(fibril_rwlock_is_locked(&vol->states_lock));

	size_t count = 0;
	for (size_t i = 0; i < vol->extent_no; i++) {
		if (vol->extents[i].state == HR_EXT_ONLINE ||
		    (vol->extents[i].state == HR_EXT_REBUILD &&
		    ba < rebuild_blk)) {
			count++;
		}
	}

	return count;
}

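/** Dispatch a block device operation on the mirror.
 *
 * Reads are satisfied from the first usable extent; writes and
 * cache syncs are fanned out to all usable extents through a
 * fibril group. The operation succeeds if at least one extent
 * serviced it.
 */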
static errno_t hr_raid1_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
    size_t cnt, void *data_read, const void *data_write, size_t size)
{
	HR_DEBUG("%s()", __func__);

	hr_volume_t *vol = bd->srvs->sarg;
	hr_range_lock_t *rl = NULL;
	errno_t rc;
	size_t i;
	uint64_t rebuild_blk;

	if (size < cnt * vol->bsize)
		return EINVAL;

	fibril_rwlock_read_lock(&vol->states_lock);
	hr_vol_state_t vol_state = vol->state;
	fibril_rwlock_read_unlock(&vol->states_lock);

	if (vol_state == HR_VOL_FAULTY || vol_state == HR_VOL_NONE)
		return EIO;

	/* increment the metadata counter only on the first write */
	bool exp = false;
	if (type == HR_BD_WRITE &&
	    atomic_compare_exchange_strong(&vol->first_write, &exp, true)) {
		vol->meta_ops->inc_counter(vol);
		vol->meta_ops->save(vol, WITH_STATE_CALLBACK);
	}

	rc = hr_check_ba_range(vol, cnt, ba);
	if (rc != EOK)
		return rc;

	/* allow full device sync */
	if (!(type == HR_BD_SYNC && ba == 0 && cnt == 0))
		hr_add_data_offset(vol, &ba);

	/*
	 * The extent order has to be locked for the whole I/O
	 * duration, so that the workers have consistent targets.
	 */
	fibril_rwlock_read_lock(&vol->extents_lock);

	size_t successful = 0;
	switch (type) {
	case HR_BD_READ:
		rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
		    memory_order_relaxed);

		for (i = 0; i < vol->extent_no; i++) {
			fibril_rwlock_read_lock(&vol->states_lock);
			hr_ext_state_t state = vol->extents[i].state;
			fibril_rwlock_read_unlock(&vol->states_lock);

			if (state != HR_EXT_ONLINE &&
			    (state != HR_EXT_REBUILD ||
			    ba + cnt - 1 >= rebuild_blk)) {
				continue;
			}

			rc = block_read_direct(vol->extents[i].svc_id, ba, cnt,
			    data_read);

			if (rc == ENOMEM && i + 1 == vol->extent_no)
				goto end;

			if (rc == ENOMEM)
				continue;

			if (rc != EOK) {
				hr_raid1_ext_state_cb(vol, i, rc);
			} else {
				successful++;
				break;
			}
		}
		break;
	case HR_BD_SYNC:
	case HR_BD_WRITE:
		if (type == HR_BD_WRITE) {
			rl = hr_range_lock_acquire(vol, ba, cnt);
			if (rl == NULL) {
				rc = ENOMEM;
				goto end;
			}
		}

		fibril_rwlock_read_lock(&vol->states_lock);

		rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
		    memory_order_relaxed);

		size_t good = hr_raid1_count_good_extents(vol, ba, cnt,
		    rebuild_blk);

		hr_fgroup_t *group = hr_fgroup_create(vol->fge, good);
		if (group == NULL) {
			if (type == HR_BD_WRITE)
				hr_range_lock_release(rl);
			rc = ENOMEM;
			fibril_rwlock_read_unlock(&vol->states_lock);
			goto end;
		}

		for (i = 0; i < vol->extent_no; i++) {
			if (vol->extents[i].state != HR_EXT_ONLINE &&
			    (vol->extents[i].state != HR_EXT_REBUILD ||
			    ba >= rebuild_blk)) {
				/*
				 * When the extent is being rebuilt,
				 * we only write to the part that is
				 * already rebuilt. If the I/O starts
				 * after vol->rebuild_blk, we do not
				 * proceed; the write is going to be
				 * replicated later by the rebuild.
				 */
				continue;
			}

			hr_io_t *io = hr_fgroup_alloc(group);
			io->extent = i;
			io->data_write = data_write;
			io->data_read = data_read;
			io->ba = ba;
			io->cnt = cnt;
			io->type = type;
			io->vol = vol;

			hr_fgroup_submit(group, hr_io_worker, io);
		}

		fibril_rwlock_read_unlock(&vol->states_lock);

		(void)hr_fgroup_wait(group, &successful, NULL);

		if (type == HR_BD_WRITE)
			hr_range_lock_release(rl);

		break;
	default:
		rc = EINVAL;
		goto end;
	}

	if (successful > 0)
		rc = EOK;
	else
		rc = EIO;

end:
	fibril_rwlock_read_unlock(&vol->extents_lock);

	hr_raid1_vol_state_eval(vol);

	return rc;
}

/*
 * Put the last HOTSPARE extent in place of the first extent
 * that is not ONLINE, and start the rebuild.
 */
static errno_t hr_raid1_rebuild(void *arg)
{
	HR_DEBUG("%s()", __func__);

	hr_volume_t *vol = arg;
	void *buf = NULL;
	size_t rebuild_idx;
	errno_t rc;

	rc = init_rebuild(vol, &rebuild_idx);
	if (rc != EOK)
		return rc;

	size_t left = vol->data_blkno;
	size_t max_blks = DATA_XFER_LIMIT / vol->bsize;
	buf = malloc(max_blks * vol->bsize);
	if (buf == NULL)
		return ENOMEM;

	size_t cnt;
	uint64_t ba = 0;
	hr_add_data_offset(vol, &ba);

	/*
	 * XXX: this is useless here after the simplified DI, because
	 * a rebuild cannot be triggered while another one is ongoing
	 */
	fibril_rwlock_read_lock(&vol->extents_lock);

	/* increment the metadata counter only on the first write */
	bool exp = false;
	if (atomic_compare_exchange_strong(&vol->first_write, &exp, true)) {
		vol->meta_ops->inc_counter(vol);
		vol->meta_ops->save(vol, WITH_STATE_CALLBACK);
	}

	hr_range_lock_t *rl = NULL;

	unsigned int percent, old_percent = 100;
	while (left != 0) {
		cnt = min(max_blks, left);

		rl = hr_range_lock_acquire(vol, ba, cnt);
		if (rl == NULL) {
			rc = ENOMEM;
			goto end;
		}

		atomic_store_explicit(&vol->rebuild_blk, ba,
		    memory_order_relaxed);

		rc = hr_raid1_restore_blocks(vol, rebuild_idx, ba, cnt, buf);

		/*
		 * ba already includes the data offset added above;
		 * subtract it so the percentage is relative to the
		 * data area only.
		 */
		percent = ((ba + cnt - vol->data_offset) * 100) /
		    vol->data_blkno;
		if (percent != old_percent) {
			if (percent % 5 == 0)
				HR_DEBUG("\"%s\" REBUILD progress: %u%%\n",
				    vol->devname, percent);
		}

		hr_range_lock_release(rl);

		if (rc != EOK)
			goto end;

		ba += cnt;
		left -= cnt;
		old_percent = percent;
	}

	HR_DEBUG("hr_raid1_rebuild(): rebuild finished on \"%s\" (%" PRIun "), "
	    "extent no. %zu\n", vol->devname, vol->svc_id, rebuild_idx);

	fibril_rwlock_write_lock(&vol->states_lock);

	hr_update_ext_state(vol, rebuild_idx, HR_EXT_ONLINE);

	/*
	 * We can be optimistic here: if some extents are
	 * still INVALID, FAULTY or MISSING, the volume state
	 * update function will pick them up and set the
	 * volume state accordingly.
	 */
	hr_update_vol_state(vol, HR_VOL_ONLINE);
	hr_mark_vol_state_dirty(vol);

	fibril_rwlock_write_unlock(&vol->states_lock);

	(void)vol->meta_ops->save(vol, WITH_STATE_CALLBACK);

end:
	if (rc != EOK) {
		/*
		 * We can fail because:
		 * - the rebuild extent failed or was invalidated,
		 * - there are no ONLINE extents (the volume is FAULTY),
		 * - we got ENOMEM on all READs (for now we also
		 *   invalidate the rebuild extent in that case)
		 */
		fibril_rwlock_write_lock(&vol->states_lock);
		hr_update_vol_state(vol, HR_VOL_DEGRADED);
		hr_mark_vol_state_dirty(vol);
		fibril_rwlock_write_unlock(&vol->states_lock);
	}

	fibril_rwlock_read_unlock(&vol->extents_lock);

	hr_raid1_vol_state_eval(vol);

	if (buf != NULL)
		free(buf);

	return rc;
}

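/** Prepare a rebuild.
 *
 * Finds the first extent that is not ONLINE, swaps the last
 * hotspare into its place and moves the extent and volume
 * states to REBUILD.
 */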
static errno_t init_rebuild(hr_volume_t *vol, size_t *rebuild_idx)
{
	errno_t rc = EOK;

	fibril_rwlock_write_lock(&vol->extents_lock);
	fibril_rwlock_write_lock(&vol->states_lock);
	fibril_mutex_lock(&vol->hotspare_lock);

	/* XXX: allow REBUILD on INVALID extents */
	if (vol->hotspare_no == 0) {
		HR_WARN("hr_raid1_rebuild(): no free hotspares on \"%s\", "
		    "aborting rebuild\n", vol->devname);
		rc = EINVAL;
		goto error;
	}

	size_t bad = vol->extent_no;
	for (size_t i = 0; i < vol->extent_no; i++) {
		if (vol->extents[i].state != HR_EXT_ONLINE) {
			bad = i;
			break;
		}
	}

	if (bad == vol->extent_no) {
		HR_WARN("hr_raid1_rebuild(): no bad extent on \"%s\", "
		    "aborting rebuild\n", vol->devname);
		rc = EINVAL;
		goto error;
	}

	size_t hotspare_idx = vol->hotspare_no - 1;

	hr_ext_state_t hs_state = vol->hotspares[hotspare_idx].state;
	if (hs_state != HR_EXT_HOTSPARE) {
		HR_ERROR("hr_raid1_rebuild(): invalid hotspare state \"%s\", "
		    "aborting rebuild\n", hr_get_ext_state_str(hs_state));
		rc = EINVAL;
		goto error;
	}

	rc = swap_hs(vol, bad, hotspare_idx);
	if (rc != EOK) {
		HR_ERROR("hr_raid1_rebuild(): swapping hotspare failed, "
		    "aborting rebuild\n");
		goto error;
	}

	hr_extent_t *rebuild_ext = &vol->extents[bad];

	HR_DEBUG("hr_raid1_rebuild(): starting REBUILD on extent no. %zu "
	    "(%" PRIun ")\n", bad, rebuild_ext->svc_id);

	atomic_store_explicit(&vol->rebuild_blk, 0, memory_order_relaxed);

	hr_update_ext_state(vol, bad, HR_EXT_REBUILD);
	hr_update_vol_state(vol, HR_VOL_REBUILD);

	*rebuild_idx = bad;
error:
	fibril_mutex_unlock(&vol->hotspare_lock);
	fibril_rwlock_write_unlock(&vol->states_lock);
	fibril_rwlock_write_unlock(&vol->extents_lock);

	return rc;
}

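/** Swap a hotspare into the place of a bad extent.
 *
 * The bad extent's block service is closed and the consumed
 * hotspare slot is emptied.
 */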
static errno_t swap_hs(hr_volume_t *vol, size_t bad, size_t hs)
{
	HR_DEBUG("hr_raid1_rebuild(): swapping in hotspare\n");

	service_id_t faulty_svc_id = vol->extents[bad].svc_id;
	service_id_t hs_svc_id = vol->hotspares[hs].svc_id;

	hr_update_ext_svc_id(vol, bad, hs_svc_id);
	hr_update_ext_state(vol, bad, HR_EXT_HOTSPARE);

	hr_update_hotspare_svc_id(vol, hs, 0);
	hr_update_hotspare_state(vol, hs, HR_EXT_MISSING);

	vol->hotspare_no--;

	if (faulty_svc_id != 0)
		block_fini(faulty_svc_id);

	return EOK;
}

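/** Copy one block range onto the extent being rebuilt.
 *
 * Reads the range from the first healthy extent and writes it
 * to the rebuild target, invalidating extents on hard errors.
 */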
static errno_t hr_raid1_restore_blocks(hr_volume_t *vol, size_t rebuild_idx,
    uint64_t ba, size_t cnt, void *buf)
{
	assert(fibril_rwlock_is_locked(&vol->extents_lock));

	errno_t rc = ENOENT;
	hr_extent_t *ext, *rebuild_ext = &vol->extents[rebuild_idx];

	fibril_rwlock_read_lock(&vol->states_lock);
	hr_ext_state_t rebuild_ext_state = rebuild_ext->state;
	fibril_rwlock_read_unlock(&vol->states_lock);

	if (rebuild_ext_state != HR_EXT_REBUILD)
		return EINVAL;

	for (size_t i = 0; i < vol->extent_no; i++) {
		fibril_rwlock_read_lock(&vol->states_lock);
		ext = &vol->extents[i];
		if (ext->state != HR_EXT_ONLINE) {
			fibril_rwlock_read_unlock(&vol->states_lock);
			continue;
		}
		fibril_rwlock_read_unlock(&vol->states_lock);

		rc = block_read_direct(ext->svc_id, ba, cnt, buf);
		if (rc == EOK)
			break;

		if (rc != ENOMEM)
			hr_raid1_ext_state_cb(vol, i, rc);

		if (i + 1 >= vol->extent_no) {
			if (rc != ENOMEM) {
				HR_ERROR("rebuild on \"%s\" (%" PRIun ") "
				    "failed due to too many failed extents\n",
				    vol->devname, vol->svc_id);
			}

			/* for now we have to invalidate the rebuild extent */
			if (rc == ENOMEM) {
				HR_ERROR("rebuild on \"%s\" (%" PRIun ") "
				    "failed due to too many failed reads, "
				    "caused by lack of memory\n",
				    vol->devname, vol->svc_id);
				hr_raid1_ext_state_cb(vol, rebuild_idx,
				    ENOMEM);
			}

			return rc;
		}
	}

	rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, buf);
	if (rc != EOK) {
		/*
		 * We don't handle ENOMEM here, because maybe in the
		 * future there is going to be M_WAITOK, or we are
		 * going to wait for more memory, so that we don't
		 * have to invalidate the extent...
		 *
		 * XXX: for now we do
		 */
		hr_raid1_ext_state_cb(vol, rebuild_idx, rc);

		HR_ERROR("rebuild on \"%s\" (%" PRIun ") failed due to "
		    "a WRITE to the rebuilt extent no. %zu (rc: %s)\n",
		    vol->devname, vol->svc_id, rebuild_idx, str_error(rc));

		return rc;
	}

	return EOK;
}

/** @}
 */