source: mainline/uspace/srv/bd/hr/raid1.c@ 372a9fc

Last change on this file since 372a9fc was 50603405, checked in by Miroslav Cimerman <mc@…>, 7 months ago

hr: metadata format agnostic superblock ops

Put metadata specific code behind a new hr_superblock_ops_t
interface, that allows to easily add support for new metadata
formats.

  • Property mode set to 100644
File size: 18.5 KB
Line 
1/*
2 * Copyright (c) 2025 Miroslav Cimerman
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup hr
30 * @{
31 */
32/**
33 * @file
34 */
35
36#include <abi/ipc/ipc.h>
37#include <bd_srv.h>
38#include <block.h>
39#include <errno.h>
40#include <hr.h>
41#include <inttypes.h>
42#include <io/log.h>
43#include <ipc/hr.h>
44#include <ipc/services.h>
45#include <loc.h>
46#include <task.h>
47#include <stdatomic.h>
48#include <stdio.h>
49#include <stdlib.h>
50#include <str_error.h>
51
52#include "fge.h"
53#include "io.h"
54#include "superblock.h"
55#include "util.h"
56#include "var.h"
57
58static void hr_raid1_update_vol_status(hr_volume_t *);
59static void hr_raid1_ext_state_callback(hr_volume_t *, size_t, errno_t);
60static size_t hr_raid1_count_good_extents(hr_volume_t *, uint64_t, size_t,
61 uint64_t);
62static errno_t hr_raid1_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
63 void *, const void *, size_t);
64static errno_t hr_raid1_rebuild(void *);
65static errno_t init_rebuild(hr_volume_t *, size_t *);
66static errno_t swap_hs(hr_volume_t *, size_t, size_t);
67static errno_t hr_raid1_restore_blocks(hr_volume_t *, size_t, uint64_t, size_t,
68 void *);
69
70/* bdops */
71static errno_t hr_raid1_bd_open(bd_srvs_t *, bd_srv_t *);
72static errno_t hr_raid1_bd_close(bd_srv_t *);
73static errno_t hr_raid1_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
74 size_t);
75static errno_t hr_raid1_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
76static errno_t hr_raid1_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
77 const void *, size_t);
78static errno_t hr_raid1_bd_get_block_size(bd_srv_t *, size_t *);
79static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *, aoff64_t *);
80
/** Block device server operations implemented by RAID 1 volumes. */
static bd_ops_t hr_raid1_bd_ops = {
	.open = hr_raid1_bd_open,
	.close = hr_raid1_bd_close,
	.sync_cache = hr_raid1_bd_sync_cache,
	.read_blocks = hr_raid1_bd_read_blocks,
	.write_blocks = hr_raid1_bd_write_blocks,
	.get_block_size = hr_raid1_bd_get_block_size,
	.get_num_blocks = hr_raid1_bd_get_num_blocks
};
90
91extern loc_srv_t *hr_srv;
92
93errno_t hr_raid1_create(hr_volume_t *new_volume)
94{
95 HR_DEBUG("%s()", __func__);
96
97 assert(new_volume->level == HR_LVL_1);
98
99 if (new_volume->extent_no < 2) {
100 HR_ERROR("RAID 1 array needs at least 2 devices\n");
101 return EINVAL;
102 }
103
104 bd_srvs_init(&new_volume->hr_bds);
105 new_volume->hr_bds.ops = &hr_raid1_bd_ops;
106 new_volume->hr_bds.sarg = new_volume;
107
108 new_volume->state_callback = hr_raid1_ext_state_callback;
109
110 /* force volume state update */
111 hr_mark_vol_state_dirty(new_volume);
112 hr_raid1_update_vol_status(new_volume);
113
114 fibril_rwlock_read_lock(&new_volume->states_lock);
115 hr_vol_status_t state = new_volume->status;
116 fibril_rwlock_read_unlock(&new_volume->states_lock);
117 if (state == HR_VOL_FAULTY || state == HR_VOL_NONE)
118 return EINVAL;
119
120 return EOK;
121}
122
123/*
124 * Called only once in volume's lifetime.
125 */
126errno_t hr_raid1_init(hr_volume_t *vol)
127{
128 HR_DEBUG("%s()", __func__);
129
130 assert(vol->level == HR_LVL_1);
131
132 uint64_t truncated_blkno = vol->extents[0].blkno;
133 for (size_t i = 1; i < vol->extent_no; i++) {
134 if (vol->extents[i].blkno < truncated_blkno)
135 truncated_blkno = vol->extents[i].blkno;
136 }
137
138 vol->truncated_blkno = truncated_blkno;
139 vol->nblocks = truncated_blkno;
140 vol->data_offset = vol->meta_ops->get_data_offset();
141 vol->data_blkno = truncated_blkno - vol->meta_ops->get_size();
142 vol->strip_size = 0;
143
144 return EOK;
145}
146
/** Handle an external status event by re-evaluating the volume state. */
void hr_raid1_status_event(hr_volume_t *vol)
{
	HR_DEBUG("%s()", __func__);

	hr_raid1_update_vol_status(vol);
}
153
154errno_t hr_raid1_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
155{
156 HR_DEBUG("%s()", __func__);
157
158 errno_t rc = hr_util_add_hotspare(vol, hotspare);
159
160 hr_raid1_update_vol_status(vol);
161
162 return rc;
163}
164
165static errno_t hr_raid1_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
166{
167 HR_DEBUG("%s()", __func__);
168
169 hr_volume_t *vol = bd->srvs->sarg;
170
171 atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);
172
173 return EOK;
174}
175
176static errno_t hr_raid1_bd_close(bd_srv_t *bd)
177{
178 HR_DEBUG("%s()", __func__);
179
180 hr_volume_t *vol = bd->srvs->sarg;
181
182 atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);
183
184 return EOK;
185}
186
/** Forward a cache sync request to the common RAID 1 I/O path. */
static errno_t hr_raid1_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid1_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}
191
/** Forward a block read request to the common RAID 1 I/O path. */
static errno_t hr_raid1_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid1_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}
197
/** Forward a block write request to the common RAID 1 I/O path. */
static errno_t hr_raid1_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid1_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}
203
204static errno_t hr_raid1_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
205{
206 hr_volume_t *vol = bd->srvs->sarg;
207
208 *rsize = vol->bsize;
209 return EOK;
210}
211
212static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
213{
214 hr_volume_t *vol = bd->srvs->sarg;
215
216 *rnb = vol->data_blkno;
217 return EOK;
218}
219
/*
 * Re-evaluate the volume state from the current extent states.
 *
 * Runs only when the volume was marked dirty — the CAS on
 * vol->state_dirty turns concurrent callers into no-ops. Bumps the
 * in-memory metadata counter, then transitions the volume to FAULTY,
 * DEGRADED or ONLINE based on how many extents are ONLINE. When
 * degraded (and not already rebuilding) with a hotspare available,
 * spawns a detached fibril running hr_raid1_rebuild().
 */
static void hr_raid1_update_vol_status(hr_volume_t *vol)
{
	bool exp = true;

	/* TODO: could also wrap this */
	/* Only one caller proceeds per dirty-marking. */
	if (!atomic_compare_exchange_strong(&vol->state_dirty, &exp, false))
		return;

	fibril_mutex_lock(&vol->md_lock);

	vol->meta_ops->inc_counter(vol->in_mem_md);
	/* XXX: save right away */

	fibril_mutex_unlock(&vol->md_lock);

	/* Snapshot old state and healthy count under read locks. */
	fibril_rwlock_read_lock(&vol->extents_lock);
	fibril_rwlock_read_lock(&vol->states_lock);

	hr_vol_status_t old_state = vol->status;
	size_t healthy = hr_count_extents(vol, HR_EXT_ONLINE);

	fibril_rwlock_read_unlock(&vol->states_lock);
	fibril_rwlock_read_unlock(&vol->extents_lock);

	if (healthy == 0) {
		/* No usable mirror copy left. */
		if (old_state != HR_VOL_FAULTY) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_status(vol, HR_VOL_FAULTY);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}
	} else if (healthy < vol->extent_no) {
		if (old_state != HR_VOL_REBUILD &&
		    old_state != HR_VOL_DEGRADED) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_status(vol, HR_VOL_DEGRADED);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}

		if (old_state != HR_VOL_REBUILD) {
			/* XXX: allow REBUILD on INVALID extents */
			if (vol->hotspare_no > 0) {
				/* Start the rebuild in the background. */
				fid_t fib = fibril_create(hr_raid1_rebuild,
				    vol);
				if (fib == 0)
					return;
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
	} else {
		/* Every extent is ONLINE again. */
		if (old_state != HR_VOL_ONLINE) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_status(vol, HR_VOL_ONLINE);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}
	}
}
277
278static void hr_raid1_ext_state_callback(hr_volume_t *vol, size_t extent,
279 errno_t rc)
280{
281 if (rc == EOK)
282 return;
283
284 assert(fibril_rwlock_is_locked(&vol->extents_lock));
285
286 fibril_rwlock_write_lock(&vol->states_lock);
287
288 switch (rc) {
289 case ENOMEM:
290 hr_update_ext_status(vol, extent, HR_EXT_INVALID);
291 break;
292 case ENOENT:
293 hr_update_ext_status(vol, extent, HR_EXT_MISSING);
294 break;
295 default:
296 hr_update_ext_status(vol, extent, HR_EXT_FAILED);
297 }
298
299 hr_mark_vol_state_dirty(vol);
300
301 fibril_rwlock_write_unlock(&vol->states_lock);
302}
303
304static size_t hr_raid1_count_good_extents(hr_volume_t *vol, uint64_t ba,
305 size_t cnt, uint64_t rebuild_blk)
306{
307 assert(fibril_rwlock_is_locked(&vol->extents_lock));
308 assert(fibril_rwlock_is_locked(&vol->states_lock));
309
310 size_t count = 0;
311 for (size_t i = 0; i < vol->extent_no; i++) {
312 if (vol->extents[i].status == HR_EXT_ONLINE ||
313 (vol->extents[i].status == HR_EXT_REBUILD &&
314 ba < rebuild_blk)) {
315 count++;
316 }
317 }
318
319 return count;
320
321}
322
/*
 * Common implementation of READ, WRITE and SYNC requests on the
 * RAID 1 volume.
 *
 * Reads are served from the first extent usable for the requested
 * range (ONLINE, or REBUILD when the whole range lies below the
 * rebuild watermark). Writes and syncs are fanned out to all usable
 * extents through a fibril group; writes additionally take a range
 * lock so they do not race with the rebuild. Returns EOK when at
 * least one extent completed the request, EIO otherwise (or the
 * error hit before dispatch, e.g. EINVAL/ENOMEM).
 */
static errno_t hr_raid1_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
    size_t cnt, void *data_read, const void *data_write, size_t size)
{
	hr_volume_t *vol = bd->srvs->sarg;
	hr_range_lock_t *rl = NULL;
	errno_t rc;
	size_t i;
	uint64_t rebuild_blk;

	fibril_rwlock_read_lock(&vol->states_lock);
	hr_vol_status_t vol_state = vol->status;
	fibril_rwlock_read_unlock(&vol->states_lock);

	/* Refuse I/O on an unusable volume. */
	if (vol_state == HR_VOL_FAULTY || vol_state == HR_VOL_NONE)
		return EIO;

	/* The caller's buffer must cover the whole transfer. */
	if (type == HR_BD_READ || type == HR_BD_WRITE)
		if (size < cnt * vol->bsize)
			return EINVAL;

	rc = hr_check_ba_range(vol, cnt, ba);
	if (rc != EOK)
		return rc;

	/* allow full dev sync */
	if (type != HR_BD_SYNC || ba != 0)
		hr_add_ba_offset(vol, &ba);

	/*
	 * extent order has to be locked for the whole IO duration,
	 * so that workers have consistent targets
	 */
	fibril_rwlock_read_lock(&vol->extents_lock);

	size_t successful = 0;
	switch (type) {
	case HR_BD_READ:
		rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
		    memory_order_relaxed);

		/* Serve the read from the first extent that works. */
		for (i = 0; i < vol->extent_no; i++) {
			fibril_rwlock_read_lock(&vol->states_lock);
			hr_ext_status_t state = vol->extents[i].status;
			fibril_rwlock_read_unlock(&vol->states_lock);

			/*
			 * Skip extents that are neither ONLINE nor
			 * rebuilt far enough to cover the range.
			 */
			if (state != HR_EXT_ONLINE &&
			    (state != HR_EXT_REBUILD ||
			    ba + cnt - 1 >= rebuild_blk)) {
				continue;
			}

			rc = block_read_direct(vol->extents[i].svc_id, ba, cnt,
			    data_read);

			/* ENOMEM on the last extent: give up with ENOMEM. */
			if (rc == ENOMEM && i + 1 == vol->extent_no)
				goto end;

			/* ENOMEM does not degrade the extent; try the next. */
			if (rc == ENOMEM)
				continue;

			if (rc != EOK) {
				hr_raid1_ext_state_callback(vol, i, rc);
			} else {
				successful++;
				break;
			}
		}
		break;
	case HR_BD_SYNC:
	case HR_BD_WRITE:
		if (type == HR_BD_WRITE) {
			/* Keep writes and rebuild copies disjoint. */
			rl = hr_range_lock_acquire(vol, ba, cnt);
			if (rl == NULL) {
				rc = ENOMEM;
				goto end;
			}
		}

		fibril_rwlock_read_lock(&vol->states_lock);

		rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
		    memory_order_relaxed);

		size_t good = hr_raid1_count_good_extents(vol, ba, cnt,
		    rebuild_blk);

		hr_fgroup_t *group = hr_fgroup_create(vol->fge, good);
		if (group == NULL) {
			if (type == HR_BD_WRITE)
				hr_range_lock_release(rl);
			rc = ENOMEM;
			fibril_rwlock_read_unlock(&vol->states_lock);
			goto end;
		}

		/* Submit one worker per usable extent. */
		for (i = 0; i < vol->extent_no; i++) {
			if (vol->extents[i].status != HR_EXT_ONLINE &&
			    (vol->extents[i].status != HR_EXT_REBUILD ||
			    ba >= rebuild_blk)) {
				/*
				 * When the extent is being rebuilt,
				 * we only write to the part that is already
				 * rebuilt. If IO starts after vol->rebuild_blk
				 * we do not proceed, the write is going to
				 * be replicated later in the rebuild.
				 */
				continue;
			}

			hr_io_t *io = hr_fgroup_alloc(group);
			io->extent = i;
			io->data_write = data_write;
			io->data_read = data_read;
			io->ba = ba;
			io->cnt = cnt;
			io->type = type;
			io->vol = vol;

			hr_fgroup_submit(group, hr_io_worker, io);
		}

		fibril_rwlock_read_unlock(&vol->states_lock);

		/* Wait for all workers; count how many succeeded. */
		(void)hr_fgroup_wait(group, &successful, NULL);

		if (type == HR_BD_WRITE)
			hr_range_lock_release(rl);

		break;
	default:
		rc = EINVAL;
		goto end;
	}

	/* One successful replica is enough for the request to succeed. */
	if (successful > 0)
		rc = EOK;
	else
		rc = EIO;

end:
	fibril_rwlock_read_unlock(&vol->extents_lock);

	hr_raid1_update_vol_status(vol);

	return rc;
}
469
470/*
471 * Put the last HOTSPARE extent in place
472 * of first that != ONLINE, and start the rebuild.
473 */
474static errno_t hr_raid1_rebuild(void *arg)
475{
476 HR_DEBUG("%s()", __func__);
477
478 hr_volume_t *vol = arg;
479 void *buf = NULL;
480 size_t rebuild_idx;
481 errno_t rc;
482
483 rc = init_rebuild(vol, &rebuild_idx);
484 if (rc != EOK)
485 return rc;
486
487 size_t left = vol->data_blkno;
488 size_t max_blks = DATA_XFER_LIMIT / vol->bsize;
489 buf = malloc(max_blks * vol->bsize);
490
491 size_t cnt;
492 uint64_t ba = 0;
493 hr_add_ba_offset(vol, &ba);
494
495 /*
496 * XXX: this is useless here after simplified DI, because
497 * rebuild cannot be triggered while ongoing rebuild
498 */
499 fibril_rwlock_read_lock(&vol->extents_lock);
500
501 hr_range_lock_t *rl = NULL;
502
503 unsigned int percent, old_percent = 100;
504 while (left != 0) {
505 cnt = min(max_blks, left);
506
507 rl = hr_range_lock_acquire(vol, ba, cnt);
508 if (rl == NULL) {
509 rc = ENOMEM;
510 goto end;
511 }
512
513 atomic_store_explicit(&vol->rebuild_blk, ba,
514 memory_order_relaxed);
515
516 rc = hr_raid1_restore_blocks(vol, rebuild_idx, ba, cnt, buf);
517
518 percent = ((ba + cnt) * 100) / vol->data_blkno;
519 if (percent != old_percent) {
520 if (percent % 5 == 0)
521 HR_DEBUG("\"%s\" REBUILD progress: %u%%\n",
522 vol->devname, percent);
523 }
524
525 hr_range_lock_release(rl);
526
527 if (rc != EOK)
528 goto end;
529
530 ba += cnt;
531 left -= cnt;
532 old_percent = percent;
533 }
534
535 HR_DEBUG("hr_raid1_rebuild(): rebuild finished on \"%s\" (%" PRIun "), "
536 "extent no. %zu\n", vol->devname, vol->svc_id, rebuild_idx);
537
538 fibril_rwlock_write_lock(&vol->states_lock);
539
540 hr_update_ext_status(vol, rebuild_idx, HR_EXT_ONLINE);
541
542 /*
543 * We can be optimistic here, if some extents are
544 * still INVALID, FAULTY or MISSING, the update vol
545 * function will pick them up, and set the volume
546 * state accordingly.
547 */
548 hr_update_vol_status(vol, HR_VOL_ONLINE);
549 hr_mark_vol_state_dirty(vol);
550
551 fibril_rwlock_write_unlock(&vol->states_lock);
552
553 rc = vol->meta_ops->save(vol, WITH_STATE_CALLBACK);
554
555end:
556 if (rc != EOK) {
557 /*
558 * We can fail either because:
559 * - the rebuild extent failing or invalidation
560 * - there is are no ONLINE extents (vol is FAULTY)
561 * - we got ENOMEM on all READs (we also invalidate the
562 * rebuild extent here, for now)
563 */
564 fibril_rwlock_write_lock(&vol->states_lock);
565 hr_update_vol_status(vol, HR_VOL_DEGRADED);
566 hr_mark_vol_state_dirty(vol);
567 fibril_rwlock_write_unlock(&vol->states_lock);
568 }
569
570 fibril_rwlock_read_unlock(&vol->extents_lock);
571
572 hr_raid1_update_vol_status(vol);
573
574 if (buf != NULL)
575 free(buf);
576
577 return rc;
578}
579
/*
 * Prepare the volume for a rebuild: pick the last hotspare, swap it
 * into the slot of the first non-ONLINE extent and mark that extent
 * REBUILD (the volume goes to REBUILD as well). On success,
 * *rebuild_idx is set to the index of the extent to rebuild.
 *
 * Takes extents_lock and states_lock for writing plus hotspare_lock;
 * all three are dropped again before returning. Returns EINVAL when
 * there is no hotspare, no bad extent, or the hotspare is in an
 * unexpected state.
 */
static errno_t init_rebuild(hr_volume_t *vol, size_t *rebuild_idx)
{
	errno_t rc = EOK;

	fibril_rwlock_write_lock(&vol->extents_lock);
	fibril_rwlock_write_lock(&vol->states_lock);
	fibril_mutex_lock(&vol->hotspare_lock);

	/* XXX: allow REBUILD on INVALID extents */
	if (vol->hotspare_no == 0) {
		HR_WARN("hr_raid1_rebuild(): no free hotspares on \"%s\", "
		    "aborting rebuild\n", vol->devname);
		rc = EINVAL;
		goto error;
	}

	/* Find the first extent that needs rebuilding. */
	size_t bad = vol->extent_no;
	for (size_t i = 0; i < vol->extent_no; i++) {
		if (vol->extents[i].status != HR_EXT_ONLINE) {
			bad = i;
			break;
		}
	}

	if (bad == vol->extent_no) {
		HR_WARN("hr_raid1_rebuild(): no bad extent on \"%s\", "
		    "aborting rebuild\n", vol->devname);
		rc = EINVAL;
		goto error;
	}

	/* Consume the last registered hotspare. */
	size_t hotspare_idx = vol->hotspare_no - 1;

	hr_ext_status_t hs_state = vol->hotspares[hotspare_idx].status;
	if (hs_state != HR_EXT_HOTSPARE) {
		HR_ERROR("hr_raid1_rebuild(): invalid hotspare state \"%s\", "
		    "aborting rebuild\n", hr_get_ext_status_msg(hs_state));
		rc = EINVAL;
		goto error;
	}

	rc = swap_hs(vol, bad, hotspare_idx);
	if (rc != EOK) {
		HR_ERROR("hr_raid1_rebuild(): swapping hotspare failed, "
		    "aborting rebuild\n");
		goto error;
	}

	hr_extent_t *rebuild_ext = &vol->extents[bad];

	HR_DEBUG("hr_raid1_rebuild(): starting REBUILD on extent no. %zu "
	    "(%" PRIun ")\n", bad, rebuild_ext->svc_id);

	/* Rebuild starts from block 0. */
	atomic_store_explicit(&vol->rebuild_blk, 0, memory_order_relaxed);

	hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
	hr_update_vol_status(vol, HR_VOL_REBUILD);

	*rebuild_idx = bad;
error:
	fibril_mutex_unlock(&vol->hotspare_lock);
	fibril_rwlock_write_unlock(&vol->states_lock);
	fibril_rwlock_write_unlock(&vol->extents_lock);

	return rc;
}
646
647static errno_t swap_hs(hr_volume_t *vol, size_t bad, size_t hs)
648{
649 HR_DEBUG("hr_raid1_rebuild(): swapping in hotspare\n");
650
651 service_id_t faulty_svc_id = vol->extents[bad].svc_id;
652 service_id_t hs_svc_id = vol->hotspares[hs].svc_id;
653
654 hr_update_ext_svc_id(vol, bad, hs_svc_id);
655 hr_update_ext_status(vol, bad, HR_EXT_HOTSPARE);
656
657 hr_update_hotspare_svc_id(vol, hs, 0);
658 hr_update_hotspare_status(vol, hs, HR_EXT_MISSING);
659
660 vol->hotspare_no--;
661
662 if (faulty_svc_id != 0)
663 block_fini(faulty_svc_id);
664
665 return EOK;
666}
667
/*
 * Copy cnt blocks starting at ba onto the extent being rebuilt
 * (rebuild_idx), using buf as the transfer buffer.
 *
 * The source data is read from the first ONLINE extent whose read
 * succeeds; a non-ENOMEM read failure degrades that extent via the
 * state callback and the next one is tried. If every extent fails,
 * the last error is returned (and on the all-ENOMEM path the rebuild
 * extent is invalidated). A failed write to the rebuild extent
 * invalidates it as well. Caller must hold extents_lock.
 */
static errno_t hr_raid1_restore_blocks(hr_volume_t *vol, size_t rebuild_idx,
    uint64_t ba, size_t cnt, void *buf)
{
	assert(fibril_rwlock_is_locked(&vol->extents_lock));

	errno_t rc = ENOENT;
	hr_extent_t *ext, *rebuild_ext = &vol->extents[rebuild_idx];

	fibril_rwlock_read_lock(&vol->states_lock);
	hr_ext_status_t rebuild_ext_status = rebuild_ext->status;
	fibril_rwlock_read_unlock(&vol->states_lock);

	if (rebuild_ext_status != HR_EXT_REBUILD)
		return EINVAL;

	/* Pick a source: the first ONLINE extent that reads cleanly. */
	for (size_t i = 0; i < vol->extent_no; i++) {
		fibril_rwlock_read_lock(&vol->states_lock);
		ext = &vol->extents[i];
		if (ext->status != HR_EXT_ONLINE) {
			fibril_rwlock_read_unlock(&vol->states_lock);
			continue;
		}
		fibril_rwlock_read_unlock(&vol->states_lock);

		rc = block_read_direct(ext->svc_id, ba, cnt, buf);
		if (rc == EOK)
			break;

		/* A real read error degrades this source extent. */
		if (rc != ENOMEM)
			hr_raid1_ext_state_callback(vol, i, rc);

		if (i + 1 >= vol->extent_no) {
			if (rc != ENOMEM) {
				HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
				    "failed due to too many failed extents\n",
				    vol->devname, vol->svc_id);
			}

			/* for now we have to invalidate the rebuild extent */
			if (rc == ENOMEM) {
				HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
				    "failed due to too many failed reads, "
				    "because of not enough memory\n",
				    vol->devname, vol->svc_id);
				hr_raid1_ext_state_callback(vol, rebuild_idx,
				    ENOMEM);
			}

			return rc;
		}
	}

	rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, buf);
	if (rc != EOK) {
		/*
		 * Here we don't handle ENOMEM, because maybe in the
		 * future, there is going to be M_WAITOK, or we are
		 * going to wait for more memory, so that we don't
		 * have to invalidate it...
		 *
		 * XXX: for now we do
		 */
		hr_raid1_ext_state_callback(vol, rebuild_idx, rc);

		HR_ERROR("rebuild on \"%s\" (%" PRIun "), failed due to "
		    "the rebuilt extent no. %zu WRITE (rc: %s)\n",
		    vol->devname, vol->svc_id, rebuild_idx, str_error(rc));

		return rc;
	}

	return EOK;
}
741
742/** @}
743 */
Note: See TracBrowser for help on using the repository browser.