source: mainline/uspace/srv/bd/hr/raid1.c@ ca7fa5b

Last change on this file since ca7fa5b was ca7fa5b, checked in by Miroslav Cimerman <mc@…>, 4 months ago

hr: use <inttypes.h> macro specifiers

  • Property mode set to 100644
File size: 18.1 KB
Line 
1/*
2 * Copyright (c) 2025 Miroslav Cimerman
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup hr
30 * @{
31 */
32/**
33 * @file
34 */
35
36#include <bd_srv.h>
37#include <block.h>
38#include <errno.h>
39#include <hr.h>
40#include <inttypes.h>
41#include <io/log.h>
42#include <ipc/hr.h>
43#include <ipc/services.h>
44#include <loc.h>
45#include <task.h>
46#include <stdatomic.h>
47#include <stdio.h>
48#include <stdlib.h>
49#include <str_error.h>
50
51#include "fge.h"
52#include "io.h"
53#include "superblock.h"
54#include "util.h"
55#include "var.h"
56
57static void hr_raid1_update_vol_status(hr_volume_t *);
58static void hr_raid1_ext_state_callback(hr_volume_t *, size_t, errno_t);
59static size_t hr_raid1_count_good_extents(hr_volume_t *, uint64_t, size_t,
60 uint64_t);
61static errno_t hr_raid1_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
62 void *, const void *, size_t);
63static errno_t hr_raid1_rebuild(void *);
64static errno_t init_rebuild(hr_volume_t *, size_t *);
65static errno_t swap_hs(hr_volume_t *, size_t, size_t);
66static errno_t hr_raid1_restore_blocks(hr_volume_t *, size_t, uint64_t, size_t,
67 void *);
68
69/* bdops */
70static errno_t hr_raid1_bd_open(bd_srvs_t *, bd_srv_t *);
71static errno_t hr_raid1_bd_close(bd_srv_t *);
72static errno_t hr_raid1_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
73 size_t);
74static errno_t hr_raid1_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
75static errno_t hr_raid1_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
76 const void *, size_t);
77static errno_t hr_raid1_bd_get_block_size(bd_srv_t *, size_t *);
78static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *, aoff64_t *);
79
80static bd_ops_t hr_raid1_bd_ops = {
81 .open = hr_raid1_bd_open,
82 .close = hr_raid1_bd_close,
83 .sync_cache = hr_raid1_bd_sync_cache,
84 .read_blocks = hr_raid1_bd_read_blocks,
85 .write_blocks = hr_raid1_bd_write_blocks,
86 .get_block_size = hr_raid1_bd_get_block_size,
87 .get_num_blocks = hr_raid1_bd_get_num_blocks
88};
89
90extern loc_srv_t *hr_srv;
91
92errno_t hr_raid1_create(hr_volume_t *new_volume)
93{
94 HR_DEBUG("%s()", __func__);
95
96 assert(new_volume->level == HR_LVL_1);
97
98 if (new_volume->extent_no < 2) {
99 HR_ERROR("RAID 1 array needs at least 2 devices\n");
100 return EINVAL;
101 }
102
103 bd_srvs_init(&new_volume->hr_bds);
104 new_volume->hr_bds.ops = &hr_raid1_bd_ops;
105 new_volume->hr_bds.sarg = new_volume;
106
107 /* force volume state update */
108 hr_mark_vol_state_dirty(new_volume);
109 hr_raid1_update_vol_status(new_volume);
110
111 fibril_rwlock_read_lock(&new_volume->states_lock);
112 hr_vol_status_t state = new_volume->status;
113 fibril_rwlock_read_unlock(&new_volume->states_lock);
114 if (state == HR_VOL_FAULTY || state == HR_VOL_NONE)
115 return EINVAL;
116
117 return EOK;
118}
119
120/*
121 * Called only once in volume's lifetime.
122 */
123errno_t hr_raid1_init(hr_volume_t *vol)
124{
125 HR_DEBUG("%s()", __func__);
126
127 assert(vol->level == HR_LVL_1);
128
129 uint64_t truncated_blkno = vol->extents[0].blkno;
130 for (size_t i = 1; i < vol->extent_no; i++) {
131 if (vol->extents[i].blkno < truncated_blkno)
132 truncated_blkno = vol->extents[i].blkno;
133 }
134
135 vol->truncated_blkno = truncated_blkno;
136 vol->nblocks = truncated_blkno;
137 vol->data_offset = HR_DATA_OFF;
138 vol->data_blkno = truncated_blkno - HR_META_SIZE;
139 vol->strip_size = 0;
140
141 return EOK;
142}
143
/** External status event hook: recompute the volume state. */
void hr_raid1_status_event(hr_volume_t *vol)
{
	HR_DEBUG("%s()", __func__);

	hr_raid1_update_vol_status(vol);
}
150
151errno_t hr_raid1_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
152{
153 HR_DEBUG("%s()", __func__);
154
155 errno_t rc = hr_util_add_hotspare(vol, hotspare);
156
157 hr_raid1_update_vol_status(vol);
158
159 return rc;
160}
161
162static errno_t hr_raid1_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
163{
164 HR_DEBUG("%s()", __func__);
165
166 hr_volume_t *vol = bd->srvs->sarg;
167
168 atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);
169
170 return EOK;
171}
172
173static errno_t hr_raid1_bd_close(bd_srv_t *bd)
174{
175 HR_DEBUG("%s()", __func__);
176
177 hr_volume_t *vol = bd->srvs->sarg;
178
179 atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);
180
181 return EOK;
182}
183
/** Forward a cache sync request to the common RAID 1 I/O path. */
static errno_t hr_raid1_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid1_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}
188
/** Forward a block read request to the common RAID 1 I/O path. */
static errno_t hr_raid1_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid1_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}
194
/** Forward a block write request to the common RAID 1 I/O path. */
static errno_t hr_raid1_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid1_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}
200
201static errno_t hr_raid1_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
202{
203 hr_volume_t *vol = bd->srvs->sarg;
204
205 *rsize = vol->bsize;
206 return EOK;
207}
208
209static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
210{
211 hr_volume_t *vol = bd->srvs->sarg;
212
213 *rnb = vol->data_blkno;
214 return EOK;
215}
216
/** Recompute the volume state from the extent states.
 *
 * Only runs when the dirty flag is set (cheap no-op otherwise).
 * Maps the count of ONLINE extents to a volume state:
 * 0 healthy -> FAULTY, all healthy -> ONLINE, otherwise DEGRADED
 * (and, if hotspares are available, spawns a rebuild fibril).
 *
 * Lock order: extents_lock before states_lock; write locks are
 * re-taken only for the actual state transition.
 */
static void hr_raid1_update_vol_status(hr_volume_t *vol)
{
	bool exp = true;

	/* TODO: could also wrap this */
	/* consume the dirty flag; only one caller gets to do the update */
	if (!atomic_compare_exchange_strong(&vol->state_dirty, &exp, false))
		return;

	fibril_rwlock_read_lock(&vol->extents_lock);
	fibril_rwlock_read_lock(&vol->states_lock);

	hr_vol_status_t old_state = vol->status;
	size_t healthy = hr_count_extents(vol, HR_EXT_ONLINE);

	fibril_rwlock_read_unlock(&vol->states_lock);
	fibril_rwlock_read_unlock(&vol->extents_lock);

	if (healthy == 0) {
		/* no mirror side left to serve I/O */
		if (old_state != HR_VOL_FAULTY) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_status(vol, HR_VOL_FAULTY);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}
	} else if (healthy < vol->extent_no) {
		/* keep REBUILD state if a rebuild is already running */
		if (old_state != HR_VOL_REBUILD &&
		    old_state != HR_VOL_DEGRADED) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_status(vol, HR_VOL_DEGRADED);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}

		if (old_state != HR_VOL_REBUILD) {
			/* a hotspare is available: start rebuilding onto it */
			if (vol->hotspare_no > 0) {
				fid_t fib = fibril_create(hr_raid1_rebuild,
				    vol);
				if (fib == 0)
					return;
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
	} else {
		if (old_state != HR_VOL_ONLINE) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_status(vol, HR_VOL_ONLINE);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}
	}
}
266
267static void hr_raid1_ext_state_callback(hr_volume_t *vol, size_t extent,
268 errno_t rc)
269{
270 if (rc == EOK)
271 return;
272
273 assert(fibril_rwlock_is_locked(&vol->extents_lock));
274
275 fibril_rwlock_write_lock(&vol->states_lock);
276
277 switch (rc) {
278 case ENOMEM:
279 hr_update_ext_status(vol, extent, HR_EXT_INVALID);
280 break;
281 case ENOENT:
282 hr_update_ext_status(vol, extent, HR_EXT_MISSING);
283 break;
284 default:
285 hr_update_ext_status(vol, extent, HR_EXT_FAILED);
286 }
287
288 hr_mark_vol_state_dirty(vol);
289
290 fibril_rwlock_write_unlock(&vol->states_lock);
291}
292
293static size_t hr_raid1_count_good_extents(hr_volume_t *vol, uint64_t ba,
294 size_t cnt, uint64_t rebuild_blk)
295{
296 assert(fibril_rwlock_is_locked(&vol->extents_lock));
297 assert(fibril_rwlock_is_locked(&vol->states_lock));
298
299 size_t count = 0;
300 for (size_t i = 0; i < vol->extent_no; i++) {
301 if (vol->extents[i].status == HR_EXT_ONLINE ||
302 (vol->extents[i].status == HR_EXT_REBUILD &&
303 ba < rebuild_blk)) {
304 count++;
305 }
306 }
307
308 return count;
309
310}
311
/** Common implementation of all RAID 1 block device operations.
 *
 * READ: served from the first usable extent (ONLINE, or REBUILD when
 * the whole request lies below the rebuild watermark).
 * WRITE/SYNC: fanned out to every usable extent through a fibril
 * group; WRITE additionally takes a range lock so a concurrent
 * rebuild cannot interleave with it.
 *
 * The operation succeeds if at least one extent completed it.
 *
 * @param type       HR_BD_READ, HR_BD_WRITE or HR_BD_SYNC
 * @param bd         Block device server handle (sarg is hr_volume_t *)
 * @param ba         Starting block address (volume-relative)
 * @param cnt        Number of blocks
 * @param data_read  Destination buffer for READ (else NULL)
 * @param data_write Source buffer for WRITE (else NULL)
 * @param size       Size of the caller's buffer in bytes
 *
 * @return EOK on success, EIO if no extent could serve the request,
 *         EINVAL/ENOMEM/range-check errors otherwise
 */
static errno_t hr_raid1_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
    size_t cnt, void *data_read, const void *data_write, size_t size)
{
	hr_volume_t *vol = bd->srvs->sarg;
	hr_range_lock_t *rl = NULL;
	errno_t rc;
	size_t i;
	uint64_t rebuild_blk;

	/* a FAULTY or NONE volume cannot serve any I/O */
	fibril_rwlock_read_lock(&vol->states_lock);
	hr_vol_status_t vol_state = vol->status;
	fibril_rwlock_read_unlock(&vol->states_lock);

	if (vol_state == HR_VOL_FAULTY || vol_state == HR_VOL_NONE)
		return EIO;

	/* the caller's buffer must cover the whole transfer */
	if (type == HR_BD_READ || type == HR_BD_WRITE)
		if (size < cnt * vol->bsize)
			return EINVAL;

	rc = hr_check_ba_range(vol, cnt, ba);
	if (rc != EOK)
		return rc;

	/* allow full dev sync */
	if (type != HR_BD_SYNC || ba != 0)
		hr_add_ba_offset(vol, &ba);

	/*
	 * extent order has to be locked for the whole IO duration,
	 * so that workers have consistent targets
	 */
	fibril_rwlock_read_lock(&vol->extents_lock);

	size_t successful = 0;
	switch (type) {
	case HR_BD_READ:
		rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
		    memory_order_relaxed);

		for (i = 0; i < vol->extent_no; i++) {
			fibril_rwlock_read_lock(&vol->states_lock);
			hr_ext_status_t state = vol->extents[i].status;
			fibril_rwlock_read_unlock(&vol->states_lock);

			/*
			 * skip extents that are neither ONLINE nor
			 * rebuilt far enough to contain the request
			 */
			if (state != HR_EXT_ONLINE &&
			    (state != HR_EXT_REBUILD ||
			    ba + cnt - 1 >= rebuild_blk)) {
				continue;
			}

			rc = block_read_direct(vol->extents[i].svc_id, ba, cnt,
			    data_read);

			/* out of memory on the last candidate: give up */
			if (rc == ENOMEM && i + 1 == vol->extent_no)
				goto end;

			/* ENOMEM is transient, try the next extent */
			if (rc == ENOMEM)
				continue;

			if (rc != EOK) {
				hr_raid1_ext_state_callback(vol, i, rc);
			} else {
				successful++;
				break;
			}
		}
		break;
	case HR_BD_SYNC:
	case HR_BD_WRITE:
		if (type == HR_BD_WRITE) {
			/* serialize against a concurrent rebuild pass */
			rl = hr_range_lock_acquire(vol, ba, cnt);
			if (rl == NULL) {
				rc = ENOMEM;
				goto end;
			}
		}

		fibril_rwlock_read_lock(&vol->states_lock);

		rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
		    memory_order_relaxed);

		size_t good = hr_raid1_count_good_extents(vol, ba, cnt,
		    rebuild_blk);

		hr_fgroup_t *group = hr_fgroup_create(vol->fge, good);
		if (group == NULL) {
			if (type == HR_BD_WRITE)
				hr_range_lock_release(rl);
			rc = ENOMEM;
			fibril_rwlock_read_unlock(&vol->states_lock);
			goto end;
		}

		for (i = 0; i < vol->extent_no; i++) {
			if (vol->extents[i].status != HR_EXT_ONLINE &&
			    (vol->extents[i].status != HR_EXT_REBUILD ||
			    ba >= rebuild_blk)) {
				/*
				 * When the extent is being rebuilt,
				 * we only write to the part that is already
				 * rebuilt. If IO starts after vol->rebuild_blk
				 * we do not proceed, the write is going to
				 * be replicated later in the rebuild.
				 */
				continue;
			}

			hr_io_t *io = hr_fgroup_alloc(group);
			io->extent = i;
			io->data_write = data_write;
			io->data_read = data_read;
			io->ba = ba;
			io->cnt = cnt;
			io->type = type;
			io->vol = vol;
			io->state_callback = hr_raid1_ext_state_callback;

			hr_fgroup_submit(group, hr_io_worker, io);
		}

		fibril_rwlock_read_unlock(&vol->states_lock);

		/* wait for all workers; count how many succeeded */
		(void)hr_fgroup_wait(group, &successful, NULL);

		if (type == HR_BD_WRITE)
			hr_range_lock_release(rl);

		break;
	default:
		rc = EINVAL;
		goto end;
	}

	if (successful > 0)
		rc = EOK;
	else
		rc = EIO;

end:
	fibril_rwlock_read_unlock(&vol->extents_lock);

	hr_raid1_update_vol_status(vol);

	return rc;
}
459
460/*
461 * Put the last HOTSPARE extent in place
462 * of first that != ONLINE, and start the rebuild.
463 */
464static errno_t hr_raid1_rebuild(void *arg)
465{
466 HR_DEBUG("%s()", __func__);
467
468 hr_volume_t *vol = arg;
469 void *buf = NULL;
470 size_t rebuild_idx;
471 errno_t rc;
472
473 rc = init_rebuild(vol, &rebuild_idx);
474 if (rc != EOK)
475 return rc;
476
477 size_t left = vol->data_blkno;
478 size_t max_blks = DATA_XFER_LIMIT / vol->bsize;
479 buf = malloc(max_blks * vol->bsize);
480
481 size_t cnt;
482 uint64_t ba = 0;
483 hr_add_ba_offset(vol, &ba);
484
485 /*
486 * XXX: this is useless here after simplified DI, because
487 * rebuild cannot be triggered while ongoing rebuild
488 */
489 fibril_rwlock_read_lock(&vol->extents_lock);
490
491 hr_range_lock_t *rl = NULL;
492
493 unsigned int percent, old_percent = 100;
494 while (left != 0) {
495 cnt = min(max_blks, left);
496
497 rl = hr_range_lock_acquire(vol, ba, cnt);
498 if (rl == NULL) {
499 rc = ENOMEM;
500 goto end;
501 }
502
503 atomic_store_explicit(&vol->rebuild_blk, ba,
504 memory_order_relaxed);
505
506 rc = hr_raid1_restore_blocks(vol, rebuild_idx, ba, cnt, buf);
507
508 percent = ((ba + cnt) * 100) / vol->data_blkno;
509 if (percent != old_percent) {
510 if (percent % 5 == 0)
511 HR_DEBUG("\"%s\" REBUILD progress: %u%%\n",
512 vol->devname, percent);
513 }
514
515 hr_range_lock_release(rl);
516
517 if (rc != EOK)
518 goto end;
519
520 ba += cnt;
521 left -= cnt;
522 old_percent = percent;
523 }
524
525 HR_DEBUG("hr_raid1_rebuild(): rebuild finished on \"%s\" (%" PRIun "), "
526 "extent no. %zu\n", vol->devname, vol->svc_id, rebuild_idx);
527
528 fibril_rwlock_write_lock(&vol->states_lock);
529
530 hr_update_ext_status(vol, rebuild_idx, HR_EXT_ONLINE);
531
532 /*
533 * We can be optimistic here, if some extents are
534 * still INVALID, FAULTY or MISSING, the update vol
535 * function will pick them up, and set the volume
536 * state accordingly.
537 */
538 hr_update_vol_status(vol, HR_VOL_ONLINE);
539 hr_mark_vol_state_dirty(vol);
540
541 fibril_rwlock_write_unlock(&vol->states_lock);
542
543 rc = hr_metadata_save(vol);
544
545end:
546 if (rc != EOK) {
547 /*
548 * We can fail either because:
549 * - the rebuild extent failing or invalidation
550 * - there is are no ONLINE extents (vol is FAULTY)
551 * - we got ENOMEM on all READs (we also invalidate the
552 * rebuild extent here, for now)
553 */
554 fibril_rwlock_write_lock(&vol->states_lock);
555 hr_update_vol_status(vol, HR_VOL_DEGRADED);
556 hr_mark_vol_state_dirty(vol);
557 fibril_rwlock_write_unlock(&vol->states_lock);
558 }
559
560 fibril_rwlock_read_unlock(&vol->extents_lock);
561
562 hr_raid1_update_vol_status(vol);
563
564 if (buf != NULL)
565 free(buf);
566
567 return rc;
568}
569
/** Prepare a volume for rebuild.
 *
 * Finds the first non-ONLINE extent, swaps the last hotspare into its
 * place, resets the rebuild watermark and moves the extent and volume
 * into REBUILD state.
 *
 * Takes extents_lock (write), states_lock (write) and hotspare_lock
 * for the duration — in that order.
 *
 * @param vol         Volume to prepare
 * @param rebuild_idx Output: index of the extent to rebuild
 *
 * @return EOK on success, EINVAL if there is nothing to do or the
 *         hotspare is unusable
 */
static errno_t init_rebuild(hr_volume_t *vol, size_t *rebuild_idx)
{
	errno_t rc = EOK;

	fibril_rwlock_write_lock(&vol->extents_lock);
	fibril_rwlock_write_lock(&vol->states_lock);
	fibril_mutex_lock(&vol->hotspare_lock);

	if (vol->hotspare_no == 0) {
		HR_WARN("hr_raid1_rebuild(): no free hotspares on \"%s\", "
		    "aborting rebuild\n", vol->devname);
		rc = EINVAL;
		goto error;
	}

	/* find the first extent that needs rebuilding */
	size_t bad = vol->extent_no;
	for (size_t i = 0; i < vol->extent_no; i++) {
		if (vol->extents[i].status != HR_EXT_ONLINE) {
			bad = i;
			break;
		}
	}

	if (bad == vol->extent_no) {
		HR_WARN("hr_raid1_rebuild(): no bad extent on \"%s\", "
		    "aborting rebuild\n", vol->devname);
		rc = EINVAL;
		goto error;
	}

	/* always consume the last hotspare */
	size_t hotspare_idx = vol->hotspare_no - 1;

	hr_ext_status_t hs_state = vol->hotspares[hotspare_idx].status;
	if (hs_state != HR_EXT_HOTSPARE) {
		HR_ERROR("hr_raid1_rebuild(): invalid hotspare state \"%s\", "
		    "aborting rebuild\n", hr_get_ext_status_msg(hs_state));
		rc = EINVAL;
		goto error;
	}

	rc = swap_hs(vol, bad, hotspare_idx);
	if (rc != EOK) {
		HR_ERROR("hr_raid1_rebuild(): swapping hotspare failed, "
		    "aborting rebuild\n");
		goto error;
	}

	hr_extent_t *rebuild_ext = &vol->extents[bad];

	HR_DEBUG("hr_raid1_rebuild(): starting REBUILD on extent no. %zu "
	    "(%" PRIun ")\n", bad, rebuild_ext->svc_id);

	/* nothing is rebuilt yet */
	atomic_store_explicit(&vol->rebuild_blk, 0, memory_order_relaxed);

	hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
	hr_update_vol_status(vol, HR_VOL_REBUILD);

	*rebuild_idx = bad;
error:
	fibril_mutex_unlock(&vol->hotspare_lock);
	fibril_rwlock_write_unlock(&vol->states_lock);
	fibril_rwlock_write_unlock(&vol->extents_lock);

	return rc;
}
635
636static errno_t swap_hs(hr_volume_t *vol, size_t bad, size_t hs)
637{
638 HR_DEBUG("hr_raid1_rebuild(): swapping in hotspare\n");
639
640 service_id_t faulty_svc_id = vol->extents[bad].svc_id;
641 service_id_t hs_svc_id = vol->hotspares[hs].svc_id;
642
643 hr_update_ext_svc_id(vol, bad, hs_svc_id);
644 hr_update_ext_status(vol, bad, HR_EXT_HOTSPARE);
645
646 hr_update_hotspare_svc_id(vol, hs, 0);
647 hr_update_hotspare_status(vol, hs, HR_EXT_MISSING);
648
649 vol->hotspare_no--;
650
651 if (faulty_svc_id != 0)
652 block_fini(faulty_svc_id);
653
654 return EOK;
655}
656
657static errno_t hr_raid1_restore_blocks(hr_volume_t *vol, size_t rebuild_idx,
658 uint64_t ba, size_t cnt, void *buf)
659{
660 assert(fibril_rwlock_is_locked(&vol->extents_lock));
661
662 errno_t rc = ENOENT;
663 hr_extent_t *ext, *rebuild_ext = &vol->extents[rebuild_idx];
664
665 fibril_rwlock_read_lock(&vol->states_lock);
666 hr_ext_status_t rebuild_ext_status = rebuild_ext->status;
667 fibril_rwlock_read_unlock(&vol->states_lock);
668
669 if (rebuild_ext_status != HR_EXT_REBUILD)
670 return EINVAL;
671
672 for (size_t i = 0; i < vol->extent_no; i++) {
673 fibril_rwlock_read_lock(&vol->states_lock);
674 ext = &vol->extents[i];
675 if (ext->status != HR_EXT_ONLINE) {
676 fibril_rwlock_read_unlock(&vol->states_lock);
677 continue;
678 }
679 fibril_rwlock_read_unlock(&vol->states_lock);
680
681 rc = block_read_direct(ext->svc_id, ba, cnt, buf);
682 if (rc == EOK)
683 break;
684
685 if (rc != ENOMEM)
686 hr_raid1_ext_state_callback(vol, i, rc);
687
688 if (i + 1 >= vol->extent_no) {
689 if (rc != ENOMEM) {
690 HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
691 "failed due to too many failed extents\n",
692 vol->devname, vol->svc_id);
693 }
694
695 /* for now we have to invalidate the rebuild extent */
696 if (rc == ENOMEM) {
697 HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
698 "failed due to too many failed reads, "
699 "because of not enough memory\n",
700 vol->devname, vol->svc_id);
701 hr_raid1_ext_state_callback(vol, rebuild_idx,
702 ENOMEM);
703 }
704
705 return rc;
706 }
707 }
708
709 rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, buf);
710 if (rc != EOK) {
711 /*
712 * Here we dont handle ENOMEM, because maybe in the
713 * future, there is going to be M_WAITOK, or we are
714 * going to wait for more memory, so that we don't
715 * have to invalidate it...
716 *
717 * XXX: for now we do
718 */
719 hr_raid1_ext_state_callback(vol, rebuild_idx, rc);
720
721 HR_ERROR("rebuild on \"%s\" (%" PRIun "), failed due to "
722 "the rebuilt extent no. %zu WRITE (rc: %s)\n",
723 vol->devname, vol->svc_id, rebuild_idx, str_error(rc));
724
725 return rc;
726 }
727
728 return EOK;
729}
730
731/** @}
732 */
Note: See TracBrowser for help on using the repository browser.