source: mainline/uspace/srv/bd/hr/raid1.c@ f09b75b

Last change on this file since f09b75b was f647b87, checked in by Miroslav Cimerman <mc@…>, 6 months ago

srv/bd/hr: remove unused nblocks variable

  • Property mode set to 100644
File size: 18.4 KB
Line 
1/*
2 * Copyright (c) 2025 Miroslav Cimerman
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup hr
30 * @{
31 */
32/**
33 * @file
34 */
35
36#include <abi/ipc/ipc.h>
37#include <bd_srv.h>
38#include <block.h>
39#include <errno.h>
40#include <hr.h>
41#include <inttypes.h>
42#include <io/log.h>
43#include <ipc/hr.h>
44#include <ipc/services.h>
45#include <loc.h>
46#include <task.h>
47#include <stdatomic.h>
48#include <stdio.h>
49#include <stdlib.h>
50#include <str_error.h>
51
52#include "fge.h"
53#include "io.h"
54#include "superblock.h"
55#include "util.h"
56#include "var.h"
57
58static void hr_raid1_update_vol_status(hr_volume_t *);
59static void hr_raid1_ext_state_callback(hr_volume_t *, size_t, errno_t);
60static size_t hr_raid1_count_good_extents(hr_volume_t *, uint64_t, size_t,
61 uint64_t);
62static errno_t hr_raid1_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
63 void *, const void *, size_t);
64static errno_t hr_raid1_rebuild(void *);
65static errno_t init_rebuild(hr_volume_t *, size_t *);
66static errno_t swap_hs(hr_volume_t *, size_t, size_t);
67static errno_t hr_raid1_restore_blocks(hr_volume_t *, size_t, uint64_t, size_t,
68 void *);
69
70/* bdops */
71static errno_t hr_raid1_bd_open(bd_srvs_t *, bd_srv_t *);
72static errno_t hr_raid1_bd_close(bd_srv_t *);
73static errno_t hr_raid1_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
74 size_t);
75static errno_t hr_raid1_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
76static errno_t hr_raid1_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
77 const void *, size_t);
78static errno_t hr_raid1_bd_get_block_size(bd_srv_t *, size_t *);
79static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *, aoff64_t *);
80
/* Block device server operations implemented by the RAID 1 driver. */
static bd_ops_t hr_raid1_bd_ops = {
	.open = hr_raid1_bd_open,
	.close = hr_raid1_bd_close,
	.sync_cache = hr_raid1_bd_sync_cache,
	.read_blocks = hr_raid1_bd_read_blocks,
	.write_blocks = hr_raid1_bd_write_blocks,
	.get_block_size = hr_raid1_bd_get_block_size,
	.get_num_blocks = hr_raid1_bd_get_num_blocks
};
90
91extern loc_srv_t *hr_srv;
92
93errno_t hr_raid1_create(hr_volume_t *new_volume)
94{
95 HR_DEBUG("%s()", __func__);
96
97 assert(new_volume->level == HR_LVL_1);
98
99 if (new_volume->extent_no < 2) {
100 HR_ERROR("RAID 1 array needs at least 2 devices\n");
101 return EINVAL;
102 }
103
104 bd_srvs_init(&new_volume->hr_bds);
105 new_volume->hr_bds.ops = &hr_raid1_bd_ops;
106 new_volume->hr_bds.sarg = new_volume;
107
108 new_volume->state_callback = hr_raid1_ext_state_callback;
109
110 /* force volume state update */
111 hr_mark_vol_state_dirty(new_volume);
112 hr_raid1_update_vol_status(new_volume);
113
114 fibril_rwlock_read_lock(&new_volume->states_lock);
115 hr_vol_status_t state = new_volume->status;
116 fibril_rwlock_read_unlock(&new_volume->states_lock);
117 if (state == HR_VOL_FAULTY || state == HR_VOL_NONE)
118 return EINVAL;
119
120 return EOK;
121}
122
123/*
124 * Called only once in volume's lifetime.
125 */
126errno_t hr_raid1_init(hr_volume_t *vol)
127{
128 HR_DEBUG("%s()", __func__);
129
130 assert(vol->level == HR_LVL_1);
131
132 uint64_t truncated_blkno = vol->extents[0].blkno;
133 for (size_t i = 1; i < vol->extent_no; i++) {
134 if (vol->extents[i].blkno < truncated_blkno)
135 truncated_blkno = vol->extents[i].blkno;
136 }
137
138 vol->truncated_blkno = truncated_blkno;
139 vol->data_offset = vol->meta_ops->get_data_offset();
140 vol->data_blkno = truncated_blkno - vol->meta_ops->get_size();
141 vol->strip_size = 0;
142
143 return EOK;
144}
145
/** Status event handler: re-evaluate the volume state. */
void hr_raid1_status_event(hr_volume_t *vol)
{
	HR_DEBUG("%s()", __func__);

	hr_raid1_update_vol_status(vol);
}
152
153errno_t hr_raid1_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
154{
155 HR_DEBUG("%s()", __func__);
156
157 errno_t rc = hr_util_add_hotspare(vol, hotspare);
158
159 hr_raid1_update_vol_status(vol);
160
161 return rc;
162}
163
164static errno_t hr_raid1_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
165{
166 HR_DEBUG("%s()", __func__);
167
168 hr_volume_t *vol = bd->srvs->sarg;
169
170 atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);
171
172 return EOK;
173}
174
175static errno_t hr_raid1_bd_close(bd_srv_t *bd)
176{
177 HR_DEBUG("%s()", __func__);
178
179 hr_volume_t *vol = bd->srvs->sarg;
180
181 atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);
182
183 return EOK;
184}
185
/** bd_srv sync_cache hook: forward to the common RAID 1 I/O path. */
static errno_t hr_raid1_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid1_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}
190
/** bd_srv read_blocks hook: forward to the common RAID 1 I/O path. */
static errno_t hr_raid1_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid1_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}
196
/** bd_srv write_blocks hook: forward to the common RAID 1 I/O path. */
static errno_t hr_raid1_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid1_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}
202
203static errno_t hr_raid1_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
204{
205 hr_volume_t *vol = bd->srvs->sarg;
206
207 *rsize = vol->bsize;
208 return EOK;
209}
210
211static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
212{
213 hr_volume_t *vol = bd->srvs->sarg;
214
215 *rnb = vol->data_blkno;
216 return EOK;
217}
218
/** Re-evaluate the volume state from per-extent states.
 *
 * Consumes the volume's state-dirty flag via compare-and-swap, so only
 * one caller performs the update per dirtying event. Bumps the in-memory
 * metadata counter, counts ONLINE extents, and transitions the volume
 * between ONLINE / DEGRADED / FAULTY. When the volume degrades and a
 * hotspare is available, a detached rebuild fibril is spawned.
 */
static void hr_raid1_update_vol_status(hr_volume_t *vol)
{
	bool exp = true;

	/* TODO: could also wrap this */
	/* Only one caller consumes a given dirty event. */
	if (!atomic_compare_exchange_strong(&vol->state_dirty, &exp, false))
		return;

	fibril_mutex_lock(&vol->md_lock);

	vol->meta_ops->inc_counter(vol->in_mem_md);
	/* XXX: save right away */

	fibril_mutex_unlock(&vol->md_lock);

	/* Snapshot old volume state and the healthy extent count. */
	fibril_rwlock_read_lock(&vol->extents_lock);
	fibril_rwlock_read_lock(&vol->states_lock);

	hr_vol_status_t old_state = vol->status;
	size_t healthy = hr_count_extents(vol, HR_EXT_ONLINE);

	fibril_rwlock_read_unlock(&vol->states_lock);
	fibril_rwlock_read_unlock(&vol->extents_lock);

	if (healthy == 0) {
		/* No readable member left: the whole volume is lost. */
		if (old_state != HR_VOL_FAULTY) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_status(vol, HR_VOL_FAULTY);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}
	} else if (healthy < vol->extent_no) {
		if (old_state != HR_VOL_REBUILD &&
		    old_state != HR_VOL_DEGRADED) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_status(vol, HR_VOL_DEGRADED);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}

		if (old_state != HR_VOL_REBUILD) {
			/* XXX: allow REBUILD on INVALID extents */
			if (vol->hotspare_no > 0) {
				fid_t fib = fibril_create(hr_raid1_rebuild,
				    vol);
				/*
				 * NOTE(review): the dirty flag was already
				 * consumed above; when fibril_create fails
				 * the rebuild attempt is dropped until the
				 * next state event — confirm this is
				 * intentional.
				 */
				if (fib == 0)
					return;
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
	} else {
		/* Every extent is ONLINE again. */
		if (old_state != HR_VOL_ONLINE) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_status(vol, HR_VOL_ONLINE);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}
	}
}
276
277static void hr_raid1_ext_state_callback(hr_volume_t *vol, size_t extent,
278 errno_t rc)
279{
280 if (rc == EOK)
281 return;
282
283 assert(fibril_rwlock_is_locked(&vol->extents_lock));
284
285 fibril_rwlock_write_lock(&vol->states_lock);
286
287 switch (rc) {
288 case ENOMEM:
289 hr_update_ext_status(vol, extent, HR_EXT_INVALID);
290 break;
291 case ENOENT:
292 hr_update_ext_status(vol, extent, HR_EXT_MISSING);
293 break;
294 default:
295 hr_update_ext_status(vol, extent, HR_EXT_FAILED);
296 }
297
298 hr_mark_vol_state_dirty(vol);
299
300 fibril_rwlock_write_unlock(&vol->states_lock);
301}
302
303static size_t hr_raid1_count_good_extents(hr_volume_t *vol, uint64_t ba,
304 size_t cnt, uint64_t rebuild_blk)
305{
306 assert(fibril_rwlock_is_locked(&vol->extents_lock));
307 assert(fibril_rwlock_is_locked(&vol->states_lock));
308
309 size_t count = 0;
310 for (size_t i = 0; i < vol->extent_no; i++) {
311 if (vol->extents[i].status == HR_EXT_ONLINE ||
312 (vol->extents[i].status == HR_EXT_REBUILD &&
313 ba < rebuild_blk)) {
314 count++;
315 }
316 }
317
318 return count;
319
320}
321
/** Common entry point for READ / WRITE / SYNC requests on the mirror.
 *
 * Reads are satisfied from the first extent that succeeds; writes and
 * syncs are fanned out to all usable extents through a fibril group.
 *
 * @param type		HR_BD_READ, HR_BD_WRITE or HR_BD_SYNC.
 * @param bd		bd_srv handle carrying the volume in sarg.
 * @param ba		Starting block address (volume-relative).
 * @param cnt		Number of blocks.
 * @param data_read	Destination buffer for reads (else NULL).
 * @param data_write	Source buffer for writes (else NULL).
 * @param size		Buffer size in bytes; must cover cnt blocks.
 *
 * @return EOK when at least one extent serviced the request, EIO when
 *	   none did or the volume is unusable, EINVAL on bad arguments,
 *	   ENOMEM on allocation failure.
 */
static errno_t hr_raid1_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
    size_t cnt, void *data_read, const void *data_write, size_t size)
{
	hr_volume_t *vol = bd->srvs->sarg;
	hr_range_lock_t *rl = NULL;
	errno_t rc;
	size_t i;
	uint64_t rebuild_blk;

	fibril_rwlock_read_lock(&vol->states_lock);
	hr_vol_status_t vol_state = vol->status;
	fibril_rwlock_read_unlock(&vol->states_lock);

	if (vol_state == HR_VOL_FAULTY || vol_state == HR_VOL_NONE)
		return EIO;

	/* The caller's buffer must cover the whole transfer. */
	if (type == HR_BD_READ || type == HR_BD_WRITE)
		if (size < cnt * vol->bsize)
			return EINVAL;

	rc = hr_check_ba_range(vol, cnt, ba);
	if (rc != EOK)
		return rc;

	/* allow full dev sync */
	if (type != HR_BD_SYNC || ba != 0)
		hr_add_ba_offset(vol, &ba);

	/*
	 * extent order has to be locked for the whole IO duration,
	 * so that workers have consistent targets
	 */
	fibril_rwlock_read_lock(&vol->extents_lock);

	size_t successful = 0;
	switch (type) {
	case HR_BD_READ:
		rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
		    memory_order_relaxed);

		for (i = 0; i < vol->extent_no; i++) {
			fibril_rwlock_read_lock(&vol->states_lock);
			hr_ext_status_t state = vol->extents[i].status;
			fibril_rwlock_read_unlock(&vol->states_lock);

			/*
			 * Only read from ONLINE extents, or from a
			 * rebuilding one when the whole request lies
			 * below the rebuild cursor.
			 */
			if (state != HR_EXT_ONLINE &&
			    (state != HR_EXT_REBUILD ||
			    ba + cnt - 1 >= rebuild_blk)) {
				continue;
			}

			rc = block_read_direct(vol->extents[i].svc_id, ba, cnt,
			    data_read);

			/* Out of memory on the last candidate: give up. */
			if (rc == ENOMEM && i + 1 == vol->extent_no)
				goto end;

			/* ENOMEM is transient; try the next extent. */
			if (rc == ENOMEM)
				continue;

			if (rc != EOK) {
				hr_raid1_ext_state_callback(vol, i, rc);
			} else {
				/* One good copy is enough for a read. */
				successful++;
				break;
			}
		}
		break;
	case HR_BD_SYNC:
	case HR_BD_WRITE:
		if (type == HR_BD_WRITE) {
			rl = hr_range_lock_acquire(vol, ba, cnt);
			if (rl == NULL) {
				rc = ENOMEM;
				goto end;
			}
		}

		fibril_rwlock_read_lock(&vol->states_lock);

		rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
		    memory_order_relaxed);

		size_t good = hr_raid1_count_good_extents(vol, ba, cnt,
		    rebuild_blk);

		hr_fgroup_t *group = hr_fgroup_create(vol->fge, good);
		if (group == NULL) {
			if (type == HR_BD_WRITE)
				hr_range_lock_release(rl);
			rc = ENOMEM;
			fibril_rwlock_read_unlock(&vol->states_lock);
			goto end;
		}

		for (i = 0; i < vol->extent_no; i++) {
			if (vol->extents[i].status != HR_EXT_ONLINE &&
			    (vol->extents[i].status != HR_EXT_REBUILD ||
			    ba >= rebuild_blk)) {
				/*
				 * When the extent is being rebuilt,
				 * we only write to the part that is already
				 * rebuilt. If IO starts after vol->rebuild_blk
				 * we do not proceed, the write is going to
				 * be replicated later in the rebuild.
				 */
				continue;
			}

			/*
			 * NOTE(review): hr_fgroup_alloc() result is not
			 * checked — presumably it cannot fail for a group
			 * sized by the count above; confirm in fge.c.
			 */
			hr_io_t *io = hr_fgroup_alloc(group);
			io->extent = i;
			io->data_write = data_write;
			io->data_read = data_read;
			io->ba = ba;
			io->cnt = cnt;
			io->type = type;
			io->vol = vol;

			hr_fgroup_submit(group, hr_io_worker, io);
		}

		fibril_rwlock_read_unlock(&vol->states_lock);

		(void)hr_fgroup_wait(group, &successful, NULL);

		if (type == HR_BD_WRITE)
			hr_range_lock_release(rl);

		break;
	default:
		rc = EINVAL;
		goto end;
	}

	/* At least one extent must have serviced the request. */
	if (successful > 0)
		rc = EOK;
	else
		rc = EIO;

end:
	fibril_rwlock_read_unlock(&vol->extents_lock);

	hr_raid1_update_vol_status(vol);

	return rc;
}
468
469/*
470 * Put the last HOTSPARE extent in place
471 * of first that != ONLINE, and start the rebuild.
472 */
473static errno_t hr_raid1_rebuild(void *arg)
474{
475 HR_DEBUG("%s()", __func__);
476
477 hr_volume_t *vol = arg;
478 void *buf = NULL;
479 size_t rebuild_idx;
480 errno_t rc;
481
482 rc = init_rebuild(vol, &rebuild_idx);
483 if (rc != EOK)
484 return rc;
485
486 size_t left = vol->data_blkno;
487 size_t max_blks = DATA_XFER_LIMIT / vol->bsize;
488 buf = malloc(max_blks * vol->bsize);
489
490 size_t cnt;
491 uint64_t ba = 0;
492 hr_add_ba_offset(vol, &ba);
493
494 /*
495 * XXX: this is useless here after simplified DI, because
496 * rebuild cannot be triggered while ongoing rebuild
497 */
498 fibril_rwlock_read_lock(&vol->extents_lock);
499
500 hr_range_lock_t *rl = NULL;
501
502 unsigned int percent, old_percent = 100;
503 while (left != 0) {
504 cnt = min(max_blks, left);
505
506 rl = hr_range_lock_acquire(vol, ba, cnt);
507 if (rl == NULL) {
508 rc = ENOMEM;
509 goto end;
510 }
511
512 atomic_store_explicit(&vol->rebuild_blk, ba,
513 memory_order_relaxed);
514
515 rc = hr_raid1_restore_blocks(vol, rebuild_idx, ba, cnt, buf);
516
517 percent = ((ba + cnt) * 100) / vol->data_blkno;
518 if (percent != old_percent) {
519 if (percent % 5 == 0)
520 HR_DEBUG("\"%s\" REBUILD progress: %u%%\n",
521 vol->devname, percent);
522 }
523
524 hr_range_lock_release(rl);
525
526 if (rc != EOK)
527 goto end;
528
529 ba += cnt;
530 left -= cnt;
531 old_percent = percent;
532 }
533
534 HR_DEBUG("hr_raid1_rebuild(): rebuild finished on \"%s\" (%" PRIun "), "
535 "extent no. %zu\n", vol->devname, vol->svc_id, rebuild_idx);
536
537 fibril_rwlock_write_lock(&vol->states_lock);
538
539 hr_update_ext_status(vol, rebuild_idx, HR_EXT_ONLINE);
540
541 /*
542 * We can be optimistic here, if some extents are
543 * still INVALID, FAULTY or MISSING, the update vol
544 * function will pick them up, and set the volume
545 * state accordingly.
546 */
547 hr_update_vol_status(vol, HR_VOL_ONLINE);
548 hr_mark_vol_state_dirty(vol);
549
550 fibril_rwlock_write_unlock(&vol->states_lock);
551
552 rc = vol->meta_ops->save(vol, WITH_STATE_CALLBACK);
553
554end:
555 if (rc != EOK) {
556 /*
557 * We can fail either because:
558 * - the rebuild extent failing or invalidation
559 * - there is are no ONLINE extents (vol is FAULTY)
560 * - we got ENOMEM on all READs (we also invalidate the
561 * rebuild extent here, for now)
562 */
563 fibril_rwlock_write_lock(&vol->states_lock);
564 hr_update_vol_status(vol, HR_VOL_DEGRADED);
565 hr_mark_vol_state_dirty(vol);
566 fibril_rwlock_write_unlock(&vol->states_lock);
567 }
568
569 fibril_rwlock_read_unlock(&vol->extents_lock);
570
571 hr_raid1_update_vol_status(vol);
572
573 if (buf != NULL)
574 free(buf);
575
576 return rc;
577}
578
579static errno_t init_rebuild(hr_volume_t *vol, size_t *rebuild_idx)
580{
581 errno_t rc = EOK;
582
583 fibril_rwlock_write_lock(&vol->extents_lock);
584 fibril_rwlock_write_lock(&vol->states_lock);
585 fibril_mutex_lock(&vol->hotspare_lock);
586
587 /* XXX: allow REBUILD on INVALID extents */
588 if (vol->hotspare_no == 0) {
589 HR_WARN("hr_raid1_rebuild(): no free hotspares on \"%s\", "
590 "aborting rebuild\n", vol->devname);
591 rc = EINVAL;
592 goto error;
593 }
594
595 size_t bad = vol->extent_no;
596 for (size_t i = 0; i < vol->extent_no; i++) {
597 if (vol->extents[i].status != HR_EXT_ONLINE) {
598 bad = i;
599 break;
600 }
601 }
602
603 if (bad == vol->extent_no) {
604 HR_WARN("hr_raid1_rebuild(): no bad extent on \"%s\", "
605 "aborting rebuild\n", vol->devname);
606 rc = EINVAL;
607 goto error;
608 }
609
610 size_t hotspare_idx = vol->hotspare_no - 1;
611
612 hr_ext_status_t hs_state = vol->hotspares[hotspare_idx].status;
613 if (hs_state != HR_EXT_HOTSPARE) {
614 HR_ERROR("hr_raid1_rebuild(): invalid hotspare state \"%s\", "
615 "aborting rebuild\n", hr_get_ext_status_msg(hs_state));
616 rc = EINVAL;
617 goto error;
618 }
619
620 rc = swap_hs(vol, bad, hotspare_idx);
621 if (rc != EOK) {
622 HR_ERROR("hr_raid1_rebuild(): swapping hotspare failed, "
623 "aborting rebuild\n");
624 goto error;
625 }
626
627 hr_extent_t *rebuild_ext = &vol->extents[bad];
628
629 HR_DEBUG("hr_raid1_rebuild(): starting REBUILD on extent no. %zu "
630 "(%" PRIun ")\n", bad, rebuild_ext->svc_id);
631
632 atomic_store_explicit(&vol->rebuild_blk, 0, memory_order_relaxed);
633
634 hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
635 hr_update_vol_status(vol, HR_VOL_REBUILD);
636
637 *rebuild_idx = bad;
638error:
639 fibril_mutex_unlock(&vol->hotspare_lock);
640 fibril_rwlock_write_unlock(&vol->states_lock);
641 fibril_rwlock_write_unlock(&vol->extents_lock);
642
643 return rc;
644}
645
646static errno_t swap_hs(hr_volume_t *vol, size_t bad, size_t hs)
647{
648 HR_DEBUG("hr_raid1_rebuild(): swapping in hotspare\n");
649
650 service_id_t faulty_svc_id = vol->extents[bad].svc_id;
651 service_id_t hs_svc_id = vol->hotspares[hs].svc_id;
652
653 hr_update_ext_svc_id(vol, bad, hs_svc_id);
654 hr_update_ext_status(vol, bad, HR_EXT_HOTSPARE);
655
656 hr_update_hotspare_svc_id(vol, hs, 0);
657 hr_update_hotspare_status(vol, hs, HR_EXT_MISSING);
658
659 vol->hotspare_no--;
660
661 if (faulty_svc_id != 0)
662 block_fini(faulty_svc_id);
663
664 return EOK;
665}
666
/** Copy one chunk of blocks from a healthy extent to the rebuild target.
 *
 * Tries each ONLINE extent in turn until a read of @a cnt blocks at
 * @a ba succeeds, then writes the data to the extent at @a rebuild_idx.
 * Read failures other than ENOMEM demote the source extent via the
 * state callback; ENOMEM just moves on to the next candidate.
 *
 * Must be called with vol->extents_lock held (the rebuild fibril holds
 * it for the whole rebuild).
 *
 * @return EOK on success, EINVAL when the target is not in REBUILD
 *	   state, otherwise the last read/write error.
 */
static errno_t hr_raid1_restore_blocks(hr_volume_t *vol, size_t rebuild_idx,
    uint64_t ba, size_t cnt, void *buf)
{
	assert(fibril_rwlock_is_locked(&vol->extents_lock));

	/* ENOENT is the fallthrough when no ONLINE source exists at all. */
	errno_t rc = ENOENT;
	hr_extent_t *ext, *rebuild_ext = &vol->extents[rebuild_idx];

	fibril_rwlock_read_lock(&vol->states_lock);
	hr_ext_status_t rebuild_ext_status = rebuild_ext->status;
	fibril_rwlock_read_unlock(&vol->states_lock);

	if (rebuild_ext_status != HR_EXT_REBUILD)
		return EINVAL;

	for (size_t i = 0; i < vol->extent_no; i++) {
		/* Only ONLINE extents can serve as the rebuild source. */
		fibril_rwlock_read_lock(&vol->states_lock);
		ext = &vol->extents[i];
		if (ext->status != HR_EXT_ONLINE) {
			fibril_rwlock_read_unlock(&vol->states_lock);
			continue;
		}
		fibril_rwlock_read_unlock(&vol->states_lock);

		rc = block_read_direct(ext->svc_id, ba, cnt, buf);
		if (rc == EOK)
			break;

		/* ENOMEM is transient; anything else demotes the extent. */
		if (rc != ENOMEM)
			hr_raid1_ext_state_callback(vol, i, rc);

		if (i + 1 >= vol->extent_no) {
			/* That was the last candidate; the rebuild fails. */
			if (rc != ENOMEM) {
				HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
				    "failed due to too many failed extents\n",
				    vol->devname, vol->svc_id);
			}

			/* for now we have to invalidate the rebuild extent */
			if (rc == ENOMEM) {
				HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
				    "failed due to too many failed reads, "
				    "because of not enough memory\n",
				    vol->devname, vol->svc_id);
				hr_raid1_ext_state_callback(vol, rebuild_idx,
				    ENOMEM);
			}

			return rc;
		}
	}

	rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, buf);
	if (rc != EOK) {
		/*
		 * Here we dont handle ENOMEM, because maybe in the
		 * future, there is going to be M_WAITOK, or we are
		 * going to wait for more memory, so that we don't
		 * have to invalidate it...
		 *
		 * XXX: for now we do
		 */
		hr_raid1_ext_state_callback(vol, rebuild_idx, rc);

		HR_ERROR("rebuild on \"%s\" (%" PRIun "), failed due to "
		    "the rebuilt extent no. %zu WRITE (rc: %s)\n",
		    vol->devname, vol->svc_id, rebuild_idx, str_error(rc));

		return rc;
	}

	return EOK;
}
740
741/** @}
742 */
Note: See TracBrowser for help on using the repository browser.