/*
 * Copyright (c) 2025 Miroslav Cimerman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup hr
 * @{
 */
/**
 * @file
 */

#include <abi/ipc/ipc.h>
#include <bd_srv.h>
#include <block.h>
#include <errno.h>
#include <hr.h>
#include <inttypes.h>
#include <io/log.h>
#include <ipc/hr.h>
#include <ipc/services.h>
#include <loc.h>
#include <task.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <str_error.h>

#include "fge.h"
#include "io.h"
#include "superblock.h"
#include "util.h"
#include "var.h"

static void hr_raid1_update_vol_status(hr_volume_t *);
static void hr_raid1_ext_state_callback(hr_volume_t *, size_t, errno_t);
static size_t hr_raid1_count_good_extents(hr_volume_t *, uint64_t, size_t,
    uint64_t);
static errno_t hr_raid1_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
    void *, const void *, size_t);
static errno_t hr_raid1_rebuild(void *);
static errno_t init_rebuild(hr_volume_t *, size_t *);
static errno_t swap_hs(hr_volume_t *, size_t, size_t);
static errno_t hr_raid1_restore_blocks(hr_volume_t *, size_t, uint64_t, size_t,
    void *);

/* bdops */
static errno_t hr_raid1_bd_open(bd_srvs_t *, bd_srv_t *);
static errno_t hr_raid1_bd_close(bd_srv_t *);
static errno_t hr_raid1_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
    size_t);
static errno_t hr_raid1_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
static errno_t hr_raid1_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
    const void *, size_t);
static errno_t hr_raid1_bd_get_block_size(bd_srv_t *, size_t *);
static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *, aoff64_t *);

static bd_ops_t hr_raid1_bd_ops = {
    .open = hr_raid1_bd_open,
    .close = hr_raid1_bd_close,
    .sync_cache = hr_raid1_bd_sync_cache,
    .read_blocks = hr_raid1_bd_read_blocks,
    .write_blocks = hr_raid1_bd_write_blocks,
    .get_block_size = hr_raid1_bd_get_block_size,
    .get_num_blocks = hr_raid1_bd_get_num_blocks
};

extern loc_srv_t *hr_srv;

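/** Create a RAID 1 volume.
 *
 * Sets up the block device interface and the extent state
 * callback, forces an initial volume state evaluation and
 * refuses creation if the volume comes up FAULTY or NONE.
 *
 * @return EOK on success, EINVAL on an unusable configuration.
 */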
errno_t hr_raid1_create(hr_volume_t *new_volume)
{
    HR_DEBUG("%s()", __func__);

    assert(new_volume->level == HR_LVL_1);

    if (new_volume->extent_no < 2) {
        HR_ERROR("RAID 1 array needs at least 2 devices\n");
        return EINVAL;
    }

    bd_srvs_init(&new_volume->hr_bds);
    new_volume->hr_bds.ops = &hr_raid1_bd_ops;
    new_volume->hr_bds.sarg = new_volume;

    new_volume->state_callback = hr_raid1_ext_state_callback;

    /* force volume state update */
    hr_mark_vol_state_dirty(new_volume);
    hr_raid1_update_vol_status(new_volume);

    fibril_rwlock_read_lock(&new_volume->states_lock);
    hr_vol_status_t state = new_volume->status;
    fibril_rwlock_read_unlock(&new_volume->states_lock);
    if (state == HR_VOL_FAULTY || state == HR_VOL_NONE) {
        HR_NOTE("\"%s\": unusable state, not creating\n",
            new_volume->devname);
        return EINVAL;
    }

    return EOK;
}

/** Initialize the RAID 1 volume geometry.
 *
 * Called only once in the volume's lifetime.
 */
errno_t hr_raid1_init(hr_volume_t *vol)
{
    HR_DEBUG("%s()", __func__);

    assert(vol->level == HR_LVL_1);

    vol->data_offset = vol->meta_ops->get_data_offset();
    vol->data_blkno = vol->truncated_blkno - vol->meta_ops->get_size();
    vol->strip_size = 0;

    return EOK;
}

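/** React to a state event on the volume.
 *
 * Simply re-evaluates the volume state from the current
 * extent states.
 */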
void hr_raid1_status_event(hr_volume_t *vol)
{
    HR_DEBUG("%s()", __func__);

    hr_raid1_update_vol_status(vol);
}

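/** Attach a hotspare to the volume.
 *
 * Delegates to hr_util_add_hotspare() and re-evaluates the
 * volume state afterwards, which may start a rebuild.
 */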
errno_t hr_raid1_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
{
    HR_DEBUG("%s()", __func__);

    errno_t rc = hr_util_add_hotspare(vol, hotspare);

    hr_raid1_update_vol_status(vol);

    return rc;
}

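/*
 * Block device interface. The open and close callbacks only
 * maintain the volume's open counter; reads, writes and cache
 * syncs all funnel through hr_raid1_bd_op().
 */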
static errno_t hr_raid1_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
{
    HR_DEBUG("%s()", __func__);

    hr_volume_t *vol = bd->srvs->sarg;

    atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);

    return EOK;
}

static errno_t hr_raid1_bd_close(bd_srv_t *bd)
{
    HR_DEBUG("%s()", __func__);

    hr_volume_t *vol = bd->srvs->sarg;

    atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);

    return EOK;
}

static errno_t hr_raid1_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
    return hr_raid1_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}

static errno_t hr_raid1_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
    return hr_raid1_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}

static errno_t hr_raid1_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
    return hr_raid1_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}

static errno_t hr_raid1_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
{
    hr_volume_t *vol = bd->srvs->sarg;

    *rsize = vol->bsize;
    return EOK;
}

static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
{
    hr_volume_t *vol = bd->srvs->sarg;

    *rnb = vol->data_blkno;
    return EOK;
}

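/** Re-evaluate the volume state from the extent states.
 *
 * Runs only when the volume state has been marked dirty.
 * Bumps the in-memory metadata counter, counts the ONLINE
 * extents and transitions the volume to FAULTY, DEGRADED or
 * ONLINE accordingly. On a DEGRADED volume with hotspares
 * available, a rebuild fibril is spawned.
 */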
static void hr_raid1_update_vol_status(hr_volume_t *vol)
{
    bool exp = true;

    /* TODO: could also wrap this */
    if (!atomic_compare_exchange_strong(&vol->state_dirty, &exp, false))
        return;

    fibril_mutex_lock(&vol->md_lock);

    vol->meta_ops->inc_counter(vol->in_mem_md);
    /* XXX: save right away */

    fibril_mutex_unlock(&vol->md_lock);

    fibril_rwlock_read_lock(&vol->extents_lock);
    fibril_rwlock_read_lock(&vol->states_lock);

    hr_vol_status_t old_state = vol->status;
    size_t healthy = hr_count_extents(vol, HR_EXT_ONLINE);

    fibril_rwlock_read_unlock(&vol->states_lock);
    fibril_rwlock_read_unlock(&vol->extents_lock);

    if (healthy == 0) {
        if (old_state != HR_VOL_FAULTY) {
            fibril_rwlock_write_lock(&vol->states_lock);
            hr_update_vol_status(vol, HR_VOL_FAULTY);
            fibril_rwlock_write_unlock(&vol->states_lock);
        }
    } else if (healthy < vol->extent_no) {
        if (old_state != HR_VOL_REBUILD &&
            old_state != HR_VOL_DEGRADED) {
            fibril_rwlock_write_lock(&vol->states_lock);
            hr_update_vol_status(vol, HR_VOL_DEGRADED);
            fibril_rwlock_write_unlock(&vol->states_lock);
        }

        if (old_state != HR_VOL_REBUILD) {
            /* XXX: allow REBUILD on INVALID extents */
            if (vol->hotspare_no > 0) {
                fid_t fib = fibril_create(hr_raid1_rebuild,
                    vol);
                if (fib == 0)
                    return;
                fibril_start(fib);
                fibril_detach(fib);
            }
        }
    } else {
        if (old_state != HR_VOL_ONLINE) {
            fibril_rwlock_write_lock(&vol->states_lock);
            hr_update_vol_status(vol, HR_VOL_ONLINE);
            fibril_rwlock_write_unlock(&vol->states_lock);
        }
    }
}

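/** Propagate an extent I/O error into the extent state.
 *
 * ENOMEM marks the extent INVALID, ENOENT marks it MISSING,
 * anything else marks it FAILED; the volume state is then
 * marked dirty. The extents lock must be held.
 */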
static void hr_raid1_ext_state_callback(hr_volume_t *vol, size_t extent,
    errno_t rc)
{
    if (rc == EOK)
        return;

    assert(fibril_rwlock_is_locked(&vol->extents_lock));

    fibril_rwlock_write_lock(&vol->states_lock);

    switch (rc) {
    case ENOMEM:
        hr_update_ext_status(vol, extent, HR_EXT_INVALID);
        break;
    case ENOENT:
        hr_update_ext_status(vol, extent, HR_EXT_MISSING);
        break;
    default:
        hr_update_ext_status(vol, extent, HR_EXT_FAILED);
    }

    hr_mark_vol_state_dirty(vol);

    fibril_rwlock_write_unlock(&vol->states_lock);
}

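/** Count the extents usable for an I/O request.
 *
 * An extent qualifies if it is ONLINE, or if it is REBUILD
 * and the request starts below the current rebuild position.
 */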
static size_t hr_raid1_count_good_extents(hr_volume_t *vol, uint64_t ba,
    size_t cnt, uint64_t rebuild_blk)
{
    assert(fibril_rwlock_is_locked(&vol->extents_lock));
    assert(fibril_rwlock_is_locked(&vol->states_lock));

    size_t count = 0;
    for (size_t i = 0; i < vol->extent_no; i++) {
        if (vol->extents[i].status == HR_EXT_ONLINE ||
            (vol->extents[i].status == HR_EXT_REBUILD &&
            ba < rebuild_blk)) {
            count++;
        }
    }

    return count;
}

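/** Common backend for all block device operations.
 *
 * Reads are satisfied from the first usable extent that
 * succeeds; writes and cache syncs are fanned out to all
 * usable extents through a fibril group. The operation
 * succeeds if at least one extent serviced it.
 */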
static errno_t hr_raid1_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
    size_t cnt, void *data_read, const void *data_write, size_t size)
{
    hr_volume_t *vol = bd->srvs->sarg;
    hr_range_lock_t *rl = NULL;
    errno_t rc;
    size_t i;
    uint64_t rebuild_blk;

    fibril_rwlock_read_lock(&vol->states_lock);
    hr_vol_status_t vol_state = vol->status;
    fibril_rwlock_read_unlock(&vol->states_lock);

    if (vol_state == HR_VOL_FAULTY || vol_state == HR_VOL_NONE)
        return EIO;

    if (type == HR_BD_READ || type == HR_BD_WRITE)
        if (size < cnt * vol->bsize)
            return EINVAL;

    rc = hr_check_ba_range(vol, cnt, ba);
    if (rc != EOK)
        return rc;

    /* allow full dev sync */
    if (type != HR_BD_SYNC || ba != 0)
        hr_add_ba_offset(vol, &ba);

    /*
     * The extent order has to be locked for the whole I/O
     * duration, so that workers have consistent targets.
     */
    fibril_rwlock_read_lock(&vol->extents_lock);

    size_t successful = 0;
    switch (type) {
    case HR_BD_READ:
        rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
            memory_order_relaxed);

        for (i = 0; i < vol->extent_no; i++) {
            fibril_rwlock_read_lock(&vol->states_lock);
            hr_ext_status_t state = vol->extents[i].status;
            fibril_rwlock_read_unlock(&vol->states_lock);

            if (state != HR_EXT_ONLINE &&
                (state != HR_EXT_REBUILD ||
                ba + cnt - 1 >= rebuild_blk)) {
                continue;
            }

            rc = block_read_direct(vol->extents[i].svc_id, ba, cnt,
                data_read);

            if (rc == ENOMEM && i + 1 == vol->extent_no)
                goto end;

            if (rc == ENOMEM)
                continue;

            if (rc != EOK) {
                hr_raid1_ext_state_callback(vol, i, rc);
            } else {
                successful++;
                break;
            }
        }
        break;
    case HR_BD_SYNC:
    case HR_BD_WRITE:
        if (type == HR_BD_WRITE) {
            rl = hr_range_lock_acquire(vol, ba, cnt);
            if (rl == NULL) {
                rc = ENOMEM;
                goto end;
            }
        }

        fibril_rwlock_read_lock(&vol->states_lock);

        rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
            memory_order_relaxed);

        size_t good = hr_raid1_count_good_extents(vol, ba, cnt,
            rebuild_blk);

        hr_fgroup_t *group = hr_fgroup_create(vol->fge, good);
        if (group == NULL) {
            if (type == HR_BD_WRITE)
                hr_range_lock_release(rl);
            rc = ENOMEM;
            fibril_rwlock_read_unlock(&vol->states_lock);
            goto end;
        }

        for (i = 0; i < vol->extent_no; i++) {
            if (vol->extents[i].status != HR_EXT_ONLINE &&
                (vol->extents[i].status != HR_EXT_REBUILD ||
                ba >= rebuild_blk)) {
                /*
                 * When the extent is being rebuilt,
                 * we only write to the part that is
                 * already rebuilt. If the I/O starts
                 * after vol->rebuild_blk, we do not
                 * proceed; the write is going to be
                 * replicated later by the rebuild.
                 */
                continue;
            }

            hr_io_t *io = hr_fgroup_alloc(group);
            io->extent = i;
            io->data_write = data_write;
            io->data_read = data_read;
            io->ba = ba;
            io->cnt = cnt;
            io->type = type;
            io->vol = vol;

            hr_fgroup_submit(group, hr_io_worker, io);
        }

        fibril_rwlock_read_unlock(&vol->states_lock);

        (void)hr_fgroup_wait(group, &successful, NULL);

        if (type == HR_BD_WRITE)
            hr_range_lock_release(rl);

        break;
    default:
        rc = EINVAL;
        goto end;
    }

    if (successful > 0)
        rc = EOK;
    else
        rc = EIO;

end:
    fibril_rwlock_read_unlock(&vol->extents_lock);

    hr_raid1_update_vol_status(vol);

    return rc;
}

/*
 * Swap the last HOTSPARE extent into the place of the first
 * extent that is not ONLINE, and rebuild the volume onto it.
 */
static errno_t hr_raid1_rebuild(void *arg)
{
    HR_DEBUG("%s()", __func__);

    hr_volume_t *vol = arg;
    size_t rebuild_idx;
    errno_t rc;

    size_t left = vol->data_blkno;
    size_t max_blks = DATA_XFER_LIMIT / vol->bsize;

    /*
     * Allocate the copy buffer before touching any volume
     * state, so that an allocation failure can abort cleanly.
     */
    void *buf = malloc(max_blks * vol->bsize);
    if (buf == NULL)
        return ENOMEM;

    rc = init_rebuild(vol, &rebuild_idx);
    if (rc != EOK) {
        free(buf);
        return rc;
    }

    size_t cnt;
    uint64_t ba = 0;
    hr_add_ba_offset(vol, &ba);

    /*
     * XXX: this is useless here after simplified DI, because
     * a rebuild cannot be triggered while another rebuild is
     * ongoing
     */
    fibril_rwlock_read_lock(&vol->extents_lock);

    hr_range_lock_t *rl = NULL;

    unsigned int percent, old_percent = 100;
    while (left != 0) {
        cnt = min(max_blks, left);

        rl = hr_range_lock_acquire(vol, ba, cnt);
        if (rl == NULL) {
            rc = ENOMEM;
            goto end;
        }

        atomic_store_explicit(&vol->rebuild_blk, ba,
            memory_order_relaxed);

        rc = hr_raid1_restore_blocks(vol, rebuild_idx, ba, cnt, buf);

        percent = ((ba + cnt) * 100) / vol->data_blkno;
        if (percent != old_percent) {
            if (percent % 5 == 0)
                HR_DEBUG("\"%s\" REBUILD progress: %u%%\n",
                    vol->devname, percent);
        }

        hr_range_lock_release(rl);

        if (rc != EOK)
            goto end;

        ba += cnt;
        left -= cnt;
        old_percent = percent;
    }

    HR_DEBUG("hr_raid1_rebuild(): rebuild finished on \"%s\" (%" PRIun "), "
        "extent no. %zu\n", vol->devname, vol->svc_id, rebuild_idx);

    fibril_rwlock_write_lock(&vol->states_lock);

    hr_update_ext_status(vol, rebuild_idx, HR_EXT_ONLINE);

    /*
     * We can be optimistic here: if some extents are
     * still INVALID, FAULTY or MISSING, the volume state
     * update function will pick them up and set the
     * volume state accordingly.
     */
    hr_update_vol_status(vol, HR_VOL_ONLINE);
    hr_mark_vol_state_dirty(vol);

    fibril_rwlock_write_unlock(&vol->states_lock);

    rc = vol->meta_ops->save(vol, WITH_STATE_CALLBACK);

end:
    if (rc != EOK) {
        /*
         * We can fail because:
         * - the rebuild extent failed or was invalidated
         * - there are no ONLINE extents (vol is FAULTY)
         * - we got ENOMEM on all READs (we also invalidate
         *   the rebuild extent here, for now)
         */
        fibril_rwlock_write_lock(&vol->states_lock);
        hr_update_vol_status(vol, HR_VOL_DEGRADED);
        hr_mark_vol_state_dirty(vol);
        fibril_rwlock_write_unlock(&vol->states_lock);
    }

    fibril_rwlock_read_unlock(&vol->extents_lock);

    hr_raid1_update_vol_status(vol);

    free(buf);

    return rc;
}

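/** Prepare the volume for a rebuild.
 *
 * Finds the first extent that is not ONLINE, swaps the last
 * hotspare into its place and marks the extent and the volume
 * as REBUILD. The index of the extent to rebuild is returned
 * through rebuild_idx.
 */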
static errno_t init_rebuild(hr_volume_t *vol, size_t *rebuild_idx)
{
    errno_t rc = EOK;

    fibril_rwlock_write_lock(&vol->extents_lock);
    fibril_rwlock_write_lock(&vol->states_lock);
    fibril_mutex_lock(&vol->hotspare_lock);

    /* XXX: allow REBUILD on INVALID extents */
    if (vol->hotspare_no == 0) {
        HR_WARN("hr_raid1_rebuild(): no free hotspares on \"%s\", "
            "aborting rebuild\n", vol->devname);
        rc = EINVAL;
        goto error;
    }

    size_t bad = vol->extent_no;
    for (size_t i = 0; i < vol->extent_no; i++) {
        if (vol->extents[i].status != HR_EXT_ONLINE) {
            bad = i;
            break;
        }
    }

    if (bad == vol->extent_no) {
        HR_WARN("hr_raid1_rebuild(): no bad extent on \"%s\", "
            "aborting rebuild\n", vol->devname);
        rc = EINVAL;
        goto error;
    }

    size_t hotspare_idx = vol->hotspare_no - 1;

    hr_ext_status_t hs_state = vol->hotspares[hotspare_idx].status;
    if (hs_state != HR_EXT_HOTSPARE) {
        HR_ERROR("hr_raid1_rebuild(): invalid hotspare state \"%s\", "
            "aborting rebuild\n", hr_get_ext_state_str(hs_state));
        rc = EINVAL;
        goto error;
    }

    rc = swap_hs(vol, bad, hotspare_idx);
    if (rc != EOK) {
        HR_ERROR("hr_raid1_rebuild(): swapping hotspare failed, "
            "aborting rebuild\n");
        goto error;
    }

    hr_extent_t *rebuild_ext = &vol->extents[bad];

    HR_DEBUG("hr_raid1_rebuild(): starting REBUILD on extent no. %zu "
        "(%" PRIun ")\n", bad, rebuild_ext->svc_id);

    atomic_store_explicit(&vol->rebuild_blk, 0, memory_order_relaxed);

    hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
    hr_update_vol_status(vol, HR_VOL_REBUILD);

    *rebuild_idx = bad;
error:
    fibril_mutex_unlock(&vol->hotspare_lock);
    fibril_rwlock_write_unlock(&vol->states_lock);
    fibril_rwlock_write_unlock(&vol->extents_lock);

    return rc;
}

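/** Swap a hotspare into the place of a bad extent.
 *
 * Replaces the bad extent's service ID with the hotspare's,
 * clears the hotspare slot and finalizes the faulty block
 * device.
 */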
static errno_t swap_hs(hr_volume_t *vol, size_t bad, size_t hs)
{
    HR_DEBUG("hr_raid1_rebuild(): swapping in hotspare\n");

    service_id_t faulty_svc_id = vol->extents[bad].svc_id;
    service_id_t hs_svc_id = vol->hotspares[hs].svc_id;

    hr_update_ext_svc_id(vol, bad, hs_svc_id);
    hr_update_ext_status(vol, bad, HR_EXT_HOTSPARE);

    hr_update_hotspare_svc_id(vol, hs, 0);
    hr_update_hotspare_status(vol, hs, HR_EXT_MISSING);

    vol->hotspare_no--;

    if (faulty_svc_id != 0)
        block_fini(faulty_svc_id);

    return EOK;
}

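/** Restore a block range onto the extent under rebuild.
 *
 * Reads cnt blocks at ba from the first ONLINE extent that
 * can provide them and writes them back to the rebuild
 * extent. Failing extents are reported through the extent
 * state callback.
 */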
static errno_t hr_raid1_restore_blocks(hr_volume_t *vol, size_t rebuild_idx,
    uint64_t ba, size_t cnt, void *buf)
{
    assert(fibril_rwlock_is_locked(&vol->extents_lock));

    errno_t rc = ENOENT;
    hr_extent_t *ext, *rebuild_ext = &vol->extents[rebuild_idx];

    fibril_rwlock_read_lock(&vol->states_lock);
    hr_ext_status_t rebuild_ext_status = rebuild_ext->status;
    fibril_rwlock_read_unlock(&vol->states_lock);

    if (rebuild_ext_status != HR_EXT_REBUILD)
        return EINVAL;

    for (size_t i = 0; i < vol->extent_no; i++) {
        fibril_rwlock_read_lock(&vol->states_lock);
        ext = &vol->extents[i];
        if (ext->status != HR_EXT_ONLINE) {
            fibril_rwlock_read_unlock(&vol->states_lock);
            continue;
        }
        fibril_rwlock_read_unlock(&vol->states_lock);

        rc = block_read_direct(ext->svc_id, ba, cnt, buf);
        if (rc == EOK)
            break;

        if (rc != ENOMEM)
            hr_raid1_ext_state_callback(vol, i, rc);

        if (i + 1 >= vol->extent_no) {
            if (rc != ENOMEM) {
                HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
                    "failed due to too many failed extents\n",
                    vol->devname, vol->svc_id);
            }

            /* for now we have to invalidate the rebuild extent */
            if (rc == ENOMEM) {
                HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
                    "failed due to too many failed reads, "
                    "because of not enough memory\n",
                    vol->devname, vol->svc_id);
                hr_raid1_ext_state_callback(vol, rebuild_idx,
                    ENOMEM);
            }

            return rc;
        }
    }

    rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, buf);
    if (rc != EOK) {
        /*
         * Here we don't handle ENOMEM, because maybe in the
         * future there is going to be M_WAITOK, or we are
         * going to wait for more memory, so that we don't
         * have to invalidate the extent...
         *
         * XXX: for now we do
         */
        hr_raid1_ext_state_callback(vol, rebuild_idx, rc);

        HR_ERROR("rebuild on \"%s\" (%" PRIun "), failed due to "
            "the rebuilt extent no. %zu WRITE (rc: %s)\n",
            vol->devname, vol->svc_id, rebuild_idx, str_error(rc));

        return rc;
    }

    return EOK;
}

/** @}
 */