source: mainline/uspace/srv/bd/hr/raid5.c@ 50603405

Last change on this file since 50603405 was 50603405, checked in by Miroslav Cimerman <mc@…>, 4 months ago

hr: metadata format agnostic superblock ops

Put metadata specific code behind a new hr_superblock_ops_t
interface, that allows to easily add support for new metadata
formats.

  • Property mode set to 100644
File size: 21.1 KB
RevLine 
[dceb6e7]1/*
[746e636]2 * Copyright (c) 2025 Miroslav Cimerman
[dceb6e7]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup hr
30 * @{
31 */
32/**
33 * @file
34 */
35
36#include <abi/ipc/ipc.h>
37#include <bd_srv.h>
38#include <block.h>
39#include <errno.h>
40#include <hr.h>
[ca7fa5b]41#include <inttypes.h>
[dceb6e7]42#include <io/log.h>
43#include <ipc/hr.h>
44#include <ipc/services.h>
45#include <loc.h>
[978130a]46#include <mem.h>
[dceb6e7]47#include <task.h>
48#include <stdio.h>
49#include <stdlib.h>
50#include <str_error.h>
51
52#include "superblock.h"
53#include "util.h"
54#include "var.h"
55
[6d0fc11]56static errno_t hr_raid5_vol_usable(hr_volume_t *);
57static ssize_t hr_raid5_get_bad_ext(hr_volume_t *);
58static errno_t hr_raid5_update_vol_status(hr_volume_t *);
59static void hr_raid5_handle_extent_error(hr_volume_t *, size_t, errno_t);
60static void xor(void *, const void *, size_t);
61static errno_t hr_raid5_read_degraded(hr_volume_t *, uint64_t, uint64_t,
[733564a]62 void *, size_t);
[6d0fc11]63static errno_t hr_raid5_write(hr_volume_t *, uint64_t, uint64_t, aoff64_t,
[733564a]64 const void *, size_t);
[6d0fc11]65static errno_t hr_raid5_write_parity(hr_volume_t *, uint64_t, uint64_t,
[733564a]66 uint64_t, const void *, size_t);
[6d0fc11]67static errno_t hr_raid5_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
[733564a]68 void *, const void *, size_t);
[6d0fc11]69static errno_t hr_raid5_rebuild(void *);
[733564a]70
71/* bdops */
[6d0fc11]72static errno_t hr_raid5_bd_open(bd_srvs_t *, bd_srv_t *);
73static errno_t hr_raid5_bd_close(bd_srv_t *);
74static errno_t hr_raid5_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
[dceb6e7]75 size_t);
[6d0fc11]76static errno_t hr_raid5_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
77static errno_t hr_raid5_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
[dceb6e7]78 const void *, size_t);
[6d0fc11]79static errno_t hr_raid5_bd_get_block_size(bd_srv_t *, size_t *);
80static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *, aoff64_t *);
[dceb6e7]81
/* Block device interface dispatch table for RAID 4/5 volumes. */
static bd_ops_t hr_raid5_bd_ops = {
	.open = hr_raid5_bd_open,
	.close = hr_raid5_bd_close,
	.sync_cache = hr_raid5_bd_sync_cache,
	.read_blocks = hr_raid5_bd_read_blocks,
	.write_blocks = hr_raid5_bd_write_blocks,
	.get_block_size = hr_raid5_bd_get_block_size,
	.get_num_blocks = hr_raid5_bd_get_num_blocks
};
91
[6d0fc11]92extern loc_srv_t *hr_srv;
93
[733564a]94errno_t hr_raid5_create(hr_volume_t *new_volume)
95{
[baa4929]96 HR_DEBUG("%s()", __func__);
97
[d7768d11]98 assert(new_volume->level == HR_LVL_5 || new_volume->level == HR_LVL_4);
[733564a]99
[65706f1]100 if (new_volume->extent_no < 3) {
[d199a6f]101 HR_ERROR("RAID 5 array needs at least 3 devices\n");
[733564a]102 return EINVAL;
103 }
104
[f1be66bf]105 fibril_rwlock_write_lock(&new_volume->states_lock);
106
[8a65373]107 errno_t rc = hr_raid5_update_vol_status(new_volume);
[f1be66bf]108 if (rc != EOK) {
109 fibril_rwlock_write_unlock(&new_volume->states_lock);
[733564a]110 return rc;
[f1be66bf]111 }
[733564a]112
113 bd_srvs_init(&new_volume->hr_bds);
114 new_volume->hr_bds.ops = &hr_raid5_bd_ops;
115 new_volume->hr_bds.sarg = new_volume;
116
[f1be66bf]117 fibril_rwlock_write_unlock(&new_volume->states_lock);
118
[8a65373]119 return EOK;
[733564a]120}
121
[746e636]122/*
123 * Called only once in volume's lifetime.
124 */
[733564a]125errno_t hr_raid5_init(hr_volume_t *vol)
126{
[baa4929]127 HR_DEBUG("%s()", __func__);
[733564a]128
[d7768d11]129 assert(vol->level == HR_LVL_5 || vol->level == HR_LVL_4);
[733564a]130
[baa4929]131 uint64_t truncated_blkno = vol->extents[0].blkno;
132 for (size_t i = 1; i < vol->extent_no; i++) {
133 if (vol->extents[i].blkno < truncated_blkno)
134 truncated_blkno = vol->extents[i].blkno;
135 }
136
137 uint64_t total_blkno = truncated_blkno * vol->extent_no;
[733564a]138
[baa4929]139 vol->truncated_blkno = truncated_blkno;
[733564a]140 vol->nblocks = total_blkno;
[50603405]141 vol->data_offset = vol->meta_ops->get_data_offset();
[baa4929]142
143 vol->data_blkno = total_blkno;
[50603405]144 /* count md blocks */
145 vol->data_blkno -= vol->meta_ops->get_size() * vol->extent_no;
[baa4929]146 vol->data_blkno -= truncated_blkno; /* count parity */
147
[733564a]148 vol->strip_size = HR_STRIP_SIZE;
149
150 return EOK;
151}
152
/** React to an external status event on the volume.
 *
 * Takes the volume lock and the states write lock, then recomputes
 * the overall volume state from the individual extent states.
 */
void hr_raid5_status_event(hr_volume_t *vol)
{
	fibril_mutex_lock(&vol->lock);
	fibril_rwlock_write_lock(&vol->states_lock);
	(void)hr_raid5_update_vol_status(vol);
	fibril_rwlock_write_unlock(&vol->states_lock);
	fibril_mutex_unlock(&vol->lock);
}
161
/** Attach a hotspare device to the volume.
 *
 * Registers the hotspare via the shared utility helper; if the volume
 * is currently DEGRADED, a rebuild fibril is started immediately.
 *
 * @param vol      Target volume.
 * @param hotspare Service id of the hotspare block device.
 *
 * @return EOK on success, ENOMEM if the rebuild fibril could not be
 *         created, or the error from hr_util_add_hotspare().
 */
errno_t hr_raid5_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
{
	HR_DEBUG("%s()", __func__);

	fibril_mutex_lock(&vol->lock);

	errno_t rc = hr_util_add_hotspare(vol, hotspare);
	if (rc != EOK)
		goto end;

	/*
	 * If the volume is degraded, start rebuild right away.
	 */
	if (vol->status == HR_VOL_DEGRADED) {
		HR_DEBUG("hr_raid5_add_hotspare(): volume in DEGRADED state, "
		    "spawning new rebuild fibril\n");
		fid_t fib = fibril_create(hr_raid5_rebuild, vol);
		if (fib == 0) {
			/*
			 * NOTE(review): hotspare_lock is not visibly taken in
			 * this function — confirm hr_util_add_hotspare()
			 * leaves it held, otherwise this unlock is unbalanced.
			 */
			fibril_mutex_unlock(&vol->hotspare_lock);
			fibril_mutex_unlock(&vol->lock);
			return ENOMEM;
		}
		fibril_start(fib);
		fibril_detach(fib);
	}

end:
	fibril_mutex_unlock(&vol->lock);

	return rc;
}
193
[733564a]194static errno_t hr_raid5_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
195{
[7a80c63]196 HR_DEBUG("%s()\n", __func__);
197
198 hr_volume_t *vol = bd->srvs->sarg;
199
200 atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);
201
[733564a]202 return EOK;
203}
204
205static errno_t hr_raid5_bd_close(bd_srv_t *bd)
206{
[7a80c63]207 HR_DEBUG("%s()\n", __func__);
208
209 hr_volume_t *vol = bd->srvs->sarg;
210
211 atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);
212
[733564a]213 return EOK;
214}
215
/* bd interface: sync; ba == 0 && cnt == 0 syncs all extents. */
static errno_t hr_raid5_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid5_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}
220
/* bd interface: read cnt blocks starting at ba into buf. */
static errno_t hr_raid5_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid5_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}
226
/* bd interface: write cnt blocks starting at ba from data. */
static errno_t hr_raid5_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid5_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}
232
233static errno_t hr_raid5_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
234{
235 hr_volume_t *vol = bd->srvs->sarg;
236
237 *rsize = vol->bsize;
238 return EOK;
239}
240
241static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
242{
243 hr_volume_t *vol = bd->srvs->sarg;
244
245 *rnb = vol->data_blkno;
246 return EOK;
247}
248
[da0570a]249static errno_t hr_raid5_vol_usable(hr_volume_t *vol)
250{
251 if (vol->status == HR_VOL_ONLINE ||
[40bf2c6]252 vol->status == HR_VOL_DEGRADED ||
253 vol->status == HR_VOL_REBUILD)
[da0570a]254 return EOK;
[a0c3080]255 return EIO;
[da0570a]256}
257
258/*
259 * Returns (-1) if all extents are online,
260 * else returns index of first bad one.
261 */
262static ssize_t hr_raid5_get_bad_ext(hr_volume_t *vol)
263{
[65706f1]264 for (size_t i = 0; i < vol->extent_no; i++)
[da0570a]265 if (vol->extents[i].status != HR_EXT_ONLINE)
266 return i;
267 return -1;
268}
269
/** Recompute the overall volume state from the extent states.
 *
 * 0 failed extents  -> ONLINE,
 * 1 failed extent   -> DEGRADED; if a hotspare is available, a rebuild
 *                      fibril is spawned,
 * 2+ failed extents -> FAULTY (single parity cannot recover).
 *
 * Callers in this file hold vol->states_lock for writing around this
 * call.
 *
 * @return EOK when the volume remains usable, EIO when FAULTY,
 *         ENOMEM if the rebuild fibril could not be created.
 */
static errno_t hr_raid5_update_vol_status(hr_volume_t *vol)
{
	hr_vol_status_t old_state = vol->status;
	size_t bad = 0;
	for (size_t i = 0; i < vol->extent_no; i++)
		if (vol->extents[i].status != HR_EXT_ONLINE)
			bad++;

	switch (bad) {
	case 0:
		if (old_state != HR_VOL_ONLINE)
			hr_update_vol_status(vol, HR_VOL_ONLINE);
		return EOK;
	case 1:
		/* don't interrupt an already running rebuild */
		if (old_state != HR_VOL_DEGRADED &&
		    old_state != HR_VOL_REBUILD) {

			hr_update_vol_status(vol, HR_VOL_DEGRADED);

			if (vol->hotspare_no > 0) {
				fid_t fib = fibril_create(hr_raid5_rebuild,
				    vol);
				if (fib == 0)
					return ENOMEM;
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
		return EOK;
	default:
		if (old_state != HR_VOL_FAULTY)
			hr_update_vol_status(vol, HR_VOL_FAULTY);
		return EIO;
	}
}
305
[aa7864b]306static void hr_raid5_handle_extent_error(hr_volume_t *vol, size_t extent,
307 errno_t rc)
308{
309 if (rc == ENOENT)
310 hr_update_ext_status(vol, extent, HR_EXT_MISSING);
311 else if (rc != EOK)
312 hr_update_ext_status(vol, extent, HR_EXT_FAILED);
313}
314
/** XOR src into dst, 64 bits at a time.
 *
 * Processes size / sizeof(uint64_t) words; any tail bytes beyond the
 * last full word are not touched.
 */
static void xor(void *dst, const void *src, size_t size)
{
	uint64_t *d = dst;
	const uint64_t *s = src;
	const uint64_t *end = d + size / sizeof(uint64_t);

	while (d != end)
		*d++ ^= *s++;
}
324
/** Reconstruct data of a failed extent from the surviving extents.
 *
 * Reads the same block range from every extent except @a bad and XORs
 * them together; the result is the missing data (RAID 4/5 parity
 * property) and is copied into @a data.
 *
 * @param vol   Volume.
 * @param bad   Index of the failed extent.
 * @param block Starting physical block.
 * @param data  Output buffer (cnt blocks).
 * @param cnt   Number of blocks.
 *
 * @return EOK on success, ENOMEM or a read error otherwise.
 */
static errno_t hr_raid5_read_degraded(hr_volume_t *vol, uint64_t bad,
    uint64_t block, void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	/* read all other extents in the stripe */
	bool first = true;
	for (i = 0; i < vol->extent_no; i++) {
		if (i == bad)
			continue;

		if (first) {
			/* seed the accumulator with the first extent */
			rc = block_read_direct(vol->extents[i].svc_id, block,
			    cnt, xorbuf);
			if (rc != EOK)
				goto end;

			first = false;
		} else {
			rc = block_read_direct(vol->extents[i].svc_id, block,
			    cnt, buf);
			if (rc != EOK)
				goto end;
			xor(xorbuf, buf, len);
		}
	}

	memcpy(data, xorbuf, len);
end:
	free(xorbuf);
	free(buf);
	return rc;
}
372
/** Write a data strip and keep parity consistent.
 *
 * Three cases, selected by which extent (if any) is bad:
 * - no bad extent, or the parity extent is the bad one: write the data
 *   directly; recompute parity via hr_raid5_write_parity() unless the
 *   parity extent itself is degraded (then the parity write is skipped),
 * - the target data extent is the bad one: rebuild the new parity by
 *   XORing all surviving data extents with the new data, write parity
 *   only,
 * - some other data extent is bad: read-modify-write
 *   (parity' = old data ^ old parity ^ new data), then write parity
 *   and the new data.
 *
 * @return EOK on success, ENOMEM or an I/O error otherwise.
 */
static errno_t hr_raid5_write(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, aoff64_t ba, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	ssize_t bad = hr_raid5_get_bad_ext(vol);
	if (bad == -1 || (size_t)bad == p_extent) {
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			return rc;
		/*
		 * DEGRADED parity - skip parity write
		 */
		if ((size_t)bad == p_extent)
			return EOK;

		rc = hr_raid5_write_parity(vol, p_extent, extent, ba, data,
		    cnt);
		return rc;
	}

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	if (extent == (size_t)bad) {
		/*
		 * new parity = read other and xor in new data
		 *
		 * write new parity
		 */
		bool first = true;
		for (i = 0; i < vol->extent_no; i++) {
			if (i == (size_t)bad)
				continue;
			if (i == p_extent)
				continue;
			if (first) {
				/* seed accumulator with first surviving extent */
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, xorbuf);
				if (rc != EOK)
					goto end;

				first = false;
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, buf);
				if (rc != EOK)
					goto end;
				xor(xorbuf, buf, len);
			}
		}
		xor(xorbuf, data, len);
		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
	} else {
		/*
		 * new parity = xor original data and old parity and new data
		 *
		 * write parity, new data
		 */
		rc = block_read_direct(vol->extents[extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_read_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    buf);
		if (rc != EOK)
			goto end;

		xor(xorbuf, buf, len);

		xor(xorbuf, data, len);

		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			goto end;
	}
end:
	free(xorbuf);
	free(buf);
	return rc;
}
474
/** Recompute and write the parity block for a stripe.
 *
 * XORs together the contents of every extent except the parity one;
 * for the extent currently being written (@a extent) the new data
 * from @a data is used instead of reading it back from disk. The
 * result is written to the parity extent.
 *
 * @param vol      Volume.
 * @param p_extent Index of the parity extent.
 * @param extent   Index of the data extent being overwritten.
 * @param block    Starting physical block.
 * @param data     New data destined for @a extent.
 * @param cnt      Number of blocks.
 *
 * @return EOK on success, ENOMEM or an I/O error otherwise.
 */
static errno_t hr_raid5_write_parity(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, uint64_t block, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	bool first = true;
	for (i = 0; i < vol->extent_no; i++) {
		if (i == p_extent)
			continue;

		if (first) {
			/* seed the accumulator */
			if (i == extent) {
				memcpy(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, xorbuf);
				if (rc != EOK)
					goto end;
			}

			first = false;
		} else {
			if (i == extent) {
				xor(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, buf);
				if (rc != EOK)
					goto end;

				xor(xorbuf, buf, len);
			}
		}
	}

	rc = block_write_direct(vol->extents[p_extent].svc_id, block, cnt,
	    xorbuf);
end:
	free(xorbuf);
	free(buf);
	return rc;
}
531
[fad91b9]532static errno_t hr_raid5_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
[da0570a]533 size_t cnt, void *dst, const void *src, size_t size)
[dceb6e7]534{
535 hr_volume_t *vol = bd->srvs->sarg;
536 errno_t rc;
[da0570a]537 uint64_t phys_block, len;
[978130a]538 size_t left;
[da0570a]539 const uint8_t *data_write = src;
540 uint8_t *data_read = dst;
541
542 /* propagate sync */
543 if (type == HR_BD_SYNC && ba == 0 && cnt == 0) {
544 hr_sync_all_extents(vol);
545 rc = hr_raid5_update_vol_status(vol);
546 return rc;
547 }
[fad91b9]548
549 if (type == HR_BD_READ || type == HR_BD_WRITE)
550 if (size < cnt * vol->bsize)
551 return EINVAL;
[dceb6e7]552
553 rc = hr_check_ba_range(vol, cnt, ba);
554 if (rc != EOK)
555 return rc;
556
[37a9c1e]557 uint8_t layout = vol->layout;
[d7768d11]558 hr_level_t level = vol->level;
559
[978130a]560 uint64_t strip_size = vol->strip_size / vol->bsize; /* in blocks */
561 uint64_t stripe = (ba / strip_size); /* stripe number */
[d7768d11]562
563 /* parity extent */
564 uint64_t p_extent;
[37a9c1e]565 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]566 p_extent = 0;
[37a9c1e]567 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]568 p_extent = vol->extent_no - 1;
[37a9c1e]569 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_0R) {
[d7768d11]570 p_extent = (stripe / (vol->extent_no - 1)) % vol->extent_no;
571 } else if (level == HR_LVL_5 &&
[37a9c1e]572 (layout == HR_RLQ_RAID5_NR || layout == HR_RLQ_RAID5_NC)) {
[d7768d11]573 p_extent = (vol->extent_no - 1) -
574 (stripe / (vol->extent_no - 1)) % vol->extent_no;
575 } else {
576 return EINVAL;
577 }
578
[978130a]579 uint64_t extent;
[37a9c1e]580 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]581 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]582 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]583 extent = stripe % (vol->extent_no - 1);
584 } else if (level == HR_LVL_5 &&
[37a9c1e]585 (layout == HR_RLQ_RAID5_0R || layout == HR_RLQ_RAID5_NR)) {
[d7768d11]586 if ((stripe % (vol->extent_no - 1)) < p_extent)
587 extent = stripe % (vol->extent_no - 1);
588 else
589 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]590 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_NC) {
[eb31781]591 extent =
592 ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
593 vol->extent_no;
[d7768d11]594 } else {
595 return EINVAL;
596 }
597
[65706f1]598 uint64_t ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
[978130a]599 uint64_t strip_off = ba % strip_size; /* strip offset */
600
[abc2c4b]601 fibril_mutex_lock(&vol->lock);
[dceb6e7]602
[da0570a]603 rc = hr_raid5_vol_usable(vol);
604 if (rc != EOK) {
605 fibril_mutex_unlock(&vol->lock);
606 return EIO;
607 }
608
[fad91b9]609 left = cnt;
[a0c3080]610
[f1be66bf]611 fibril_rwlock_write_lock(&vol->states_lock);
[dceb6e7]612 while (left != 0) {
[978130a]613 phys_block = ext_stripe * strip_size + strip_off;
614 cnt = min(left, strip_size - strip_off);
[da0570a]615 len = vol->bsize * cnt;
[978130a]616 hr_add_ba_offset(vol, &phys_block);
[fad91b9]617 switch (type) {
618 case HR_BD_SYNC:
[da0570a]619 if (vol->extents[extent].status != HR_EXT_ONLINE)
620 break;
[fad91b9]621 rc = block_sync_cache(vol->extents[extent].svc_id,
[978130a]622 phys_block, cnt);
[da0570a]623 /* allow unsupported sync */
624 if (rc == ENOTSUP)
625 rc = EOK;
[fad91b9]626 break;
627 case HR_BD_READ:
[da0570a]628 retry_read:
629 ssize_t bad = hr_raid5_get_bad_ext(vol);
[521b387]630 if (bad > -1 && extent == (size_t)bad) {
[da0570a]631 rc = hr_raid5_read_degraded(vol, bad,
632 phys_block, data_read, cnt);
633 } else {
634 rc = block_read_direct(vol->extents[extent].svc_id,
635 phys_block, cnt, data_read);
636 }
637 data_read += len;
[fad91b9]638 break;
639 case HR_BD_WRITE:
[da0570a]640 retry_write:
641 rc = hr_raid5_write(vol, p_extent, extent, phys_block,
[978130a]642 data_write, cnt);
[da0570a]643 data_write += len;
[dceb6e7]644 break;
[fad91b9]645 default:
646 rc = EINVAL;
[da0570a]647 goto error;
[fad91b9]648 }
649
[da0570a]650 if (rc == ENOMEM)
[fad91b9]651 goto error;
652
[aa7864b]653 hr_raid5_handle_extent_error(vol, extent, rc);
[da0570a]654
655 if (rc != EOK) {
656 rc = hr_raid5_update_vol_status(vol);
657 if (rc == EOK) {
658 /*
659 * State changed from ONLINE -> DEGRADED,
660 * rewind and retry
661 */
662 if (type == HR_BD_WRITE) {
663 data_write -= len;
664 goto retry_write;
665 } else if (type == HR_BD_WRITE) {
666 data_read -= len;
667 goto retry_read;
668 }
669 } else {
670 rc = EIO;
671 goto error;
672 }
673 }
674
[978130a]675 left -= cnt;
676 strip_off = 0;
677 stripe++;
[d7768d11]678
679 ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
680
[37a9c1e]681 if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_0R) {
[eb31781]682 p_extent =
683 (stripe / (vol->extent_no - 1)) % vol->extent_no;
[d7768d11]684 } else if (level == HR_LVL_5 &&
[37a9c1e]685 (layout == HR_RLQ_RAID5_NR || layout == HR_RLQ_RAID5_NC)) {
[d7768d11]686 p_extent = (vol->extent_no - 1) -
687 (stripe / (vol->extent_no - 1)) % vol->extent_no;
688 }
689
[37a9c1e]690 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]691 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]692 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]693 extent = stripe % (vol->extent_no - 1);
694 } else if (level == HR_LVL_5 &&
[37a9c1e]695 (layout == HR_RLQ_RAID5_0R || layout == HR_RLQ_RAID5_NR)) {
[d7768d11]696 if ((stripe % (vol->extent_no - 1)) < p_extent)
697 extent = stripe % (vol->extent_no - 1);
698 else
699 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]700 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_NC) {
[eb31781]701 extent =
702 ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
703 vol->extent_no;
[d7768d11]704 }
[dceb6e7]705 }
706
[fad91b9]707error:
[bf0a791]708 (void)hr_raid5_update_vol_status(vol);
[f1be66bf]709 fibril_rwlock_write_unlock(&vol->states_lock);
[abc2c4b]710 fibril_mutex_unlock(&vol->lock);
[dceb6e7]711 return rc;
712}
713
[aa7864b]714static errno_t hr_raid5_rebuild(void *arg)
715{
716 HR_DEBUG("hr_raid5_rebuild()\n");
717
718 hr_volume_t *vol = arg;
719 errno_t rc = EOK;
720 void *buf = NULL, *xorbuf = NULL;
721
722 fibril_mutex_lock(&vol->lock);
[f1be66bf]723 fibril_rwlock_read_lock(&vol->extents_lock);
724 fibril_rwlock_write_lock(&vol->states_lock);
[aa7864b]725
726 if (vol->hotspare_no == 0) {
727 HR_WARN("hr_raid5_rebuild(): no free hotspares on \"%s\", "
728 "aborting rebuild\n", vol->devname);
729 /* retval isn't checked for now */
730 goto end;
731 }
732
[65706f1]733 size_t bad = vol->extent_no;
734 for (size_t i = 0; i < vol->extent_no; i++) {
[aa7864b]735 if (vol->extents[i].status == HR_EXT_FAILED) {
736 bad = i;
737 break;
738 }
739 }
740
[65706f1]741 if (bad == vol->extent_no) {
[aa7864b]742 HR_WARN("hr_raid5_rebuild(): no bad extent on \"%s\", "
743 "aborting rebuild\n", vol->devname);
744 /* retval isn't checked for now */
745 goto end;
746 }
747
748 size_t hotspare_idx = vol->hotspare_no - 1;
749
[a0c3080]750 hr_ext_status_t hs_state = vol->hotspares[hotspare_idx].status;
751 if (hs_state != HR_EXT_HOTSPARE) {
752 HR_ERROR("hr_raid5_rebuild(): invalid hotspare state \"%s\", "
753 "aborting rebuild\n", hr_get_ext_status_msg(hs_state));
754 rc = EINVAL;
755 goto end;
756 }
757
758 HR_DEBUG("hr_raid5_rebuild(): swapping in hotspare\n");
759
760 block_fini(vol->extents[bad].svc_id);
761
[aa7864b]762 vol->extents[bad].svc_id = vol->hotspares[hotspare_idx].svc_id;
[a0c3080]763 hr_update_ext_status(vol, bad, HR_EXT_HOTSPARE);
[aa7864b]764
765 vol->hotspares[hotspare_idx].svc_id = 0;
[f1be66bf]766 fibril_mutex_lock(&vol->hotspare_lock);
[a0c3080]767 hr_update_hotspare_status(vol, hotspare_idx, HR_EXT_MISSING);
[f1be66bf]768 fibril_mutex_unlock(&vol->hotspare_lock);
[aa7864b]769
[a0c3080]770 vol->hotspare_no--;
[aa7864b]771
[a0c3080]772 hr_extent_t *rebuild_ext = &vol->extents[bad];
[aa7864b]773
[ca7fa5b]774 HR_DEBUG("hr_raid5_rebuild(): starting rebuild on (%" PRIun ")\n",
[a0c3080]775 rebuild_ext->svc_id);
776
777 hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
778 hr_update_vol_status(vol, HR_VOL_REBUILD);
779
[aa7864b]780 uint64_t max_blks = DATA_XFER_LIMIT / vol->bsize;
[65706f1]781 uint64_t left = vol->data_blkno / (vol->extent_no - 1);
[aa7864b]782 buf = malloc(max_blks * vol->bsize);
783 xorbuf = malloc(max_blks * vol->bsize);
784
785 uint64_t ba = 0, cnt;
786 hr_add_ba_offset(vol, &ba);
[a0c3080]787
[aa7864b]788 while (left != 0) {
789 cnt = min(left, max_blks);
790
791 /*
792 * Almost the same as read_degraded,
793 * but we don't want to allocate new
794 * xorbuf each blk rebuild batch.
795 */
796 bool first = true;
[65706f1]797 for (size_t i = 0; i < vol->extent_no; i++) {
[aa7864b]798 if (i == bad)
799 continue;
[8160e4c0]800 if (first)
801 rc = block_read_direct(vol->extents[i].svc_id,
802 ba, cnt, xorbuf);
803 else
804 rc = block_read_direct(vol->extents[i].svc_id,
805 ba, cnt, buf);
[aa7864b]806 if (rc != EOK) {
807 hr_raid5_handle_extent_error(vol, i, rc);
[ca7fa5b]808 HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
809 "failed due to a failed ONLINE extent, "
810 "number %zu\n",
[aa7864b]811 vol->devname, vol->svc_id, i);
812 goto end;
813 }
814
[8160e4c0]815 if (!first)
[aa7864b]816 xor(xorbuf, buf, cnt * vol->bsize);
[8160e4c0]817 else
818 first = false;
[aa7864b]819 }
820
[a0c3080]821 rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, xorbuf);
[aa7864b]822 if (rc != EOK) {
823 hr_raid5_handle_extent_error(vol, bad, rc);
[ca7fa5b]824 HR_ERROR("rebuild on \"%s\" (%" PRIun "), failed due to "
825 "the rebuilt extent number %zu failing\n",
[aa7864b]826 vol->devname, vol->svc_id, bad);
827 goto end;
828 }
829
830 ba += cnt;
831 left -= cnt;
[40bf2c6]832
833 /*
834 * Let other IO requests be served
835 * during rebuild.
836 */
[f1be66bf]837 fibril_rwlock_write_unlock(&vol->states_lock);
[40bf2c6]838 fibril_mutex_unlock(&vol->lock);
839 fibril_mutex_lock(&vol->lock);
[f1be66bf]840 fibril_rwlock_write_lock(&vol->states_lock);
[aa7864b]841 }
842
[ca7fa5b]843 HR_DEBUG("hr_raid5_rebuild(): rebuild finished on \"%s\" (%" PRIun "), "
844 "extent number %zu\n", vol->devname, vol->svc_id, hotspare_idx);
[aa7864b]845
846 hr_update_ext_status(vol, bad, HR_EXT_ONLINE);
[0277ec2]847
[50603405]848 rc = vol->meta_ops->save(vol, WITH_STATE_CALLBACK);
[0277ec2]849
[aa7864b]850end:
[bf0a791]851 (void)hr_raid5_update_vol_status(vol);
[aa7864b]852
[f1be66bf]853 fibril_rwlock_write_unlock(&vol->states_lock);
854 fibril_rwlock_read_unlock(&vol->extents_lock);
[aa7864b]855 fibril_mutex_unlock(&vol->lock);
856
857 if (buf != NULL)
858 free(buf);
859
860 if (xorbuf != NULL)
861 free(xorbuf);
862
863 return rc;
864}
865
[dceb6e7]866/** @}
867 */
Note: See TracBrowser for help on using the repository browser.