source: mainline/uspace/srv/bd/hr/raid5.c@ 155d34f

Last change on this file since 155d34f was 155d34f, checked in by Miroslav Cimerman <mc@…>, 8 months ago

hr: rename hr_get_*_status_msg → hr_get_*_state_str

  • Property mode set to 100644
File size: 20.9 KB
RevLine 
[dceb6e7]1/*
[746e636]2 * Copyright (c) 2025 Miroslav Cimerman
[dceb6e7]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup hr
30 * @{
31 */
32/**
33 * @file
34 */
35
36#include <abi/ipc/ipc.h>
37#include <bd_srv.h>
38#include <block.h>
39#include <errno.h>
40#include <hr.h>
[ca7fa5b]41#include <inttypes.h>
[dceb6e7]42#include <io/log.h>
43#include <ipc/hr.h>
44#include <ipc/services.h>
45#include <loc.h>
[978130a]46#include <mem.h>
[dceb6e7]47#include <task.h>
48#include <stdio.h>
49#include <stdlib.h>
50#include <str_error.h>
51
52#include "superblock.h"
53#include "util.h"
54#include "var.h"
55
[6d0fc11]56static errno_t hr_raid5_vol_usable(hr_volume_t *);
57static ssize_t hr_raid5_get_bad_ext(hr_volume_t *);
58static errno_t hr_raid5_update_vol_status(hr_volume_t *);
59static void hr_raid5_handle_extent_error(hr_volume_t *, size_t, errno_t);
60static void xor(void *, const void *, size_t);
61static errno_t hr_raid5_read_degraded(hr_volume_t *, uint64_t, uint64_t,
[733564a]62 void *, size_t);
[6d0fc11]63static errno_t hr_raid5_write(hr_volume_t *, uint64_t, uint64_t, aoff64_t,
[733564a]64 const void *, size_t);
[6d0fc11]65static errno_t hr_raid5_write_parity(hr_volume_t *, uint64_t, uint64_t,
[733564a]66 uint64_t, const void *, size_t);
[6d0fc11]67static errno_t hr_raid5_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
[733564a]68 void *, const void *, size_t);
[6d0fc11]69static errno_t hr_raid5_rebuild(void *);
[733564a]70
71/* bdops */
[6d0fc11]72static errno_t hr_raid5_bd_open(bd_srvs_t *, bd_srv_t *);
73static errno_t hr_raid5_bd_close(bd_srv_t *);
74static errno_t hr_raid5_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
[dceb6e7]75 size_t);
[6d0fc11]76static errno_t hr_raid5_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
77static errno_t hr_raid5_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
[dceb6e7]78 const void *, size_t);
[6d0fc11]79static errno_t hr_raid5_bd_get_block_size(bd_srv_t *, size_t *);
80static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *, aoff64_t *);
[dceb6e7]81
82static bd_ops_t hr_raid5_bd_ops = {
[6d0fc11]83 .open = hr_raid5_bd_open,
84 .close = hr_raid5_bd_close,
85 .sync_cache = hr_raid5_bd_sync_cache,
86 .read_blocks = hr_raid5_bd_read_blocks,
87 .write_blocks = hr_raid5_bd_write_blocks,
88 .get_block_size = hr_raid5_bd_get_block_size,
89 .get_num_blocks = hr_raid5_bd_get_num_blocks
[dceb6e7]90};
91
[6d0fc11]92extern loc_srv_t *hr_srv;
93
[733564a]94errno_t hr_raid5_create(hr_volume_t *new_volume)
95{
[baa4929]96 HR_DEBUG("%s()", __func__);
97
[d7768d11]98 assert(new_volume->level == HR_LVL_5 || new_volume->level == HR_LVL_4);
[733564a]99
[65706f1]100 if (new_volume->extent_no < 3) {
[d199a6f]101 HR_ERROR("RAID 5 array needs at least 3 devices\n");
[733564a]102 return EINVAL;
103 }
104
[f1be66bf]105 fibril_rwlock_write_lock(&new_volume->states_lock);
106
[8a65373]107 errno_t rc = hr_raid5_update_vol_status(new_volume);
[f1be66bf]108 if (rc != EOK) {
[18c3658]109 HR_NOTE("\"%s\": unusable state, not creating\n",
110 new_volume->devname);
[f1be66bf]111 fibril_rwlock_write_unlock(&new_volume->states_lock);
[733564a]112 return rc;
[f1be66bf]113 }
[733564a]114
115 bd_srvs_init(&new_volume->hr_bds);
116 new_volume->hr_bds.ops = &hr_raid5_bd_ops;
117 new_volume->hr_bds.sarg = new_volume;
118
[f1be66bf]119 fibril_rwlock_write_unlock(&new_volume->states_lock);
120
[8a65373]121 return EOK;
[733564a]122}
123
[746e636]124/*
125 * Called only once in volume's lifetime.
126 */
[733564a]127errno_t hr_raid5_init(hr_volume_t *vol)
128{
[baa4929]129 HR_DEBUG("%s()", __func__);
[733564a]130
[d7768d11]131 assert(vol->level == HR_LVL_5 || vol->level == HR_LVL_4);
[733564a]132
[80c760e]133 uint64_t total_blkno = vol->truncated_blkno * vol->extent_no;
[733564a]134
[50603405]135 vol->data_offset = vol->meta_ops->get_data_offset();
[baa4929]136
137 vol->data_blkno = total_blkno;
[50603405]138 /* count md blocks */
139 vol->data_blkno -= vol->meta_ops->get_size() * vol->extent_no;
[80c760e]140 vol->data_blkno -= vol->truncated_blkno; /* count parity */
[baa4929]141
[733564a]142 vol->strip_size = HR_STRIP_SIZE;
143
144 return EOK;
145}
146
/** State event notification handler.
 *
 * Re-evaluates the volume state from the current extent states.
 * The result is intentionally ignored - this is only a notification.
 *
 * @param vol Volume whose state may have changed.
 */
void hr_raid5_status_event(hr_volume_t *vol)
{
	/* Volume lock first, then states lock - same order as bd_op/rebuild. */
	fibril_mutex_lock(&vol->lock);
	fibril_rwlock_write_lock(&vol->states_lock);
	(void)hr_raid5_update_vol_status(vol);
	fibril_rwlock_write_unlock(&vol->states_lock);
	fibril_mutex_unlock(&vol->lock);
}
155
/** Attach a hotspare to the volume.
 *
 * If the volume is currently DEGRADED, a rebuild fibril is spawned
 * immediately so the new hotspare is put to use.
 *
 * @param vol      Volume to attach the hotspare to.
 * @param hotspare Service id of the hotspare block device.
 *
 * @return EOK on success, an error from hr_util_add_hotspare(), or
 *         ENOMEM if the rebuild fibril could not be created.
 */
errno_t hr_raid5_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
{
	HR_DEBUG("%s()", __func__);

	fibril_mutex_lock(&vol->lock);

	errno_t rc = hr_util_add_hotspare(vol, hotspare);
	if (rc != EOK)
		goto end;

	/*
	 * If the volume is degraded, start rebuild right away.
	 */
	if (vol->status == HR_VOL_DEGRADED) {
		HR_DEBUG("hr_raid5_add_hotspare(): volume in DEGRADED state, "
		    "spawning new rebuild fibril\n");
		fid_t fib = fibril_create(hr_raid5_rebuild, vol);
		if (fib == 0) {
			/*
			 * NOTE(review): hotspare_lock is not visibly taken in
			 * this function - presumably hr_util_add_hotspare()
			 * returns with it held on success; verify, otherwise
			 * this unlocks an unowned mutex.
			 */
			fibril_mutex_unlock(&vol->hotspare_lock);
			fibril_mutex_unlock(&vol->lock);
			return ENOMEM;
		}
		fibril_start(fib);
		fibril_detach(fib);
	}

end:
	fibril_mutex_unlock(&vol->lock);

	return rc;
}
187
[733564a]188static errno_t hr_raid5_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
189{
[7a80c63]190 HR_DEBUG("%s()\n", __func__);
191
192 hr_volume_t *vol = bd->srvs->sarg;
193
194 atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);
195
[733564a]196 return EOK;
197}
198
199static errno_t hr_raid5_bd_close(bd_srv_t *bd)
200{
[7a80c63]201 HR_DEBUG("%s()\n", __func__);
202
203 hr_volume_t *vol = bd->srvs->sarg;
204
205 atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);
206
[733564a]207 return EOK;
208}
209
/** Block device sync callback - delegate to the common op handler. */
static errno_t hr_raid5_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid5_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}
214
/** Block device read callback - delegate to the common op handler. */
static errno_t hr_raid5_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid5_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}
220
/** Block device write callback - delegate to the common op handler. */
static errno_t hr_raid5_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid5_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}
226
227static errno_t hr_raid5_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
228{
229 hr_volume_t *vol = bd->srvs->sarg;
230
231 *rsize = vol->bsize;
232 return EOK;
233}
234
235static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
236{
237 hr_volume_t *vol = bd->srvs->sarg;
238
239 *rnb = vol->data_blkno;
240 return EOK;
241}
242
[da0570a]243static errno_t hr_raid5_vol_usable(hr_volume_t *vol)
244{
245 if (vol->status == HR_VOL_ONLINE ||
[40bf2c6]246 vol->status == HR_VOL_DEGRADED ||
247 vol->status == HR_VOL_REBUILD)
[da0570a]248 return EOK;
[a0c3080]249 return EIO;
[da0570a]250}
251
252/*
253 * Returns (-1) if all extents are online,
254 * else returns index of first bad one.
255 */
256static ssize_t hr_raid5_get_bad_ext(hr_volume_t *vol)
257{
[65706f1]258 for (size_t i = 0; i < vol->extent_no; i++)
[da0570a]259 if (vol->extents[i].status != HR_EXT_ONLINE)
260 return i;
261 return -1;
262}
263
264static errno_t hr_raid5_update_vol_status(hr_volume_t *vol)
265{
266 hr_vol_status_t old_state = vol->status;
267 size_t bad = 0;
[65706f1]268 for (size_t i = 0; i < vol->extent_no; i++)
[da0570a]269 if (vol->extents[i].status != HR_EXT_ONLINE)
270 bad++;
271
272 switch (bad) {
273 case 0:
[a0c3080]274 if (old_state != HR_VOL_ONLINE)
275 hr_update_vol_status(vol, HR_VOL_ONLINE);
[da0570a]276 return EOK;
277 case 1:
[aa7864b]278 if (old_state != HR_VOL_DEGRADED &&
279 old_state != HR_VOL_REBUILD) {
[a0c3080]280
281 hr_update_vol_status(vol, HR_VOL_DEGRADED);
282
[aa7864b]283 if (vol->hotspare_no > 0) {
284 fid_t fib = fibril_create(hr_raid5_rebuild,
285 vol);
[a0c3080]286 if (fib == 0)
287 return ENOMEM;
[aa7864b]288 fibril_start(fib);
289 fibril_detach(fib);
290 }
[da0570a]291 }
292 return EOK;
293 default:
[a0c3080]294 if (old_state != HR_VOL_FAULTY)
295 hr_update_vol_status(vol, HR_VOL_FAULTY);
296 return EIO;
[da0570a]297 }
298}
299
[aa7864b]300static void hr_raid5_handle_extent_error(hr_volume_t *vol, size_t extent,
301 errno_t rc)
302{
303 if (rc == ENOENT)
304 hr_update_ext_status(vol, extent, HR_EXT_MISSING);
305 else if (rc != EOK)
306 hr_update_ext_status(vol, extent, HR_EXT_FAILED);
307}
308
/** XOR @a src into @a dst, 64 bits at a time.
 *
 * Processes size / 8 whole words; a sub-word tail is ignored
 * (callers pass multiples of the block size).
 */
static void xor(void *dst, const void *src, size_t size)
{
	uint64_t *d = dst;
	const uint64_t *s = src;
	size_t words = size / sizeof(uint64_t);

	for (size_t i = 0; i < words; i++)
		d[i] ^= s[i];
}
318
[da0570a]319static errno_t hr_raid5_read_degraded(hr_volume_t *vol, uint64_t bad,
320 uint64_t block, void *data, size_t cnt)
[dceb6e7]321{
322 errno_t rc;
[da0570a]323 size_t i;
[dceb6e7]324 void *xorbuf;
325 void *buf;
[da0570a]326 uint64_t len = vol->bsize * cnt;
[dceb6e7]327
[da0570a]328 xorbuf = malloc(len);
[dceb6e7]329 if (xorbuf == NULL)
330 return ENOMEM;
331
[da0570a]332 buf = malloc(len);
[c7b4452]333 if (buf == NULL) {
334 free(xorbuf);
[dceb6e7]335 return ENOMEM;
[c7b4452]336 }
[dceb6e7]337
[da0570a]338 /* read all other extents in the stripe */
[8160e4c0]339 bool first = true;
[65706f1]340 for (i = 0; i < vol->extent_no; i++) {
[8160e4c0]341 if (i == bad)
[da0570a]342 continue;
[8160e4c0]343
344 if (first) {
345 rc = block_read_direct(vol->extents[i].svc_id, block,
346 cnt, xorbuf);
347 if (rc != EOK)
348 goto end;
349
350 first = false;
[da0570a]351 } else {
352 rc = block_read_direct(vol->extents[i].svc_id, block,
353 cnt, buf);
354 if (rc != EOK)
355 goto end;
356 xor(xorbuf, buf, len);
357 }
358 }
[978130a]359
[da0570a]360 memcpy(data, xorbuf, len);
361end:
362 free(xorbuf);
363 free(buf);
364 return rc;
365}
366
/** Write blocks to one data extent and keep parity consistent.
 *
 * Three cases depending on which extent (if any) is bad:
 *  - no bad extent, or the bad one is the parity extent: write the data,
 *    then (only if parity is healthy) recompute it via
 *    hr_raid5_write_parity(),
 *  - the target data extent itself is bad: compute the new parity by
 *    reading all other data extents and XOR-ing in the new data, then
 *    write only the parity,
 *  - some other data extent is bad: read-modify-write, i.e.
 *    parity' = old data ^ old parity ^ new data, then write parity and
 *    the new data.
 *
 * @param vol      Volume to write to.
 * @param p_extent Index of the stripe's parity extent.
 * @param extent   Index of the target data extent.
 * @param ba       Block address within the extents.
 * @param data     Data to write (cnt * vol->bsize bytes).
 * @param cnt      Number of blocks.
 *
 * @return EOK on success, ENOMEM or an I/O error code otherwise.
 */
static errno_t hr_raid5_write(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, aoff64_t ba, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	ssize_t bad = hr_raid5_get_bad_ext(vol);
	if (bad == -1 || (size_t)bad == p_extent) {
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			return rc;
		/*
		 * DEGRADED parity - skip parity write
		 */
		if ((size_t)bad == p_extent)
			return EOK;

		rc = hr_raid5_write_parity(vol, p_extent, extent, ba, data,
		    cnt);
		return rc;
	}

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	if (extent == (size_t)bad) {
		/*
		 * new parity = read other and xor in new data
		 *
		 * write new parity
		 */
		bool first = true;
		for (i = 0; i < vol->extent_no; i++) {
			if (i == (size_t)bad)
				continue;
			if (i == p_extent)
				continue;
			if (first) {
				/* Seed xorbuf with the first readable strip. */
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, xorbuf);
				if (rc != EOK)
					goto end;

				first = false;
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, buf);
				if (rc != EOK)
					goto end;
				xor(xorbuf, buf, len);
			}
		}
		xor(xorbuf, data, len);
		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
	} else {
		/*
		 * new parity = xor original data and old parity and new data
		 *
		 * write parity, new data
		 */
		rc = block_read_direct(vol->extents[extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_read_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    buf);
		if (rc != EOK)
			goto end;

		xor(xorbuf, buf, len);

		xor(xorbuf, data, len);

		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			goto end;
	}
end:
	free(xorbuf);
	free(buf);
	return rc;
}
468
/** Recompute and write the parity strip of one stripe.
 *
 * XORs together the corresponding strips of all data extents,
 * substituting @a data for the contents of @a extent (the strip that
 * is being overwritten, so re-reading it would be stale/wasteful),
 * and writes the result to the parity extent.
 *
 * @param vol      Volume to operate on.
 * @param p_extent Index of the parity extent.
 * @param extent   Index of the data extent whose new contents is @a data.
 * @param block    Block address within the extents.
 * @param data     New data of @a extent (cnt * vol->bsize bytes).
 * @param cnt      Number of blocks.
 *
 * @return EOK on success, ENOMEM or an I/O error code otherwise.
 */
static errno_t hr_raid5_write_parity(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, uint64_t block, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	/* Seed xorbuf with the first strip, then fold the rest in. */
	bool first = true;
	for (i = 0; i < vol->extent_no; i++) {
		if (i == p_extent)
			continue;

		if (first) {
			if (i == extent) {
				memcpy(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, xorbuf);
				if (rc != EOK)
					goto end;
			}

			first = false;
		} else {
			if (i == extent) {
				xor(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, buf);
				if (rc != EOK)
					goto end;

				xor(xorbuf, buf, len);
			}
		}
	}

	rc = block_write_direct(vol->extents[p_extent].svc_id, block, cnt,
	    xorbuf);
end:
	free(xorbuf);
	free(buf);
	return rc;
}
525
[fad91b9]526static errno_t hr_raid5_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
[da0570a]527 size_t cnt, void *dst, const void *src, size_t size)
[dceb6e7]528{
529 hr_volume_t *vol = bd->srvs->sarg;
530 errno_t rc;
[da0570a]531 uint64_t phys_block, len;
[978130a]532 size_t left;
[da0570a]533 const uint8_t *data_write = src;
534 uint8_t *data_read = dst;
535
536 /* propagate sync */
537 if (type == HR_BD_SYNC && ba == 0 && cnt == 0) {
538 hr_sync_all_extents(vol);
539 rc = hr_raid5_update_vol_status(vol);
540 return rc;
541 }
[fad91b9]542
543 if (type == HR_BD_READ || type == HR_BD_WRITE)
544 if (size < cnt * vol->bsize)
545 return EINVAL;
[dceb6e7]546
547 rc = hr_check_ba_range(vol, cnt, ba);
548 if (rc != EOK)
549 return rc;
550
[37a9c1e]551 uint8_t layout = vol->layout;
[d7768d11]552 hr_level_t level = vol->level;
553
[978130a]554 uint64_t strip_size = vol->strip_size / vol->bsize; /* in blocks */
555 uint64_t stripe = (ba / strip_size); /* stripe number */
[d7768d11]556
557 /* parity extent */
558 uint64_t p_extent;
[37a9c1e]559 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]560 p_extent = 0;
[37a9c1e]561 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]562 p_extent = vol->extent_no - 1;
[37a9c1e]563 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_0R) {
[d7768d11]564 p_extent = (stripe / (vol->extent_no - 1)) % vol->extent_no;
565 } else if (level == HR_LVL_5 &&
[37a9c1e]566 (layout == HR_RLQ_RAID5_NR || layout == HR_RLQ_RAID5_NC)) {
[d7768d11]567 p_extent = (vol->extent_no - 1) -
568 (stripe / (vol->extent_no - 1)) % vol->extent_no;
569 } else {
570 return EINVAL;
571 }
572
[978130a]573 uint64_t extent;
[37a9c1e]574 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]575 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]576 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]577 extent = stripe % (vol->extent_no - 1);
578 } else if (level == HR_LVL_5 &&
[37a9c1e]579 (layout == HR_RLQ_RAID5_0R || layout == HR_RLQ_RAID5_NR)) {
[d7768d11]580 if ((stripe % (vol->extent_no - 1)) < p_extent)
581 extent = stripe % (vol->extent_no - 1);
582 else
583 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]584 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_NC) {
[eb31781]585 extent =
586 ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
587 vol->extent_no;
[d7768d11]588 } else {
589 return EINVAL;
590 }
591
[65706f1]592 uint64_t ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
[978130a]593 uint64_t strip_off = ba % strip_size; /* strip offset */
594
[abc2c4b]595 fibril_mutex_lock(&vol->lock);
[dceb6e7]596
[da0570a]597 rc = hr_raid5_vol_usable(vol);
598 if (rc != EOK) {
599 fibril_mutex_unlock(&vol->lock);
600 return EIO;
601 }
602
[fad91b9]603 left = cnt;
[a0c3080]604
[f1be66bf]605 fibril_rwlock_write_lock(&vol->states_lock);
[dceb6e7]606 while (left != 0) {
[978130a]607 phys_block = ext_stripe * strip_size + strip_off;
608 cnt = min(left, strip_size - strip_off);
[da0570a]609 len = vol->bsize * cnt;
[978130a]610 hr_add_ba_offset(vol, &phys_block);
[fad91b9]611 switch (type) {
612 case HR_BD_SYNC:
[da0570a]613 if (vol->extents[extent].status != HR_EXT_ONLINE)
614 break;
[fad91b9]615 rc = block_sync_cache(vol->extents[extent].svc_id,
[978130a]616 phys_block, cnt);
[da0570a]617 /* allow unsupported sync */
618 if (rc == ENOTSUP)
619 rc = EOK;
[fad91b9]620 break;
621 case HR_BD_READ:
[da0570a]622 retry_read:
623 ssize_t bad = hr_raid5_get_bad_ext(vol);
[521b387]624 if (bad > -1 && extent == (size_t)bad) {
[da0570a]625 rc = hr_raid5_read_degraded(vol, bad,
626 phys_block, data_read, cnt);
627 } else {
628 rc = block_read_direct(vol->extents[extent].svc_id,
629 phys_block, cnt, data_read);
630 }
631 data_read += len;
[fad91b9]632 break;
633 case HR_BD_WRITE:
[da0570a]634 retry_write:
635 rc = hr_raid5_write(vol, p_extent, extent, phys_block,
[978130a]636 data_write, cnt);
[da0570a]637 data_write += len;
[dceb6e7]638 break;
[fad91b9]639 default:
640 rc = EINVAL;
[da0570a]641 goto error;
[fad91b9]642 }
643
[da0570a]644 if (rc == ENOMEM)
[fad91b9]645 goto error;
646
[aa7864b]647 hr_raid5_handle_extent_error(vol, extent, rc);
[da0570a]648
649 if (rc != EOK) {
650 rc = hr_raid5_update_vol_status(vol);
651 if (rc == EOK) {
652 /*
653 * State changed from ONLINE -> DEGRADED,
654 * rewind and retry
655 */
656 if (type == HR_BD_WRITE) {
657 data_write -= len;
658 goto retry_write;
659 } else if (type == HR_BD_WRITE) {
660 data_read -= len;
661 goto retry_read;
662 }
663 } else {
664 rc = EIO;
665 goto error;
666 }
667 }
668
[978130a]669 left -= cnt;
670 strip_off = 0;
671 stripe++;
[d7768d11]672
673 ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
674
[37a9c1e]675 if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_0R) {
[eb31781]676 p_extent =
677 (stripe / (vol->extent_no - 1)) % vol->extent_no;
[d7768d11]678 } else if (level == HR_LVL_5 &&
[37a9c1e]679 (layout == HR_RLQ_RAID5_NR || layout == HR_RLQ_RAID5_NC)) {
[d7768d11]680 p_extent = (vol->extent_no - 1) -
681 (stripe / (vol->extent_no - 1)) % vol->extent_no;
682 }
683
[37a9c1e]684 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]685 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]686 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]687 extent = stripe % (vol->extent_no - 1);
688 } else if (level == HR_LVL_5 &&
[37a9c1e]689 (layout == HR_RLQ_RAID5_0R || layout == HR_RLQ_RAID5_NR)) {
[d7768d11]690 if ((stripe % (vol->extent_no - 1)) < p_extent)
691 extent = stripe % (vol->extent_no - 1);
692 else
693 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]694 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_NC) {
[eb31781]695 extent =
696 ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
697 vol->extent_no;
[d7768d11]698 }
[dceb6e7]699 }
700
[fad91b9]701error:
[bf0a791]702 (void)hr_raid5_update_vol_status(vol);
[f1be66bf]703 fibril_rwlock_write_unlock(&vol->states_lock);
[abc2c4b]704 fibril_mutex_unlock(&vol->lock);
[dceb6e7]705 return rc;
706}
707
[aa7864b]708static errno_t hr_raid5_rebuild(void *arg)
709{
710 HR_DEBUG("hr_raid5_rebuild()\n");
711
712 hr_volume_t *vol = arg;
713 errno_t rc = EOK;
714 void *buf = NULL, *xorbuf = NULL;
715
716 fibril_mutex_lock(&vol->lock);
[f1be66bf]717 fibril_rwlock_read_lock(&vol->extents_lock);
718 fibril_rwlock_write_lock(&vol->states_lock);
[aa7864b]719
720 if (vol->hotspare_no == 0) {
721 HR_WARN("hr_raid5_rebuild(): no free hotspares on \"%s\", "
722 "aborting rebuild\n", vol->devname);
723 /* retval isn't checked for now */
724 goto end;
725 }
726
[65706f1]727 size_t bad = vol->extent_no;
728 for (size_t i = 0; i < vol->extent_no; i++) {
[aa7864b]729 if (vol->extents[i].status == HR_EXT_FAILED) {
730 bad = i;
731 break;
732 }
733 }
734
[65706f1]735 if (bad == vol->extent_no) {
[aa7864b]736 HR_WARN("hr_raid5_rebuild(): no bad extent on \"%s\", "
737 "aborting rebuild\n", vol->devname);
738 /* retval isn't checked for now */
739 goto end;
740 }
741
742 size_t hotspare_idx = vol->hotspare_no - 1;
743
[a0c3080]744 hr_ext_status_t hs_state = vol->hotspares[hotspare_idx].status;
745 if (hs_state != HR_EXT_HOTSPARE) {
746 HR_ERROR("hr_raid5_rebuild(): invalid hotspare state \"%s\", "
[155d34f]747 "aborting rebuild\n", hr_get_ext_state_str(hs_state));
[a0c3080]748 rc = EINVAL;
749 goto end;
750 }
751
752 HR_DEBUG("hr_raid5_rebuild(): swapping in hotspare\n");
753
754 block_fini(vol->extents[bad].svc_id);
755
[aa7864b]756 vol->extents[bad].svc_id = vol->hotspares[hotspare_idx].svc_id;
[a0c3080]757 hr_update_ext_status(vol, bad, HR_EXT_HOTSPARE);
[aa7864b]758
759 vol->hotspares[hotspare_idx].svc_id = 0;
[f1be66bf]760 fibril_mutex_lock(&vol->hotspare_lock);
[a0c3080]761 hr_update_hotspare_status(vol, hotspare_idx, HR_EXT_MISSING);
[f1be66bf]762 fibril_mutex_unlock(&vol->hotspare_lock);
[aa7864b]763
[a0c3080]764 vol->hotspare_no--;
[aa7864b]765
[a0c3080]766 hr_extent_t *rebuild_ext = &vol->extents[bad];
[aa7864b]767
[ca7fa5b]768 HR_DEBUG("hr_raid5_rebuild(): starting rebuild on (%" PRIun ")\n",
[a0c3080]769 rebuild_ext->svc_id);
770
771 hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
772 hr_update_vol_status(vol, HR_VOL_REBUILD);
773
[aa7864b]774 uint64_t max_blks = DATA_XFER_LIMIT / vol->bsize;
[65706f1]775 uint64_t left = vol->data_blkno / (vol->extent_no - 1);
[aa7864b]776 buf = malloc(max_blks * vol->bsize);
777 xorbuf = malloc(max_blks * vol->bsize);
778
779 uint64_t ba = 0, cnt;
780 hr_add_ba_offset(vol, &ba);
[a0c3080]781
[aa7864b]782 while (left != 0) {
783 cnt = min(left, max_blks);
784
785 /*
786 * Almost the same as read_degraded,
787 * but we don't want to allocate new
788 * xorbuf each blk rebuild batch.
789 */
790 bool first = true;
[65706f1]791 for (size_t i = 0; i < vol->extent_no; i++) {
[aa7864b]792 if (i == bad)
793 continue;
[8160e4c0]794 if (first)
795 rc = block_read_direct(vol->extents[i].svc_id,
796 ba, cnt, xorbuf);
797 else
798 rc = block_read_direct(vol->extents[i].svc_id,
799 ba, cnt, buf);
[aa7864b]800 if (rc != EOK) {
801 hr_raid5_handle_extent_error(vol, i, rc);
[ca7fa5b]802 HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
803 "failed due to a failed ONLINE extent, "
804 "number %zu\n",
[aa7864b]805 vol->devname, vol->svc_id, i);
806 goto end;
807 }
808
[8160e4c0]809 if (!first)
[aa7864b]810 xor(xorbuf, buf, cnt * vol->bsize);
[8160e4c0]811 else
812 first = false;
[aa7864b]813 }
814
[a0c3080]815 rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, xorbuf);
[aa7864b]816 if (rc != EOK) {
817 hr_raid5_handle_extent_error(vol, bad, rc);
[ca7fa5b]818 HR_ERROR("rebuild on \"%s\" (%" PRIun "), failed due to "
819 "the rebuilt extent number %zu failing\n",
[aa7864b]820 vol->devname, vol->svc_id, bad);
821 goto end;
822 }
823
824 ba += cnt;
825 left -= cnt;
[40bf2c6]826
827 /*
828 * Let other IO requests be served
829 * during rebuild.
830 */
[f1be66bf]831 fibril_rwlock_write_unlock(&vol->states_lock);
[40bf2c6]832 fibril_mutex_unlock(&vol->lock);
833 fibril_mutex_lock(&vol->lock);
[f1be66bf]834 fibril_rwlock_write_lock(&vol->states_lock);
[aa7864b]835 }
836
[ca7fa5b]837 HR_DEBUG("hr_raid5_rebuild(): rebuild finished on \"%s\" (%" PRIun "), "
838 "extent number %zu\n", vol->devname, vol->svc_id, hotspare_idx);
[aa7864b]839
840 hr_update_ext_status(vol, bad, HR_EXT_ONLINE);
[0277ec2]841
[50603405]842 rc = vol->meta_ops->save(vol, WITH_STATE_CALLBACK);
[0277ec2]843
[aa7864b]844end:
[bf0a791]845 (void)hr_raid5_update_vol_status(vol);
[aa7864b]846
[f1be66bf]847 fibril_rwlock_write_unlock(&vol->states_lock);
848 fibril_rwlock_read_unlock(&vol->extents_lock);
[aa7864b]849 fibril_mutex_unlock(&vol->lock);
850
851 if (buf != NULL)
852 free(buf);
853
854 if (xorbuf != NULL)
855 free(xorbuf);
856
857 return rc;
858}
859
[dceb6e7]860/** @}
861 */
Note: See TracBrowser for help on using the repository browser.