source: mainline/uspace/srv/bd/hr/raid5.c@ 521b387

Last change on this file since 521b387 was 521b387, checked in by Miroslav Cimerman <mc@…>, 13 months ago

hr: RAID5: fix degraded write

/*
 * Copyright (c) 2024 Miroslav Cimerman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup hr
 * @{
 */
/**
 * @file
 */

#include <abi/ipc/ipc.h>
#include <bd_srv.h>
#include <block.h>
#include <errno.h>
#include <hr.h>
#include <io/log.h>
#include <ipc/hr.h>
#include <ipc/services.h>
#include <loc.h>
#include <mem.h>
#include <task.h>
#include <stdio.h>
#include <stdlib.h>
#include <str_error.h>

#include "superblock.h"
#include "util.h"
#include "var.h"

extern loc_srv_t *hr_srv;

static errno_t hr_raid5_vol_usable(hr_volume_t *);
static ssize_t hr_raid5_get_bad_ext(hr_volume_t *);
static errno_t hr_raid5_update_vol_status(hr_volume_t *);
static void hr_raid5_handle_extent_error(hr_volume_t *, size_t, errno_t);
static void xor(void *, const void *, size_t);
static errno_t hr_raid5_read_degraded(hr_volume_t *, uint64_t, uint64_t,
    void *, size_t);
static errno_t hr_raid5_write(hr_volume_t *, uint64_t, uint64_t, aoff64_t,
    const void *, size_t);
static errno_t hr_raid5_write_parity(hr_volume_t *, uint64_t, uint64_t,
    uint64_t, const void *, size_t);
static errno_t hr_raid5_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
    void *, const void *, size_t);
static errno_t hr_raid5_rebuild(void *);

/* bdops */
static errno_t hr_raid5_bd_open(bd_srvs_t *, bd_srv_t *);
static errno_t hr_raid5_bd_close(bd_srv_t *);
static errno_t hr_raid5_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
    size_t);
static errno_t hr_raid5_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
static errno_t hr_raid5_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
    const void *, size_t);
static errno_t hr_raid5_bd_get_block_size(bd_srv_t *, size_t *);
static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *, aoff64_t *);

static bd_ops_t hr_raid5_bd_ops = {
	.open = hr_raid5_bd_open,
	.close = hr_raid5_bd_close,
	.sync_cache = hr_raid5_bd_sync_cache,
	.read_blocks = hr_raid5_bd_read_blocks,
	.write_blocks = hr_raid5_bd_write_blocks,
	.get_block_size = hr_raid5_bd_get_block_size,
	.get_num_blocks = hr_raid5_bd_get_num_blocks
};

errno_t hr_raid5_create(hr_volume_t *new_volume)
{
	errno_t rc;

	assert(new_volume->level == HR_LVL_5 || new_volume->level == HR_LVL_4);

	if (new_volume->extent_no < 3) {
		HR_ERROR("RAID 5 array needs at least 3 devices\n");
		return EINVAL;
	}

	rc = hr_raid5_update_vol_status(new_volume);
	if (rc != EOK)
		return rc;

	bd_srvs_init(&new_volume->hr_bds);
	new_volume->hr_bds.ops = &hr_raid5_bd_ops;
	new_volume->hr_bds.sarg = new_volume;

	rc = hr_register_volume(new_volume);

	return rc;
}

errno_t hr_raid5_init(hr_volume_t *vol)
{
	errno_t rc;
	size_t bsize;
	uint64_t total_blkno;

	assert(vol->level == HR_LVL_5 || vol->level == HR_LVL_4);

	rc = hr_check_devs(vol, &total_blkno, &bsize);
	if (rc != EOK)
		return rc;

	vol->nblocks = total_blkno;
	vol->bsize = bsize;
	vol->data_offset = HR_DATA_OFF;
	vol->data_blkno = vol->nblocks - (vol->data_offset * vol->extent_no) -
	    (vol->nblocks / vol->extent_no);
	vol->strip_size = HR_STRIP_SIZE;

	return EOK;
}

void hr_raid5_status_event(hr_volume_t *vol)
{
	fibril_mutex_lock(&vol->lock);
	(void)hr_raid5_update_vol_status(vol);
	fibril_mutex_unlock(&vol->lock);
}

errno_t hr_raid5_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
{
	HR_DEBUG("hr_raid5_add_hotspare()\n");

	fibril_mutex_lock(&vol->lock);

	if (vol->hotspare_no >= HR_MAX_HOTSPARES) {
		HR_ERROR("hr_raid5_add_hotspare(): cannot add more hotspares "
		    "to \"%s\"\n", vol->devname);
		fibril_mutex_unlock(&vol->lock);
		return ELIMIT;
	}

	vol->hotspares[vol->hotspare_no].svc_id = hotspare;
	hr_update_hotspare_status(vol, vol->hotspare_no, HR_EXT_HOTSPARE);

	vol->hotspare_no++;

	/*
	 * If the volume is degraded, start a rebuild right away.
	 */
	if (vol->status == HR_VOL_DEGRADED) {
		HR_DEBUG("hr_raid5_add_hotspare(): volume in DEGRADED state, "
		    "spawning new rebuild fibril\n");
		fid_t fib = fibril_create(hr_raid5_rebuild, vol);
		if (fib == 0) {
			/* do not leak the volume lock on failure */
			fibril_mutex_unlock(&vol->lock);
			return ENOMEM;
		}
		fibril_start(fib);
		fibril_detach(fib);
	}

	fibril_mutex_unlock(&vol->lock);

	return EOK;
}

static errno_t hr_raid5_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
{
	HR_DEBUG("hr_bd_open()\n");
	return EOK;
}

static errno_t hr_raid5_bd_close(bd_srv_t *bd)
{
	HR_DEBUG("hr_bd_close()\n");
	return EOK;
}

static errno_t hr_raid5_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid5_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}

static errno_t hr_raid5_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid5_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}

static errno_t hr_raid5_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid5_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}

static errno_t hr_raid5_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rsize = vol->bsize;
	return EOK;
}

static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rnb = vol->data_blkno;
	return EOK;
}

static errno_t hr_raid5_vol_usable(hr_volume_t *vol)
{
	if (vol->status == HR_VOL_ONLINE ||
	    vol->status == HR_VOL_DEGRADED ||
	    vol->status == HR_VOL_REBUILD)
		return EOK;
	return EIO;
}

/*
 * Returns (-1) if all extents are online,
 * else returns index of first bad one.
 */
static ssize_t hr_raid5_get_bad_ext(hr_volume_t *vol)
{
	for (size_t i = 0; i < vol->extent_no; i++)
		if (vol->extents[i].status != HR_EXT_ONLINE)
			return i;
	return -1;
}

static errno_t hr_raid5_update_vol_status(hr_volume_t *vol)
{
	hr_vol_status_t old_state = vol->status;
	size_t bad = 0;
	for (size_t i = 0; i < vol->extent_no; i++)
		if (vol->extents[i].status != HR_EXT_ONLINE)
			bad++;

	switch (bad) {
	case 0:
		if (old_state != HR_VOL_ONLINE)
			hr_update_vol_status(vol, HR_VOL_ONLINE);
		return EOK;
	case 1:
		if (old_state != HR_VOL_DEGRADED &&
		    old_state != HR_VOL_REBUILD) {

			hr_update_vol_status(vol, HR_VOL_DEGRADED);

			if (vol->hotspare_no > 0) {
				fid_t fib = fibril_create(hr_raid5_rebuild,
				    vol);
				if (fib == 0)
					return ENOMEM;
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
		return EOK;
	default:
		if (old_state != HR_VOL_FAULTY)
			hr_update_vol_status(vol, HR_VOL_FAULTY);
		return EIO;
	}
}

static void hr_raid5_handle_extent_error(hr_volume_t *vol, size_t extent,
    errno_t rc)
{
	if (rc == ENOENT)
		hr_update_ext_status(vol, extent, HR_EXT_MISSING);
	else if (rc != EOK)
		hr_update_ext_status(vol, extent, HR_EXT_FAILED);
}

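/*
 * XOR helper for two buffers; it works in sizeof(uint64_t) chunks, so
 * the length is assumed to be a multiple of 8 bytes (the block and
 * strip sizes used by this driver appear to satisfy that).
 */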
static void xor(void *dst, const void *src, size_t size)
{
	size_t i;
	uint64_t *d = dst;
	const uint64_t *s = src;

	for (i = 0; i < size / sizeof(uint64_t); ++i)
		*d++ ^= *s++;
}

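/*
 * Reconstruct data of a missing extent by XORing together the
 * corresponding blocks of all remaining extents (data and parity).
 *
 * Illustration (assuming 4 extents where extent 2 is bad):
 *
 *   D2 = D0 ^ D1 ^ P, because P was written as D0 ^ D1 ^ D2.
 */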
static errno_t hr_raid5_read_degraded(hr_volume_t *vol, uint64_t bad,
    uint64_t block, void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	/* read all other extents in the stripe */
	bool first = true;
	for (i = 0; i < vol->extent_no; i++) {
		if (i == bad)
			continue;

		if (first) {
			rc = block_read_direct(vol->extents[i].svc_id, block,
			    cnt, xorbuf);
			if (rc != EOK)
				goto end;

			first = false;
		} else {
			rc = block_read_direct(vol->extents[i].svc_id, block,
			    cnt, buf);
			if (rc != EOK)
				goto end;
			xor(xorbuf, buf, len);
		}
	}

	memcpy(data, xorbuf, len);
end:
	free(xorbuf);
	free(buf);
	return rc;
}

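/*
 * Write one strip worth of data and keep parity consistent.
 *
 * Three cases are handled below:
 *  - all extents (or only the parity extent) are healthy: write the
 *    data, then recompute and write parity (skipped when the parity
 *    extent itself is the bad one),
 *  - the target data extent is the bad one: reconstruct parity from
 *    the other data extents XORed with the new data and write only
 *    the parity,
 *  - some other data extent is bad: read-modify-write, i.e.
 *    new parity = old data ^ old parity ^ new data.
 */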
static errno_t hr_raid5_write(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, aoff64_t ba, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	ssize_t bad = hr_raid5_get_bad_ext(vol);
	if (bad == -1 || (size_t)bad == p_extent) {
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			return rc;
		/*
		 * DEGRADED parity - skip parity write
		 */
		if ((size_t)bad == p_extent)
			return EOK;

		rc = hr_raid5_write_parity(vol, p_extent, extent, ba, data,
		    cnt);
		return rc;
	}

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	if (extent == (size_t)bad) {
		/*
		 * new parity = read other and xor in new data
		 *
		 * write new parity
		 */
		bool first = true;
		for (i = 0; i < vol->extent_no; i++) {
			if (i == (size_t)bad)
				continue;
			if (i == p_extent)
				continue;
			if (first) {
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, xorbuf);
				if (rc != EOK)
					goto end;

				first = false;
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, buf);
				if (rc != EOK)
					goto end;
				xor(xorbuf, buf, len);
			}
		}
		xor(xorbuf, data, len);
		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
	} else {
		/*
		 * new parity = xor original data and old parity and new data
		 *
		 * write parity, new data
		 */
		rc = block_read_direct(vol->extents[extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_read_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    buf);
		if (rc != EOK)
			goto end;

		xor(xorbuf, buf, len);

		xor(xorbuf, data, len);

		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			goto end;
	}
end:
	free(xorbuf);
	free(buf);
	return rc;
}

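/*
 * Recompute the parity strip from scratch: XOR together all data
 * extents of the stripe, substituting the caller's new data for the
 * extent being written, and store the result on the parity extent.
 */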
static errno_t hr_raid5_write_parity(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, uint64_t block, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	bool first = true;
	for (i = 0; i < vol->extent_no; i++) {
		if (i == p_extent)
			continue;

		if (first) {
			if (i == extent) {
				memcpy(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, xorbuf);
				if (rc != EOK)
					goto end;
			}

			first = false;
		} else {
			if (i == extent) {
				xor(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, buf);
				if (rc != EOK)
					goto end;

				xor(xorbuf, buf, len);
			}
		}
	}

	rc = block_write_direct(vol->extents[p_extent].svc_id, block, cnt,
	    xorbuf);
end:
	free(xorbuf);
	free(buf);
	return rc;
}

static errno_t hr_raid5_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
    size_t cnt, void *dst, const void *src, size_t size)
{
	hr_volume_t *vol = bd->srvs->sarg;
	errno_t rc;
	uint64_t phys_block, len;
	size_t left;
	const uint8_t *data_write = src;
	uint8_t *data_read = dst;

	/* propagate sync */
	if (type == HR_BD_SYNC && ba == 0 && cnt == 0) {
		hr_sync_all_extents(vol);
		rc = hr_raid5_update_vol_status(vol);
		return rc;
	}

	if (type == HR_BD_READ || type == HR_BD_WRITE)
		if (size < cnt * vol->bsize)
			return EINVAL;

	rc = hr_check_ba_range(vol, cnt, ba);
	if (rc != EOK)
		return rc;

	uint8_t RLQ = vol->RLQ;
	hr_level_t level = vol->level;

	uint64_t strip_size = vol->strip_size / vol->bsize; /* in blocks */
	uint64_t stripe = (ba / strip_size); /* stripe number */

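	/*
	 * Worked example of the mapping below: with 4 extents and the
	 * RAID5_NR layout, parity rotates every (extent_no - 1) = 3
	 * stripes:
	 *
	 *   stripes 0-2  -> parity on extent 3
	 *   stripes 3-5  -> parity on extent 2
	 *   stripes 6-8  -> parity on extent 1
	 *   stripes 9-11 -> parity on extent 0
	 *
	 * e.g. stripe 4: p_extent = 3 - (4 / 3) % 4 = 2, and since
	 * 4 % 3 = 1 < 2, its data goes to extent 1.
	 */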
	/* parity extent */
	uint64_t p_extent;
	if (level == HR_LVL_4 && RLQ == HR_RLQ_RAID4_0) {
		p_extent = 0;
	} else if (level == HR_LVL_4 && RLQ == HR_RLQ_RAID4_N) {
		p_extent = vol->extent_no - 1;
	} else if (level == HR_LVL_5 && RLQ == HR_RLQ_RAID5_0R) {
		p_extent = (stripe / (vol->extent_no - 1)) % vol->extent_no;
	} else if (level == HR_LVL_5 &&
	    (RLQ == HR_RLQ_RAID5_NR || RLQ == HR_RLQ_RAID5_NC)) {
		p_extent = (vol->extent_no - 1) -
		    (stripe / (vol->extent_no - 1)) % vol->extent_no;
	} else {
		return EINVAL;
	}

	/* data extent */
	uint64_t extent;
	if (level == HR_LVL_4 && RLQ == HR_RLQ_RAID4_0) {
		extent = (stripe % (vol->extent_no - 1)) + 1;
	} else if (level == HR_LVL_4 && RLQ == HR_RLQ_RAID4_N) {
		extent = stripe % (vol->extent_no - 1);
	} else if (level == HR_LVL_5 &&
	    (RLQ == HR_RLQ_RAID5_0R || RLQ == HR_RLQ_RAID5_NR)) {
		if ((stripe % (vol->extent_no - 1)) < p_extent)
			extent = stripe % (vol->extent_no - 1);
		else
			extent = (stripe % (vol->extent_no - 1)) + 1;
	} else if (level == HR_LVL_5 && RLQ == HR_RLQ_RAID5_NC) {
		extent = ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
		    vol->extent_no;
	} else {
		return EINVAL;
	}

	uint64_t ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
	uint64_t strip_off = ba % strip_size; /* strip offset */

	fibril_mutex_lock(&vol->lock);

	rc = hr_raid5_vol_usable(vol);
	if (rc != EOK) {
		fibril_mutex_unlock(&vol->lock);
		return EIO;
	}

	left = cnt;

	while (left != 0) {
		phys_block = ext_stripe * strip_size + strip_off;
		cnt = min(left, strip_size - strip_off);
		len = vol->bsize * cnt;
		hr_add_ba_offset(vol, &phys_block);
		switch (type) {
		case HR_BD_SYNC:
			if (vol->extents[extent].status != HR_EXT_ONLINE)
				break;
			rc = block_sync_cache(vol->extents[extent].svc_id,
			    phys_block, cnt);
			/* allow unsupported sync */
			if (rc == ENOTSUP)
				rc = EOK;
			break;
		case HR_BD_READ:
		retry_read:
			ssize_t bad = hr_raid5_get_bad_ext(vol);
			if (bad > -1 && extent == (size_t)bad) {
				rc = hr_raid5_read_degraded(vol, bad,
				    phys_block, data_read, cnt);
			} else {
				rc = block_read_direct(vol->extents[extent].svc_id,
				    phys_block, cnt, data_read);
			}
			data_read += len;
			break;
		case HR_BD_WRITE:
		retry_write:
			rc = hr_raid5_write(vol, p_extent, extent, phys_block,
			    data_write, cnt);
			data_write += len;
			break;
		default:
			rc = EINVAL;
			goto error;
		}

		if (rc == ENOMEM)
			goto error;

		hr_raid5_handle_extent_error(vol, extent, rc);

		if (rc != EOK) {
			rc = hr_raid5_update_vol_status(vol);
			if (rc == EOK) {
				/*
				 * State changed from ONLINE -> DEGRADED,
				 * rewind and retry
				 */
				if (type == HR_BD_WRITE) {
					data_write -= len;
					goto retry_write;
				} else if (type == HR_BD_READ) {
					data_read -= len;
					goto retry_read;
				}
			} else {
				rc = EIO;
				goto error;
			}
		}

		left -= cnt;
		strip_off = 0;
		stripe++;

		ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */

		if (level == HR_LVL_5 && RLQ == HR_RLQ_RAID5_0R) {
			p_extent = (stripe / (vol->extent_no - 1)) %
			    vol->extent_no;
		} else if (level == HR_LVL_5 &&
		    (RLQ == HR_RLQ_RAID5_NR || RLQ == HR_RLQ_RAID5_NC)) {
			p_extent = (vol->extent_no - 1) -
			    (stripe / (vol->extent_no - 1)) % vol->extent_no;
		}

		if (level == HR_LVL_4 && RLQ == HR_RLQ_RAID4_0) {
			extent = (stripe % (vol->extent_no - 1)) + 1;
		} else if (level == HR_LVL_4 && RLQ == HR_RLQ_RAID4_N) {
			extent = stripe % (vol->extent_no - 1);
		} else if (level == HR_LVL_5 &&
		    (RLQ == HR_RLQ_RAID5_0R || RLQ == HR_RLQ_RAID5_NR)) {
			if ((stripe % (vol->extent_no - 1)) < p_extent)
				extent = stripe % (vol->extent_no - 1);
			else
				extent = (stripe % (vol->extent_no - 1)) + 1;
		} else if (level == HR_LVL_5 && RLQ == HR_RLQ_RAID5_NC) {
			extent = ((stripe % (vol->extent_no - 1)) + p_extent +
			    1) % vol->extent_no;
		}
	}

error:
	(void)hr_raid5_update_vol_status(vol);
	fibril_mutex_unlock(&vol->lock);
	return rc;
}

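/*
 * Rebuild fibril: swap a hotspare in for the first failed extent,
 * then walk the array reconstructing each block range as the XOR of
 * all remaining extents and writing the result to the new extent.
 * The volume lock is dropped between batches so regular I/O can make
 * progress during the rebuild.
 */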
static errno_t hr_raid5_rebuild(void *arg)
{
	HR_DEBUG("hr_raid5_rebuild()\n");

	hr_volume_t *vol = arg;
	errno_t rc = EOK;
	void *buf = NULL, *xorbuf = NULL;

	fibril_mutex_lock(&vol->lock);

	if (vol->hotspare_no == 0) {
		HR_WARN("hr_raid5_rebuild(): no free hotspares on \"%s\", "
		    "aborting rebuild\n", vol->devname);
		/* retval isn't checked for now */
		goto end;
	}

	size_t bad = vol->extent_no;
	for (size_t i = 0; i < vol->extent_no; i++) {
		if (vol->extents[i].status == HR_EXT_FAILED) {
			bad = i;
			break;
		}
	}

	if (bad == vol->extent_no) {
		HR_WARN("hr_raid5_rebuild(): no bad extent on \"%s\", "
		    "aborting rebuild\n", vol->devname);
		/* retval isn't checked for now */
		goto end;
	}

	size_t hotspare_idx = vol->hotspare_no - 1;

	hr_ext_status_t hs_state = vol->hotspares[hotspare_idx].status;
	if (hs_state != HR_EXT_HOTSPARE) {
		HR_ERROR("hr_raid5_rebuild(): invalid hotspare state \"%s\", "
		    "aborting rebuild\n", hr_get_ext_status_msg(hs_state));
		rc = EINVAL;
		goto end;
	}

	HR_DEBUG("hr_raid5_rebuild(): swapping in hotspare\n");

	block_fini(vol->extents[bad].svc_id);

	vol->extents[bad].svc_id = vol->hotspares[hotspare_idx].svc_id;
	hr_update_ext_status(vol, bad, HR_EXT_HOTSPARE);

	vol->hotspares[hotspare_idx].svc_id = 0;
	hr_update_hotspare_status(vol, hotspare_idx, HR_EXT_MISSING);

	vol->hotspare_no--;

	hr_extent_t *rebuild_ext = &vol->extents[bad];

	rc = block_init(rebuild_ext->svc_id);
	if (rc != EOK) {
		HR_ERROR("hr_raid5_rebuild(): initing (%lu) failed, "
		    "aborting rebuild\n", rebuild_ext->svc_id);
		goto end;
	}

	HR_DEBUG("hr_raid5_rebuild(): starting rebuild on (%lu)\n",
	    rebuild_ext->svc_id);

	hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
	hr_update_vol_status(vol, HR_VOL_REBUILD);

	uint64_t max_blks = DATA_XFER_LIMIT / vol->bsize;
	uint64_t left = vol->data_blkno / (vol->extent_no - 1);
	buf = malloc(max_blks * vol->bsize);
	xorbuf = malloc(max_blks * vol->bsize);
	if (buf == NULL || xorbuf == NULL) {
		/* bail out instead of using NULL buffers below */
		rc = ENOMEM;
		goto end;
	}

	uint64_t ba = 0, cnt;
	hr_add_ba_offset(vol, &ba);

	while (left != 0) {
		cnt = min(left, max_blks);

		/*
		 * Almost the same as read_degraded,
		 * but we don't want to allocate a new
		 * xorbuf for each rebuild batch.
		 */
		bool first = true;
		for (size_t i = 0; i < vol->extent_no; i++) {
			if (i == bad)
				continue;
			if (first)
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, xorbuf);
			else
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, buf);
			if (rc != EOK) {
				hr_raid5_handle_extent_error(vol, i, rc);
				HR_ERROR("rebuild on \"%s\" (%lu), failed due "
				    "to a failed ONLINE extent, number %lu\n",
				    vol->devname, vol->svc_id, i);
				goto end;
			}

			if (!first)
				xor(xorbuf, buf, cnt * vol->bsize);
			else
				first = false;
		}

		rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, xorbuf);
		if (rc != EOK) {
			hr_raid5_handle_extent_error(vol, bad, rc);
			HR_ERROR("rebuild on \"%s\" (%lu), failed due to "
			    "the rebuilt extent number %lu failing\n",
			    vol->devname, vol->svc_id, bad);
			goto end;
		}

		ba += cnt;
		left -= cnt;

		/*
		 * Let other IO requests be served
		 * during rebuild.
		 */
		fibril_mutex_unlock(&vol->lock);
		fibril_mutex_lock(&vol->lock);
	}

	HR_DEBUG("hr_raid5_rebuild(): rebuild finished on \"%s\" (%lu), "
	    "extent number %lu\n", vol->devname, vol->svc_id, bad);

	hr_update_ext_status(vol, bad, HR_EXT_ONLINE);
	/*
	 * For now write metadata at the end, because
	 * we don't sync metadata across extents yet.
	 */
	hr_write_meta_to_ext(vol, bad);
end:
	(void)hr_raid5_update_vol_status(vol);

	fibril_mutex_unlock(&vol->lock);

	if (buf != NULL)
		free(buf);

	if (xorbuf != NULL)
		free(xorbuf);

	return rc;
}

/** @}
 */