source: mainline/uspace/srv/bd/hr/raid4.c@ bf0a791

Last change on this file since bf0a791 was bf0a791, checked in by Miroslav Cimerman <mc@…>, 7 months ago

hr: cstyle

  • Property mode set to 100644
File size: 17.4 KB
/*
 * Copyright (c) 2024 Miroslav Cimerman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup hr
 * @{
 */
/**
 * @file
 */

#include <abi/ipc/ipc.h>
#include <bd_srv.h>
#include <block.h>
#include <errno.h>
#include <hr.h>
#include <io/log.h>
#include <ipc/hr.h>
#include <ipc/services.h>
#include <loc.h>
#include <mem.h>
#include <task.h>
#include <stdio.h>
#include <stdlib.h>
#include <str_error.h>

#include "superblock.h"
#include "util.h"
#include "var.h"

extern loc_srv_t *hr_srv;

static errno_t hr_raid4_vol_usable(hr_volume_t *);
static ssize_t hr_raid4_get_bad_ext(hr_volume_t *);
static errno_t hr_raid4_update_vol_status(hr_volume_t *);
static void hr_raid4_handle_extent_error(hr_volume_t *, size_t, errno_t);
static void xor(void *, const void *, size_t);
static errno_t hr_raid4_read_degraded(hr_volume_t *, uint64_t, uint64_t,
    void *, size_t);
static errno_t hr_raid4_write(hr_volume_t *, uint64_t, aoff64_t, const void *,
    size_t);
static errno_t hr_raid4_write_parity(hr_volume_t *, uint64_t, uint64_t,
    const void *, size_t);
static errno_t hr_raid4_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
    void *, const void *, size_t);
static errno_t hr_raid4_rebuild(void *);

/* bdops */
static errno_t hr_raid4_bd_open(bd_srvs_t *, bd_srv_t *);
static errno_t hr_raid4_bd_close(bd_srv_t *);
static errno_t hr_raid4_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
    size_t);
static errno_t hr_raid4_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
static errno_t hr_raid4_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
    const void *, size_t);
static errno_t hr_raid4_bd_get_block_size(bd_srv_t *, size_t *);
static errno_t hr_raid4_bd_get_num_blocks(bd_srv_t *, aoff64_t *);

static errno_t hr_raid4_write_parity(hr_volume_t *, uint64_t, uint64_t,
    const void *, size_t);

static bd_ops_t hr_raid4_bd_ops = {
	.open = hr_raid4_bd_open,
	.close = hr_raid4_bd_close,
	.sync_cache = hr_raid4_bd_sync_cache,
	.read_blocks = hr_raid4_bd_read_blocks,
	.write_blocks = hr_raid4_bd_write_blocks,
	.get_block_size = hr_raid4_bd_get_block_size,
	.get_num_blocks = hr_raid4_bd_get_num_blocks
};

errno_t hr_raid4_create(hr_volume_t *new_volume)
{
	errno_t rc;

	assert(new_volume->level == HR_LVL_4);

	if (new_volume->dev_no < 3) {
		HR_ERROR("RAID 4 array needs at least 3 devices\n");
		return EINVAL;
	}

	rc = hr_raid4_update_vol_status(new_volume);
	if (rc != EOK)
		return rc;

	bd_srvs_init(&new_volume->hr_bds);
	new_volume->hr_bds.ops = &hr_raid4_bd_ops;
	new_volume->hr_bds.sarg = new_volume;

	rc = hr_register_volume(new_volume);

	return rc;
}

errno_t hr_raid4_init(hr_volume_t *vol)
{
	errno_t rc;
	size_t bsize;
	uint64_t total_blkno;

	assert(vol->level == HR_LVL_4);

	rc = hr_check_devs(vol, &total_blkno, &bsize);
	if (rc != EOK)
		return rc;

	vol->nblocks = total_blkno;
	vol->bsize = bsize;
	vol->data_offset = HR_DATA_OFF;
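	/*
	 * Usable capacity: all blocks minus the metadata area reserved on
	 * every extent, minus one extent's worth of blocks that holds
	 * parity. Illustrative example (numbers assumed, not taken from
	 * the source): 4 extents of 1000 blocks each with HR_DATA_OFF = 8
	 * give 4000 - (8 * 4) - (4000 / 4) = 2968 data blocks.
	 */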
	vol->data_blkno = vol->nblocks - (vol->data_offset * vol->dev_no) -
	    (vol->nblocks / vol->dev_no);
	vol->strip_size = HR_STRIP_SIZE;

	return EOK;
}

void hr_raid4_status_event(hr_volume_t *vol)
{
	fibril_mutex_lock(&vol->lock);
	(void)hr_raid4_update_vol_status(vol);
	fibril_mutex_unlock(&vol->lock);
}

errno_t hr_raid4_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
{
	HR_DEBUG("hr_raid4_add_hotspare()\n");

	fibril_mutex_lock(&vol->lock);

	if (vol->hotspare_no >= HR_MAX_HOTSPARES) {
		HR_ERROR("hr_raid4_add_hotspare(): cannot add more hotspares "
		    "to \"%s\"\n", vol->devname);
		fibril_mutex_unlock(&vol->lock);
		return ELIMIT;
	}

	vol->hotspares[vol->hotspare_no].svc_id = hotspare;
	vol->hotspares[vol->hotspare_no].status = HR_EXT_HOTSPARE;
	vol->hotspare_no++;

	/*
	 * If the volume is degraded, start rebuild right away.
	 */
	if (vol->status == HR_VOL_DEGRADED) {
		HR_DEBUG("hr_raid4_add_hotspare(): volume in DEGRADED state, "
		    "spawning new rebuild fibril\n");
		fid_t fib = fibril_create(hr_raid4_rebuild, vol);
		if (fib == 0) {
			fibril_mutex_unlock(&vol->lock);
			return EINVAL;
		}
		fibril_start(fib);
		fibril_detach(fib);
	}

	fibril_mutex_unlock(&vol->lock);

	return EOK;
}

static errno_t hr_raid4_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
{
	HR_DEBUG("hr_bd_open()\n");
	return EOK;
}

static errno_t hr_raid4_bd_close(bd_srv_t *bd)
{
	HR_DEBUG("hr_bd_close()\n");
	return EOK;
}

static errno_t hr_raid4_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid4_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}

static errno_t hr_raid4_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid4_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}

static errno_t hr_raid4_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid4_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}

static errno_t hr_raid4_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rsize = vol->bsize;
	return EOK;
}

static errno_t hr_raid4_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rnb = vol->data_blkno;
	return EOK;
}

static errno_t hr_raid4_vol_usable(hr_volume_t *vol)
{
	if (vol->status == HR_VOL_ONLINE ||
	    vol->status == HR_VOL_DEGRADED)
		return EOK;
	return EINVAL;
}

/*
 * Returns (-1) if all extents are online,
 * else returns index of first bad one.
 */
static ssize_t hr_raid4_get_bad_ext(hr_volume_t *vol)
{
	for (size_t i = 0; i < vol->dev_no; i++)
		if (vol->extents[i].status != HR_EXT_ONLINE)
			return i;
	return -1;
}

static errno_t hr_raid4_update_vol_status(hr_volume_t *vol)
{
	hr_vol_status_t old_state = vol->status;
	size_t bad = 0;
	for (size_t i = 0; i < vol->dev_no; i++)
		if (vol->extents[i].status != HR_EXT_ONLINE)
			bad++;

	switch (bad) {
	case 0:
		if (old_state != HR_VOL_ONLINE) {
			HR_WARN("RAID 4 has all extents online, "
			    "marking \"%s\" (%lu) as ONLINE",
			    vol->devname, vol->svc_id);
			vol->status = HR_VOL_ONLINE;
		}
		return EOK;
	case 1:
		if (old_state != HR_VOL_DEGRADED &&
		    old_state != HR_VOL_REBUILD) {
			HR_WARN("RAID 4 array \"%s\" (%lu) has 1 extent "
			    "inactive, marking as DEGRADED",
			    vol->devname, vol->svc_id);
			vol->status = HR_VOL_DEGRADED;
			if (vol->hotspare_no > 0) {
				fid_t fib = fibril_create(hr_raid4_rebuild,
				    vol);
				if (fib == 0) {
					return EINVAL;
				}
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
		return EOK;
	default:
		if (old_state != HR_VOL_FAULTY) {
			HR_WARN("RAID 4 array \"%s\" (%lu) has more "
			    "than one extent unusable, marking as FAULTY",
			    vol->devname, vol->svc_id);
			vol->status = HR_VOL_FAULTY;
		}
		return EINVAL;
	}
}

static void hr_raid4_handle_extent_error(hr_volume_t *vol, size_t extent,
    errno_t rc)
{
	if (rc == ENOENT)
		hr_update_ext_status(vol, extent, HR_EXT_MISSING);
	else if (rc != EOK)
		hr_update_ext_status(vol, extent, HR_EXT_FAILED);
}

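/*
 * XORs src into dst one 64-bit word at a time. This assumes size is a
 * multiple of sizeof(uint64_t), which the callers are expected to
 * guarantee (block- and strip-sized buffers); any trailing bytes would
 * be ignored.
 */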
static void xor(void *dst, const void *src, size_t size)
{
	size_t i;
	uint64_t *d = dst;
	const uint64_t *s = src;

	for (i = 0; i < size / sizeof(uint64_t); ++i)
		*d++ ^= *s++;
}

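/*
 * RAID 4 keeps parity P = D1 ^ D2 ^ ... ^ Dn over the data strips of a
 * stripe, so a strip on the single failed extent can be reconstructed by
 * XORing the corresponding blocks of all remaining extents (data and
 * parity alike), which is what hr_raid4_read_degraded() below does.
 */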
static errno_t hr_raid4_read_degraded(hr_volume_t *vol, uint64_t bad,
    uint64_t block, void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	/* read all other extents in the stripe */
	memset(xorbuf, 0, len);
	for (i = 0; i < vol->dev_no; i++) {
		if (i == bad) {
			continue;
		} else {
			rc = block_read_direct(vol->extents[i].svc_id, block,
			    cnt, buf);
			if (rc != EOK)
				goto end;
			xor(xorbuf, buf, len);
		}
	}

	memcpy(data, xorbuf, len);
end:
	free(xorbuf);
	free(buf);
	return rc;
}

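/*
 * Writes one chunk of data to a data extent and keeps parity (extent 0)
 * consistent. With a failed data extent, parity is recomputed from the
 * surviving data strips plus the new data; otherwise the usual
 * read-modify-write identity is used:
 * new parity = old parity ^ old data ^ new data.
 */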
static errno_t hr_raid4_write(hr_volume_t *vol, uint64_t extent, aoff64_t ba,
    const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	ssize_t bad = hr_raid4_get_bad_ext(vol);
	if (bad < 1) {
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			return rc;
		/*
		 * DEGRADED parity - skip parity write
		 */
		if (bad == 0)
			return EOK;

		rc = hr_raid4_write_parity(vol, extent, ba, data, cnt);
		return rc;
	}

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	if (extent == (size_t)bad) {
		/*
		 * new parity = read other and xor in new data
		 *
		 * write new parity
		 */
		memset(xorbuf, 0, len);
		for (i = 1; i < vol->dev_no; i++) {
			if (i == (size_t)bad) {
				continue;
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, buf);
				if (rc != EOK)
					goto end;
				xor(xorbuf, buf, len);
			}
		}
		xor(xorbuf, data, len);
		rc = block_write_direct(vol->extents[0].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
	} else {
		/*
		 * new parity = xor original data and old parity and new data
		 *
		 * write parity, new data
		 */
		rc = block_read_direct(vol->extents[extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_read_direct(vol->extents[0].svc_id, ba, cnt, buf);
		if (rc != EOK)
			goto end;

		xor(xorbuf, buf, len);

		xor(xorbuf, data, len);

		rc = block_write_direct(vol->extents[0].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			goto end;
	}
end:
	free(xorbuf);
	free(buf);
	return rc;
}

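/*
 * Recomputes the parity strip (extent 0) from scratch: every other data
 * extent is read and XORed together with the new data. The "XXX:
 * subtract method" note below presumably refers to the cheaper
 * read-modify-write alternative (new parity = old parity ^ old data ^
 * new data) that hr_raid4_write() already uses on its non-degraded path.
 */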
static errno_t hr_raid4_write_parity(hr_volume_t *vol, uint64_t extent,
    uint64_t block, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	/*
	 * parity = read and xor all other data extents, xor in new data
	 *
	 * XXX: subtract method
	 */
	memset(xorbuf, 0, len);
	for (i = 1; i < vol->dev_no; i++) {
		if (i == extent) {
			xor(xorbuf, data, len);
		} else {
			rc = block_read_direct(vol->extents[i].svc_id, block,
			    cnt, buf);
			if (rc != EOK)
				goto end;
			xor(xorbuf, buf, len);
		}
	}

	rc = block_write_direct(vol->extents[0].svc_id, block, cnt, xorbuf);
end:
	free(xorbuf);
	free(buf);
	return rc;
}

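/*
 * Common backend for sync/read/write requests: splits the request into
 * strip-sized chunks, maps each chunk to an (extent, physical block)
 * pair and dispatches it, degrading the volume and retrying once when
 * an extent fails mid-request.
 */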
static errno_t hr_raid4_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
    size_t cnt, void *dst, const void *src, size_t size)
{
	hr_volume_t *vol = bd->srvs->sarg;
	errno_t rc;
	uint64_t phys_block, len;
	size_t left;
	const uint8_t *data_write = src;
	uint8_t *data_read = dst;

	/* propagate sync */
	if (type == HR_BD_SYNC && ba == 0 && cnt == 0) {
		hr_sync_all_extents(vol);
		rc = hr_raid4_update_vol_status(vol);
		return rc;
	}

	if (type == HR_BD_READ || type == HR_BD_WRITE)
		if (size < cnt * vol->bsize)
			return EINVAL;

	rc = hr_check_ba_range(vol, cnt, ba);
	if (rc != EOK)
		return rc;

	uint64_t strip_size = vol->strip_size / vol->bsize; /* in blocks */
	uint64_t stripe = (ba / strip_size); /* stripe number */
	uint64_t extent = (stripe % (vol->dev_no - 1)) + 1;
	uint64_t ext_stripe = stripe / (vol->dev_no - 1); /* stripe level */
	uint64_t strip_off = ba % strip_size; /* strip offset */
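	/*
	 * Extent 0 holds parity; consecutive strips go to extents
	 * 1 .. dev_no - 1 in turn. Illustrative example (numbers assumed,
	 * not from the source): with dev_no = 4 and strip_size = 16
	 * blocks, ba = 100 gives stripe = 6, extent = (6 % 3) + 1 = 1,
	 * ext_stripe = 2 and strip_off = 4.
	 */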

	fibril_mutex_lock(&vol->lock);

	rc = hr_raid4_vol_usable(vol);
	if (rc != EOK) {
		fibril_mutex_unlock(&vol->lock);
		return EIO;
	}

	left = cnt;
	while (left != 0) {
		phys_block = ext_stripe * strip_size + strip_off;
		cnt = min(left, strip_size - strip_off);
		len = vol->bsize * cnt;
		hr_add_ba_offset(vol, &phys_block);
		switch (type) {
		case HR_BD_SYNC:
			if (vol->extents[extent].status != HR_EXT_ONLINE)
				break;
			rc = block_sync_cache(vol->extents[extent].svc_id,
			    phys_block, cnt);
			/* allow unsupported sync */
			if (rc == ENOTSUP)
				rc = EOK;
			break;
		case HR_BD_READ:
		retry_read:
			ssize_t bad = hr_raid4_get_bad_ext(vol);
			if (bad > 0 && extent == (size_t)bad) {
				rc = hr_raid4_read_degraded(vol, bad,
				    phys_block, data_read, cnt);
			} else {
				rc = block_read_direct(vol->extents[extent].svc_id,
				    phys_block, cnt, data_read);
			}
			data_read += len;
			break;
		case HR_BD_WRITE:
		retry_write:
			rc = hr_raid4_write(vol, extent, phys_block,
			    data_write, cnt);
			data_write += len;
			break;
		default:
			rc = EINVAL;
			goto error;
		}

		if (rc == ENOMEM)
			goto error;

		hr_raid4_handle_extent_error(vol, extent, rc);

		if (rc != EOK) {
			rc = hr_raid4_update_vol_status(vol);
			if (rc == EOK) {
				/*
				 * State changed from ONLINE -> DEGRADED,
				 * rewind and retry
				 */
				if (type == HR_BD_WRITE) {
					data_write -= len;
					goto retry_write;
				} else if (type == HR_BD_READ) {
					data_read -= len;
					goto retry_read;
				}
			} else {
				rc = EIO;
				goto error;
			}
		}

		left -= cnt;
		strip_off = 0;
		extent++;
		if (extent >= vol->dev_no) {
			ext_stripe++;
			extent = 1;
		}
	}

error:
	(void)hr_raid4_update_vol_status(vol);
	fibril_mutex_unlock(&vol->lock);
	return rc;
}

static errno_t hr_raid4_rebuild(void *arg)
{
	HR_DEBUG("hr_raid4_rebuild()\n");

	hr_volume_t *vol = arg;
	errno_t rc = EOK;
	void *buf = NULL, *xorbuf = NULL;

	fibril_mutex_lock(&vol->lock);

	if (vol->hotspare_no == 0) {
		HR_WARN("hr_raid4_rebuild(): no free hotspares on \"%s\", "
		    "aborting rebuild\n", vol->devname);
		/* retval isn't checked for now */
		goto end;
	}

	size_t bad = vol->dev_no;
	for (size_t i = 0; i < vol->dev_no; i++) {
		if (vol->extents[i].status == HR_EXT_FAILED) {
			bad = i;
			break;
		}
	}

	if (bad == vol->dev_no) {
		HR_WARN("hr_raid4_rebuild(): no bad extent on \"%s\", "
		    "aborting rebuild\n", vol->devname);
		/* retval isn't checked for now */
		goto end;
	}

	block_fini(vol->extents[bad].svc_id);

	size_t hotspare_idx = vol->hotspare_no - 1;

	vol->extents[bad].svc_id = vol->hotspares[hotspare_idx].svc_id;
	hr_update_ext_status(vol, bad, HR_EXT_REBUILD);

	vol->hotspares[hotspare_idx].svc_id = 0;
	vol->hotspares[hotspare_idx].status = HR_EXT_MISSING;
	vol->hotspare_no--;

	HR_WARN("hr_raid4_rebuild(): changing volume \"%s\" (%lu) state "
	    "from %s to %s\n", vol->devname, vol->svc_id,
	    hr_get_vol_status_msg(vol->status),
	    hr_get_vol_status_msg(HR_VOL_REBUILD));
	vol->status = HR_VOL_REBUILD;

	hr_extent_t *hotspare = &vol->extents[bad];

	HR_DEBUG("hr_raid4_rebuild(): initing (%lu)\n", hotspare->svc_id);

	rc = block_init(hotspare->svc_id);
	if (rc != EOK) {
		HR_ERROR("hr_raid4_rebuild(): initing (%lu) failed, "
		    "aborting rebuild\n", hotspare->svc_id);
		goto end;
	}

	uint64_t max_blks = DATA_XFER_LIMIT / vol->bsize;
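	/*
	 * Each extent holds data_blkno / (dev_no - 1) blocks, so that is
	 * how many blocks have to be reconstructed onto the hotspare, in
	 * batches of at most DATA_XFER_LIMIT bytes.
	 */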
	uint64_t left = vol->data_blkno / (vol->dev_no - 1);
	buf = malloc(max_blks * vol->bsize);
	xorbuf = malloc(max_blks * vol->bsize);
	if (buf == NULL || xorbuf == NULL) {
		rc = ENOMEM;
		goto end;
	}

	uint64_t ba = 0, cnt;
	hr_add_ba_offset(vol, &ba);
	while (left != 0) {
		cnt = min(left, max_blks);

		/*
		 * Almost the same as read_degraded, but we don't want
		 * to allocate a new xorbuf for each rebuild batch.
		 */
		bool first = true;
		for (size_t i = 0; i < vol->dev_no; i++) {
			if (i == bad)
				continue;
			rc = block_read_direct(vol->extents[i].svc_id, ba, cnt,
			    buf);
			if (rc != EOK) {
				hr_raid4_handle_extent_error(vol, i, rc);
				HR_ERROR("rebuild on \"%s\" (%lu), failed due "
				    "to a failed ONLINE extent, number %lu\n",
				    vol->devname, vol->svc_id, i);
				goto end;
			}

			if (first)
				memcpy(xorbuf, buf, cnt * vol->bsize);
			else
				xor(xorbuf, buf, cnt * vol->bsize);

			first = false;
		}

		rc = block_write_direct(hotspare->svc_id, ba, cnt, xorbuf);
		if (rc != EOK) {
			hr_raid4_handle_extent_error(vol, bad, rc);
			HR_ERROR("rebuild on \"%s\" (%lu), failed due to "
			    "the rebuilt extent number %lu failing\n",
			    vol->devname, vol->svc_id, bad);
			goto end;
		}

		ba += cnt;
		left -= cnt;
	}

	HR_DEBUG("hr_raid4_rebuild(): rebuild finished on \"%s\" (%lu), "
	    "extent number %lu\n", vol->devname, vol->svc_id, hotspare_idx);

	hr_update_ext_status(vol, bad, HR_EXT_ONLINE);
	/*
	 * For now write metadata at the end, because
	 * we don't sync metadata across extents yet.
	 */
	hr_write_meta_to_ext(vol, bad);
end:
	(void)hr_raid4_update_vol_status(vol);

	fibril_mutex_unlock(&vol->lock);

	if (buf != NULL)
		free(buf);

	if (xorbuf != NULL)
		free(xorbuf);

	return rc;
}

/** @}
 */