source: mainline/uspace/srv/bd/hr/raid5.c@ 2958e70

Last change on this file since 2958e70 was f1be66bf, checked in by Miroslav Cimerman <mc@…>, 8 months ago

hr: raid5.c: fast patch to make new asserts pass

  • Property mode set to 100644
File size: 21.2 KB
RevLine 
[dceb6e7]1/*
2 * Copyright (c) 2024 Miroslav Cimerman
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup hr
30 * @{
31 */
32/**
33 * @file
34 */
35
36#include <abi/ipc/ipc.h>
37#include <bd_srv.h>
38#include <block.h>
39#include <errno.h>
40#include <hr.h>
41#include <io/log.h>
42#include <ipc/hr.h>
43#include <ipc/services.h>
44#include <loc.h>
[978130a]45#include <mem.h>
[dceb6e7]46#include <task.h>
47#include <stdio.h>
48#include <stdlib.h>
49#include <str_error.h>
50
51#include "superblock.h"
52#include "util.h"
53#include "var.h"
54
55extern loc_srv_t *hr_srv;
56
[733564a]57static errno_t hr_raid5_vol_usable(hr_volume_t *);
58static ssize_t hr_raid5_get_bad_ext(hr_volume_t *);
59static errno_t hr_raid5_update_vol_status(hr_volume_t *);
[aa7864b]60static void hr_raid5_handle_extent_error(hr_volume_t *, size_t, errno_t);
[733564a]61static void xor(void *, const void *, size_t);
62static errno_t hr_raid5_read_degraded(hr_volume_t *, uint64_t, uint64_t,
63 void *, size_t);
64static errno_t hr_raid5_write(hr_volume_t *, uint64_t, uint64_t, aoff64_t,
65 const void *, size_t);
66static errno_t hr_raid5_write_parity(hr_volume_t *, uint64_t, uint64_t,
67 uint64_t, const void *, size_t);
68static errno_t hr_raid5_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
69 void *, const void *, size_t);
[aa7864b]70static errno_t hr_raid5_rebuild(void *);
[733564a]71
72/* bdops */
[dceb6e7]73static errno_t hr_raid5_bd_open(bd_srvs_t *, bd_srv_t *);
74static errno_t hr_raid5_bd_close(bd_srv_t *);
75static errno_t hr_raid5_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
76 size_t);
77static errno_t hr_raid5_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
78static errno_t hr_raid5_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
79 const void *, size_t);
80static errno_t hr_raid5_bd_get_block_size(bd_srv_t *, size_t *);
81static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *, aoff64_t *);
82
/** Block device operations dispatch table for RAID 4/5 volumes. */
static bd_ops_t hr_raid5_bd_ops = {
	.open = hr_raid5_bd_open,
	.close = hr_raid5_bd_close,
	.sync_cache = hr_raid5_bd_sync_cache,
	.read_blocks = hr_raid5_bd_read_blocks,
	.write_blocks = hr_raid5_bd_write_blocks,
	.get_block_size = hr_raid5_bd_get_block_size,
	.get_num_blocks = hr_raid5_bd_get_num_blocks
};
92
/** Create a RAID 4/5 volume service.
 *
 * Validates the extent count, derives the initial volume state,
 * initializes the block device operations and registers the volume
 * with the location service.
 *
 * @param new_volume Volume to set up; level must be HR_LVL_4 or HR_LVL_5.
 *
 * @return EOK on success, EINVAL on fewer than 3 extents, or an error
 *         propagated from the status update or registration.
 */
errno_t hr_raid5_create(hr_volume_t *new_volume)
{
	errno_t rc;

	assert(new_volume->level == HR_LVL_5 || new_volume->level == HR_LVL_4);

	if (new_volume->extent_no < 3) {
		HR_ERROR("RAID 5 array needs at least 3 devices\n");
		return EINVAL;
	}

	/* states_lock guards the extent/volume state the update reads */
	fibril_rwlock_write_lock(&new_volume->states_lock);

	rc = hr_raid5_update_vol_status(new_volume);
	if (rc != EOK) {
		fibril_rwlock_write_unlock(&new_volume->states_lock);
		return rc;
	}

	bd_srvs_init(&new_volume->hr_bds);
	new_volume->hr_bds.ops = &hr_raid5_bd_ops;
	new_volume->hr_bds.sarg = new_volume;

	rc = hr_register_volume(new_volume);

	fibril_rwlock_write_unlock(&new_volume->states_lock);

	return rc;
}
122
123errno_t hr_raid5_init(hr_volume_t *vol)
124{
125 errno_t rc;
126 size_t bsize;
127 uint64_t total_blkno;
128
[d7768d11]129 assert(vol->level == HR_LVL_5 || vol->level == HR_LVL_4);
[733564a]130
131 rc = hr_check_devs(vol, &total_blkno, &bsize);
132 if (rc != EOK)
133 return rc;
134
135 vol->nblocks = total_blkno;
136 vol->bsize = bsize;
137 vol->data_offset = HR_DATA_OFF;
[65706f1]138 vol->data_blkno = vol->nblocks - (vol->data_offset * vol->extent_no) -
139 (vol->nblocks / vol->extent_no);
[733564a]140 vol->strip_size = HR_STRIP_SIZE;
141
142 return EOK;
143}
144
/** Handle an extent status change event on the volume.
 *
 * Re-derives the overall volume state under the volume mutex and the
 * states rwlock; the update may spawn a rebuild fibril.
 *
 * @param vol Affected volume.
 */
void hr_raid5_status_event(hr_volume_t *vol)
{
	fibril_mutex_lock(&vol->lock);
	fibril_rwlock_write_lock(&vol->states_lock);
	(void)hr_raid5_update_vol_status(vol);
	fibril_rwlock_write_unlock(&vol->states_lock);
	fibril_mutex_unlock(&vol->lock);
}
153
[aa7864b]154errno_t hr_raid5_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
155{
156 HR_DEBUG("hr_raid5_add_hotspare()\n");
157
158 fibril_mutex_lock(&vol->lock);
[f1be66bf]159 fibril_mutex_lock(&vol->hotspare_lock);
[aa7864b]160
161 if (vol->hotspare_no >= HR_MAX_HOTSPARES) {
162 HR_ERROR("hr_raid5_add_hotspare(): cannot add more hotspares "
163 "to \"%s\"\n", vol->devname);
164 fibril_mutex_unlock(&vol->lock);
165 return ELIMIT;
166 }
167
168 vol->hotspares[vol->hotspare_no].svc_id = hotspare;
[a0c3080]169
[aa7864b]170 vol->hotspare_no++;
171
[f1be66bf]172 hr_update_hotspare_status(vol, vol->hotspare_no - 1, HR_EXT_HOTSPARE);
173
[aa7864b]174 /*
175 * If the volume is degraded, start rebuild right away.
176 */
177 if (vol->status == HR_VOL_DEGRADED) {
178 HR_DEBUG("hr_raid5_add_hotspare(): volume in DEGRADED state, "
179 "spawning new rebuild fibril\n");
180 fid_t fib = fibril_create(hr_raid5_rebuild, vol);
[f1be66bf]181 if (fib == 0) {
182 fibril_mutex_unlock(&vol->hotspare_lock);
183 fibril_mutex_unlock(&vol->lock);
[a0c3080]184 return ENOMEM;
[f1be66bf]185 }
[aa7864b]186 fibril_start(fib);
187 fibril_detach(fib);
188 }
189
[f1be66bf]190 fibril_mutex_unlock(&vol->hotspare_lock);
[aa7864b]191 fibril_mutex_unlock(&vol->lock);
192
193 return EOK;
194}
195
[733564a]196static errno_t hr_raid5_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
197{
[d199a6f]198 HR_DEBUG("hr_bd_open()\n");
[733564a]199 return EOK;
200}
201
202static errno_t hr_raid5_bd_close(bd_srv_t *bd)
203{
[d199a6f]204 HR_DEBUG("hr_bd_close()\n");
[733564a]205 return EOK;
206}
207
208static errno_t hr_raid5_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
209{
210 return hr_raid5_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
211}
212
213static errno_t hr_raid5_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
214 void *buf, size_t size)
215{
216 return hr_raid5_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
217}
218
219static errno_t hr_raid5_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
220 const void *data, size_t size)
221{
222 return hr_raid5_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
223}
224
225static errno_t hr_raid5_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
226{
227 hr_volume_t *vol = bd->srvs->sarg;
228
229 *rsize = vol->bsize;
230 return EOK;
231}
232
233static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
234{
235 hr_volume_t *vol = bd->srvs->sarg;
236
237 *rnb = vol->data_blkno;
238 return EOK;
239}
240
[da0570a]241static errno_t hr_raid5_vol_usable(hr_volume_t *vol)
242{
243 if (vol->status == HR_VOL_ONLINE ||
[40bf2c6]244 vol->status == HR_VOL_DEGRADED ||
245 vol->status == HR_VOL_REBUILD)
[da0570a]246 return EOK;
[a0c3080]247 return EIO;
[da0570a]248}
249
250/*
251 * Returns (-1) if all extents are online,
252 * else returns index of first bad one.
253 */
254static ssize_t hr_raid5_get_bad_ext(hr_volume_t *vol)
255{
[65706f1]256 for (size_t i = 0; i < vol->extent_no; i++)
[da0570a]257 if (vol->extents[i].status != HR_EXT_ONLINE)
258 return i;
259 return -1;
260}
261
/** Re-derive the overall volume state from individual extent states.
 *
 * 0 bad extents -> ONLINE; exactly 1 -> DEGRADED (spawning a rebuild
 * fibril if a hotspare is available); 2 or more -> FAULTY.
 * Caller must hold the states_lock for writing.
 *
 * @param vol Volume to update.
 *
 * @return EOK while the volume remains usable, EIO when FAULTY,
 *         ENOMEM when the rebuild fibril cannot be created.
 */
static errno_t hr_raid5_update_vol_status(hr_volume_t *vol)
{
	hr_vol_status_t old_state = vol->status;
	size_t bad = 0;
	for (size_t i = 0; i < vol->extent_no; i++)
		if (vol->extents[i].status != HR_EXT_ONLINE)
			bad++;

	switch (bad) {
	case 0:
		if (old_state != HR_VOL_ONLINE)
			hr_update_vol_status(vol, HR_VOL_ONLINE);
		return EOK;
	case 1:
		/* do not interrupt an already running rebuild */
		if (old_state != HR_VOL_DEGRADED &&
		    old_state != HR_VOL_REBUILD) {

			hr_update_vol_status(vol, HR_VOL_DEGRADED);

			if (vol->hotspare_no > 0) {
				fid_t fib = fibril_create(hr_raid5_rebuild,
				    vol);
				if (fib == 0)
					return ENOMEM;
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
		return EOK;
	default:
		/* more than one failed extent: data is unrecoverable */
		if (old_state != HR_VOL_FAULTY)
			hr_update_vol_status(vol, HR_VOL_FAULTY);
		return EIO;
	}
}
297
[aa7864b]298static void hr_raid5_handle_extent_error(hr_volume_t *vol, size_t extent,
299 errno_t rc)
300{
301 if (rc == ENOENT)
302 hr_update_ext_status(vol, extent, HR_EXT_MISSING);
303 else if (rc != EOK)
304 hr_update_ext_status(vol, extent, HR_EXT_FAILED);
305}
306
/** XOR @a size bytes of @a src into @a dst in place.
 *
 * Works on 64-bit words for throughput, then finishes any remaining
 * tail bytes. (Previously the trailing size % 8 bytes were silently
 * ignored; all current callers pass multiples of the block size, so
 * this is a backward-compatible robustness fix.)
 */
static void xor(void *dst, const void *src, size_t size)
{
	uint64_t *d = dst;
	const uint64_t *s = src;
	size_t words = size / sizeof(uint64_t);
	size_t tail = size % sizeof(uint64_t);

	for (size_t i = 0; i < words; i++)
		*d++ ^= *s++;

	/* handle sizes that are not a multiple of 8 */
	uint8_t *db = (uint8_t *)d;
	const uint8_t *sb = (const uint8_t *)s;
	for (size_t i = 0; i < tail; i++)
		*db++ ^= *sb++;
}
316
/** Reconstruct data of an unreadable extent from the remaining ones.
 *
 * XORs the corresponding blocks of every extent except @a bad,
 * recovering the lost data (parity property of RAID 4/5).
 *
 * @param vol   Volume.
 * @param bad   Index of the extent that cannot be read.
 * @param block Physical block address within the extents.
 * @param data  Output buffer, at least cnt * vol->bsize bytes.
 * @param cnt   Number of blocks to reconstruct.
 *
 * @return EOK on success, ENOMEM, or a block layer error.
 */
static errno_t hr_raid5_read_degraded(hr_volume_t *vol, uint64_t bad,
    uint64_t block, void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	/* read all other extents in the stripe */
	bool first = true;
	for (i = 0; i < vol->extent_no; i++) {
		if (i == bad)
			continue;

		if (first) {
			/* seed the accumulator with the first extent's data */
			rc = block_read_direct(vol->extents[i].svc_id, block,
			    cnt, xorbuf);
			if (rc != EOK)
				goto end;

			first = false;
		} else {
			rc = block_read_direct(vol->extents[i].svc_id, block,
			    cnt, buf);
			if (rc != EOK)
				goto end;
			xor(xorbuf, buf, len);
		}
	}

	memcpy(data, xorbuf, len);
end:
	free(xorbuf);
	free(buf);
	return rc;
}
364
/** Write one strip of data while keeping parity consistent.
 *
 * Three cases depending on which extent (if any) is bad:
 * - all online, or only the parity extent is bad: write data
 *   directly (and update parity unless the parity extent is bad);
 * - the target data extent is bad: recompute parity from all other
 *   data extents plus the new data, write only the parity;
 * - some other extent is bad: read-modify-write — new parity =
 *   old data XOR old parity XOR new data.
 *
 * @param vol      Volume.
 * @param p_extent Parity extent index for this stripe.
 * @param extent   Target data extent index.
 * @param ba       Physical block address within the extents.
 * @param data     Data to write.
 * @param cnt      Number of blocks.
 *
 * @return EOK on success, ENOMEM, or a block layer error.
 */
static errno_t hr_raid5_write(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, aoff64_t ba, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	ssize_t bad = hr_raid5_get_bad_ext(vol);
	if (bad == -1 || (size_t)bad == p_extent) {
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			return rc;
		/*
		 * DEGRADED parity - skip parity write
		 */
		if ((size_t)bad == p_extent)
			return EOK;

		rc = hr_raid5_write_parity(vol, p_extent, extent, ba, data,
		    cnt);
		return rc;
	}

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	if (extent == (size_t)bad) {
		/*
		 * new parity = read other and xor in new data
		 *
		 * write new parity
		 */
		bool first = true;
		for (i = 0; i < vol->extent_no; i++) {
			if (i == (size_t)bad)
				continue;
			if (i == p_extent)
				continue;
			if (first) {
				/* seed accumulator with first readable extent */
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, xorbuf);
				if (rc != EOK)
					goto end;

				first = false;
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, buf);
				if (rc != EOK)
					goto end;
				xor(xorbuf, buf, len);
			}
		}
		xor(xorbuf, data, len);
		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
	} else {
		/*
		 * new parity = xor original data and old parity and new data
		 *
		 * write parity, new data
		 */
		rc = block_read_direct(vol->extents[extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_read_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    buf);
		if (rc != EOK)
			goto end;

		xor(xorbuf, buf, len);

		xor(xorbuf, data, len);

		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			goto end;
	}
end:
	free(xorbuf);
	free(buf);
	return rc;
}
466
/** Recompute and write the parity block for a stripe.
 *
 * Parity = XOR of all data extents, substituting @a data for the
 * extent being written (so the on-disk old data of @a extent is
 * never read).
 *
 * @param vol      Volume.
 * @param p_extent Parity extent index.
 * @param extent   Data extent whose new contents are @a data.
 * @param block    Physical block address within the extents.
 * @param data     New data for @a extent.
 * @param cnt      Number of blocks.
 *
 * @return EOK on success, ENOMEM, or a block layer error.
 */
static errno_t hr_raid5_write_parity(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, uint64_t block, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	bool first = true;
	for (i = 0; i < vol->extent_no; i++) {
		if (i == p_extent)
			continue;

		if (first) {
			/* seed the accumulator */
			if (i == extent) {
				memcpy(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, xorbuf);
				if (rc != EOK)
					goto end;
			}

			first = false;
		} else {
			if (i == extent) {
				/* use the new data, not what is on disk */
				xor(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, buf);
				if (rc != EOK)
					goto end;

				xor(xorbuf, buf, len);
			}
		}
	}

	rc = block_write_direct(vol->extents[p_extent].svc_id, block, cnt,
	    xorbuf);
end:
	free(xorbuf);
	free(buf);
	return rc;
}
523
[fad91b9]524static errno_t hr_raid5_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
[da0570a]525 size_t cnt, void *dst, const void *src, size_t size)
[dceb6e7]526{
527 hr_volume_t *vol = bd->srvs->sarg;
528 errno_t rc;
[da0570a]529 uint64_t phys_block, len;
[978130a]530 size_t left;
[da0570a]531 const uint8_t *data_write = src;
532 uint8_t *data_read = dst;
533
534 /* propagate sync */
535 if (type == HR_BD_SYNC && ba == 0 && cnt == 0) {
536 hr_sync_all_extents(vol);
537 rc = hr_raid5_update_vol_status(vol);
538 return rc;
539 }
[fad91b9]540
541 if (type == HR_BD_READ || type == HR_BD_WRITE)
542 if (size < cnt * vol->bsize)
543 return EINVAL;
[dceb6e7]544
545 rc = hr_check_ba_range(vol, cnt, ba);
546 if (rc != EOK)
547 return rc;
548
[37a9c1e]549 uint8_t layout = vol->layout;
[d7768d11]550 hr_level_t level = vol->level;
551
[978130a]552 uint64_t strip_size = vol->strip_size / vol->bsize; /* in blocks */
553 uint64_t stripe = (ba / strip_size); /* stripe number */
[d7768d11]554
555 /* parity extent */
556 uint64_t p_extent;
[37a9c1e]557 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]558 p_extent = 0;
[37a9c1e]559 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]560 p_extent = vol->extent_no - 1;
[37a9c1e]561 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_0R) {
[d7768d11]562 p_extent = (stripe / (vol->extent_no - 1)) % vol->extent_no;
563 } else if (level == HR_LVL_5 &&
[37a9c1e]564 (layout == HR_RLQ_RAID5_NR || layout == HR_RLQ_RAID5_NC)) {
[d7768d11]565 p_extent = (vol->extent_no - 1) -
566 (stripe / (vol->extent_no - 1)) % vol->extent_no;
567 } else {
568 return EINVAL;
569 }
570
[978130a]571 uint64_t extent;
[37a9c1e]572 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]573 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]574 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]575 extent = stripe % (vol->extent_no - 1);
576 } else if (level == HR_LVL_5 &&
[37a9c1e]577 (layout == HR_RLQ_RAID5_0R || layout == HR_RLQ_RAID5_NR)) {
[d7768d11]578 if ((stripe % (vol->extent_no - 1)) < p_extent)
579 extent = stripe % (vol->extent_no - 1);
580 else
581 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]582 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_NC) {
[eb31781]583 extent =
584 ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
585 vol->extent_no;
[d7768d11]586 } else {
587 return EINVAL;
588 }
589
[65706f1]590 uint64_t ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
[978130a]591 uint64_t strip_off = ba % strip_size; /* strip offset */
592
[abc2c4b]593 fibril_mutex_lock(&vol->lock);
[dceb6e7]594
[da0570a]595 rc = hr_raid5_vol_usable(vol);
596 if (rc != EOK) {
597 fibril_mutex_unlock(&vol->lock);
598 return EIO;
599 }
600
[fad91b9]601 left = cnt;
[a0c3080]602
[f1be66bf]603 fibril_rwlock_write_lock(&vol->states_lock);
[dceb6e7]604 while (left != 0) {
[978130a]605 phys_block = ext_stripe * strip_size + strip_off;
606 cnt = min(left, strip_size - strip_off);
[da0570a]607 len = vol->bsize * cnt;
[978130a]608 hr_add_ba_offset(vol, &phys_block);
[fad91b9]609 switch (type) {
610 case HR_BD_SYNC:
[da0570a]611 if (vol->extents[extent].status != HR_EXT_ONLINE)
612 break;
[fad91b9]613 rc = block_sync_cache(vol->extents[extent].svc_id,
[978130a]614 phys_block, cnt);
[da0570a]615 /* allow unsupported sync */
616 if (rc == ENOTSUP)
617 rc = EOK;
[fad91b9]618 break;
619 case HR_BD_READ:
[da0570a]620 retry_read:
621 ssize_t bad = hr_raid5_get_bad_ext(vol);
[521b387]622 if (bad > -1 && extent == (size_t)bad) {
[da0570a]623 rc = hr_raid5_read_degraded(vol, bad,
624 phys_block, data_read, cnt);
625 } else {
626 rc = block_read_direct(vol->extents[extent].svc_id,
627 phys_block, cnt, data_read);
628 }
629 data_read += len;
[fad91b9]630 break;
631 case HR_BD_WRITE:
[da0570a]632 retry_write:
633 rc = hr_raid5_write(vol, p_extent, extent, phys_block,
[978130a]634 data_write, cnt);
[da0570a]635 data_write += len;
[dceb6e7]636 break;
[fad91b9]637 default:
638 rc = EINVAL;
[da0570a]639 goto error;
[fad91b9]640 }
641
[da0570a]642 if (rc == ENOMEM)
[fad91b9]643 goto error;
644
[aa7864b]645 hr_raid5_handle_extent_error(vol, extent, rc);
[da0570a]646
647 if (rc != EOK) {
648 rc = hr_raid5_update_vol_status(vol);
649 if (rc == EOK) {
650 /*
651 * State changed from ONLINE -> DEGRADED,
652 * rewind and retry
653 */
654 if (type == HR_BD_WRITE) {
655 data_write -= len;
656 goto retry_write;
657 } else if (type == HR_BD_WRITE) {
658 data_read -= len;
659 goto retry_read;
660 }
661 } else {
662 rc = EIO;
663 goto error;
664 }
665 }
666
[978130a]667 left -= cnt;
668 strip_off = 0;
669 stripe++;
[d7768d11]670
671 ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
672
[37a9c1e]673 if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_0R) {
[eb31781]674 p_extent =
675 (stripe / (vol->extent_no - 1)) % vol->extent_no;
[d7768d11]676 } else if (level == HR_LVL_5 &&
[37a9c1e]677 (layout == HR_RLQ_RAID5_NR || layout == HR_RLQ_RAID5_NC)) {
[d7768d11]678 p_extent = (vol->extent_no - 1) -
679 (stripe / (vol->extent_no - 1)) % vol->extent_no;
680 }
681
[37a9c1e]682 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]683 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]684 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]685 extent = stripe % (vol->extent_no - 1);
686 } else if (level == HR_LVL_5 &&
[37a9c1e]687 (layout == HR_RLQ_RAID5_0R || layout == HR_RLQ_RAID5_NR)) {
[d7768d11]688 if ((stripe % (vol->extent_no - 1)) < p_extent)
689 extent = stripe % (vol->extent_no - 1);
690 else
691 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]692 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_NC) {
[eb31781]693 extent =
694 ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
695 vol->extent_no;
[d7768d11]696 }
[dceb6e7]697 }
698
[fad91b9]699error:
[bf0a791]700 (void)hr_raid5_update_vol_status(vol);
[f1be66bf]701 fibril_rwlock_write_unlock(&vol->states_lock);
[abc2c4b]702 fibril_mutex_unlock(&vol->lock);
[dceb6e7]703 return rc;
704}
705
[aa7864b]706static errno_t hr_raid5_rebuild(void *arg)
707{
708 HR_DEBUG("hr_raid5_rebuild()\n");
709
710 hr_volume_t *vol = arg;
711 errno_t rc = EOK;
712 void *buf = NULL, *xorbuf = NULL;
713
714 fibril_mutex_lock(&vol->lock);
[f1be66bf]715 fibril_rwlock_read_lock(&vol->extents_lock);
716 fibril_rwlock_write_lock(&vol->states_lock);
[aa7864b]717
718 if (vol->hotspare_no == 0) {
719 HR_WARN("hr_raid5_rebuild(): no free hotspares on \"%s\", "
720 "aborting rebuild\n", vol->devname);
721 /* retval isn't checked for now */
722 goto end;
723 }
724
[65706f1]725 size_t bad = vol->extent_no;
726 for (size_t i = 0; i < vol->extent_no; i++) {
[aa7864b]727 if (vol->extents[i].status == HR_EXT_FAILED) {
728 bad = i;
729 break;
730 }
731 }
732
[65706f1]733 if (bad == vol->extent_no) {
[aa7864b]734 HR_WARN("hr_raid5_rebuild(): no bad extent on \"%s\", "
735 "aborting rebuild\n", vol->devname);
736 /* retval isn't checked for now */
737 goto end;
738 }
739
740 size_t hotspare_idx = vol->hotspare_no - 1;
741
[a0c3080]742 hr_ext_status_t hs_state = vol->hotspares[hotspare_idx].status;
743 if (hs_state != HR_EXT_HOTSPARE) {
744 HR_ERROR("hr_raid5_rebuild(): invalid hotspare state \"%s\", "
745 "aborting rebuild\n", hr_get_ext_status_msg(hs_state));
746 rc = EINVAL;
747 goto end;
748 }
749
750 HR_DEBUG("hr_raid5_rebuild(): swapping in hotspare\n");
751
752 block_fini(vol->extents[bad].svc_id);
753
[aa7864b]754 vol->extents[bad].svc_id = vol->hotspares[hotspare_idx].svc_id;
[a0c3080]755 hr_update_ext_status(vol, bad, HR_EXT_HOTSPARE);
[aa7864b]756
757 vol->hotspares[hotspare_idx].svc_id = 0;
[f1be66bf]758 fibril_mutex_lock(&vol->hotspare_lock);
[a0c3080]759 hr_update_hotspare_status(vol, hotspare_idx, HR_EXT_MISSING);
[f1be66bf]760 fibril_mutex_unlock(&vol->hotspare_lock);
[aa7864b]761
[a0c3080]762 vol->hotspare_no--;
[aa7864b]763
[a0c3080]764 hr_extent_t *rebuild_ext = &vol->extents[bad];
[aa7864b]765
[a0c3080]766 rc = block_init(rebuild_ext->svc_id);
[aa7864b]767 if (rc != EOK) {
768 HR_ERROR("hr_raid5_rebuild(): initing (%lu) failed, "
[a0c3080]769 "aborting rebuild\n", rebuild_ext->svc_id);
[aa7864b]770 goto end;
771 }
772
[a0c3080]773 HR_DEBUG("hr_raid5_rebuild(): starting rebuild on (%lu)\n",
774 rebuild_ext->svc_id);
775
776 hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
777 hr_update_vol_status(vol, HR_VOL_REBUILD);
778
[aa7864b]779 uint64_t max_blks = DATA_XFER_LIMIT / vol->bsize;
[65706f1]780 uint64_t left = vol->data_blkno / (vol->extent_no - 1);
[aa7864b]781 buf = malloc(max_blks * vol->bsize);
782 xorbuf = malloc(max_blks * vol->bsize);
783
784 uint64_t ba = 0, cnt;
785 hr_add_ba_offset(vol, &ba);
[a0c3080]786
[aa7864b]787 while (left != 0) {
788 cnt = min(left, max_blks);
789
790 /*
791 * Almost the same as read_degraded,
792 * but we don't want to allocate new
793 * xorbuf each blk rebuild batch.
794 */
795 bool first = true;
[65706f1]796 for (size_t i = 0; i < vol->extent_no; i++) {
[aa7864b]797 if (i == bad)
798 continue;
[8160e4c0]799 if (first)
800 rc = block_read_direct(vol->extents[i].svc_id,
801 ba, cnt, xorbuf);
802 else
803 rc = block_read_direct(vol->extents[i].svc_id,
804 ba, cnt, buf);
[aa7864b]805 if (rc != EOK) {
806 hr_raid5_handle_extent_error(vol, i, rc);
807 HR_ERROR("rebuild on \"%s\" (%lu), failed due "
808 "to a failed ONLINE extent, number %lu\n",
809 vol->devname, vol->svc_id, i);
810 goto end;
811 }
812
[8160e4c0]813 if (!first)
[aa7864b]814 xor(xorbuf, buf, cnt * vol->bsize);
[8160e4c0]815 else
816 first = false;
[aa7864b]817 }
818
[a0c3080]819 rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, xorbuf);
[aa7864b]820 if (rc != EOK) {
821 hr_raid5_handle_extent_error(vol, bad, rc);
822 HR_ERROR("rebuild on \"%s\" (%lu), failed due to "
823 "the rebuilt extent number %lu failing\n",
824 vol->devname, vol->svc_id, bad);
825 goto end;
826 }
827
828 ba += cnt;
829 left -= cnt;
[40bf2c6]830
831 /*
832 * Let other IO requests be served
833 * during rebuild.
834 */
[f1be66bf]835 fibril_rwlock_write_unlock(&vol->states_lock);
[40bf2c6]836 fibril_mutex_unlock(&vol->lock);
837 fibril_mutex_lock(&vol->lock);
[f1be66bf]838 fibril_rwlock_write_lock(&vol->states_lock);
[aa7864b]839 }
840
841 HR_DEBUG("hr_raid5_rebuild(): rebuild finished on \"%s\" (%lu), "
842 "extent number %lu\n", vol->devname, vol->svc_id, hotspare_idx);
843
844 hr_update_ext_status(vol, bad, HR_EXT_ONLINE);
845 /*
846 * For now write metadata at the end, because
847 * we don't sync metada accross extents yet.
848 */
849 hr_write_meta_to_ext(vol, bad);
850end:
[bf0a791]851 (void)hr_raid5_update_vol_status(vol);
[aa7864b]852
[f1be66bf]853 fibril_rwlock_write_unlock(&vol->states_lock);
854 fibril_rwlock_read_unlock(&vol->extents_lock);
[aa7864b]855 fibril_mutex_unlock(&vol->lock);
856
857 if (buf != NULL)
858 free(buf);
859
860 if (xorbuf != NULL)
861 free(xorbuf);
862
863 return rc;
864}
865
[dceb6e7]866/** @}
867 */
Note: See TracBrowser for help on using the repository browser.