source: mainline/uspace/srv/bd/hr/raid5.c@ afec52b4

Last change on this file since afec52b4 was f647b87, checked in by Miroslav Cimerman <mc@…>, 7 months ago

srv/bd/hr: remove unused nblocks variable

  • Property mode set to 100644
File size: 21.1 KB
RevLine 
[dceb6e7]1/*
[746e636]2 * Copyright (c) 2025 Miroslav Cimerman
[dceb6e7]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup hr
30 * @{
31 */
32/**
33 * @file
34 */
35
36#include <abi/ipc/ipc.h>
37#include <bd_srv.h>
38#include <block.h>
39#include <errno.h>
40#include <hr.h>
[ca7fa5b]41#include <inttypes.h>
[dceb6e7]42#include <io/log.h>
43#include <ipc/hr.h>
44#include <ipc/services.h>
45#include <loc.h>
[978130a]46#include <mem.h>
[dceb6e7]47#include <task.h>
48#include <stdio.h>
49#include <stdlib.h>
50#include <str_error.h>
51
52#include "superblock.h"
53#include "util.h"
54#include "var.h"
55
[6d0fc11]56static errno_t hr_raid5_vol_usable(hr_volume_t *);
57static ssize_t hr_raid5_get_bad_ext(hr_volume_t *);
58static errno_t hr_raid5_update_vol_status(hr_volume_t *);
59static void hr_raid5_handle_extent_error(hr_volume_t *, size_t, errno_t);
60static void xor(void *, const void *, size_t);
61static errno_t hr_raid5_read_degraded(hr_volume_t *, uint64_t, uint64_t,
[733564a]62 void *, size_t);
[6d0fc11]63static errno_t hr_raid5_write(hr_volume_t *, uint64_t, uint64_t, aoff64_t,
[733564a]64 const void *, size_t);
[6d0fc11]65static errno_t hr_raid5_write_parity(hr_volume_t *, uint64_t, uint64_t,
[733564a]66 uint64_t, const void *, size_t);
[6d0fc11]67static errno_t hr_raid5_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
[733564a]68 void *, const void *, size_t);
[6d0fc11]69static errno_t hr_raid5_rebuild(void *);
[733564a]70
71/* bdops */
[6d0fc11]72static errno_t hr_raid5_bd_open(bd_srvs_t *, bd_srv_t *);
73static errno_t hr_raid5_bd_close(bd_srv_t *);
74static errno_t hr_raid5_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
[dceb6e7]75 size_t);
[6d0fc11]76static errno_t hr_raid5_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
77static errno_t hr_raid5_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
[dceb6e7]78 const void *, size_t);
[6d0fc11]79static errno_t hr_raid5_bd_get_block_size(bd_srv_t *, size_t *);
80static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *, aoff64_t *);
[dceb6e7]81
/** Block device server operations implemented by the RAID 4/5 driver. */
static bd_ops_t hr_raid5_bd_ops = {
	.open = hr_raid5_bd_open,
	.close = hr_raid5_bd_close,
	.sync_cache = hr_raid5_bd_sync_cache,
	.read_blocks = hr_raid5_bd_read_blocks,
	.write_blocks = hr_raid5_bd_write_blocks,
	.get_block_size = hr_raid5_bd_get_block_size,
	.get_num_blocks = hr_raid5_bd_get_num_blocks
};
91
[6d0fc11]92extern loc_srv_t *hr_srv;
93
[733564a]94errno_t hr_raid5_create(hr_volume_t *new_volume)
95{
[baa4929]96 HR_DEBUG("%s()", __func__);
97
[d7768d11]98 assert(new_volume->level == HR_LVL_5 || new_volume->level == HR_LVL_4);
[733564a]99
[65706f1]100 if (new_volume->extent_no < 3) {
[d199a6f]101 HR_ERROR("RAID 5 array needs at least 3 devices\n");
[733564a]102 return EINVAL;
103 }
104
[f1be66bf]105 fibril_rwlock_write_lock(&new_volume->states_lock);
106
[8a65373]107 errno_t rc = hr_raid5_update_vol_status(new_volume);
[f1be66bf]108 if (rc != EOK) {
109 fibril_rwlock_write_unlock(&new_volume->states_lock);
[733564a]110 return rc;
[f1be66bf]111 }
[733564a]112
113 bd_srvs_init(&new_volume->hr_bds);
114 new_volume->hr_bds.ops = &hr_raid5_bd_ops;
115 new_volume->hr_bds.sarg = new_volume;
116
[f1be66bf]117 fibril_rwlock_write_unlock(&new_volume->states_lock);
118
[8a65373]119 return EOK;
[733564a]120}
121
[746e636]122/*
123 * Called only once in volume's lifetime.
124 */
[733564a]125errno_t hr_raid5_init(hr_volume_t *vol)
126{
[baa4929]127 HR_DEBUG("%s()", __func__);
[733564a]128
[d7768d11]129 assert(vol->level == HR_LVL_5 || vol->level == HR_LVL_4);
[733564a]130
[baa4929]131 uint64_t truncated_blkno = vol->extents[0].blkno;
132 for (size_t i = 1; i < vol->extent_no; i++) {
133 if (vol->extents[i].blkno < truncated_blkno)
134 truncated_blkno = vol->extents[i].blkno;
135 }
136
137 uint64_t total_blkno = truncated_blkno * vol->extent_no;
[733564a]138
[baa4929]139 vol->truncated_blkno = truncated_blkno;
[50603405]140 vol->data_offset = vol->meta_ops->get_data_offset();
[baa4929]141
142 vol->data_blkno = total_blkno;
[50603405]143 /* count md blocks */
144 vol->data_blkno -= vol->meta_ops->get_size() * vol->extent_no;
[baa4929]145 vol->data_blkno -= truncated_blkno; /* count parity */
146
[733564a]147 vol->strip_size = HR_STRIP_SIZE;
148
149 return EOK;
150}
151
[7b359f5]152void hr_raid5_status_event(hr_volume_t *vol)
153{
154 fibril_mutex_lock(&vol->lock);
[f1be66bf]155 fibril_rwlock_write_lock(&vol->states_lock);
[bf0a791]156 (void)hr_raid5_update_vol_status(vol);
[f1be66bf]157 fibril_rwlock_write_unlock(&vol->states_lock);
[7b359f5]158 fibril_mutex_unlock(&vol->lock);
159}
160
[aa7864b]161errno_t hr_raid5_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
162{
[56214383]163 HR_DEBUG("%s()", __func__);
[aa7864b]164
165 fibril_mutex_lock(&vol->lock);
166
[56214383]167 errno_t rc = hr_util_add_hotspare(vol, hotspare);
168 if (rc != EOK)
169 goto end;
[f1be66bf]170
[aa7864b]171 /*
172 * If the volume is degraded, start rebuild right away.
173 */
174 if (vol->status == HR_VOL_DEGRADED) {
175 HR_DEBUG("hr_raid5_add_hotspare(): volume in DEGRADED state, "
176 "spawning new rebuild fibril\n");
177 fid_t fib = fibril_create(hr_raid5_rebuild, vol);
[f1be66bf]178 if (fib == 0) {
179 fibril_mutex_unlock(&vol->hotspare_lock);
180 fibril_mutex_unlock(&vol->lock);
[a0c3080]181 return ENOMEM;
[f1be66bf]182 }
[aa7864b]183 fibril_start(fib);
184 fibril_detach(fib);
185 }
186
[56214383]187end:
[aa7864b]188 fibril_mutex_unlock(&vol->lock);
189
[56214383]190 return rc;
[aa7864b]191}
192
[733564a]193static errno_t hr_raid5_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
194{
[7a80c63]195 HR_DEBUG("%s()\n", __func__);
196
197 hr_volume_t *vol = bd->srvs->sarg;
198
199 atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);
200
[733564a]201 return EOK;
202}
203
204static errno_t hr_raid5_bd_close(bd_srv_t *bd)
205{
[7a80c63]206 HR_DEBUG("%s()\n", __func__);
207
208 hr_volume_t *vol = bd->srvs->sarg;
209
210 atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);
211
[733564a]212 return EOK;
213}
214
/** Forward a cache sync request to the common RAID 5 I/O path. */
static errno_t hr_raid5_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid5_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}
219
/** Forward a block read request to the common RAID 5 I/O path. */
static errno_t hr_raid5_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid5_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}
225
/** Forward a block write request to the common RAID 5 I/O path. */
static errno_t hr_raid5_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid5_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}
231
/** Report the logical block size of the volume. */
static errno_t hr_raid5_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rsize = vol->bsize;
	return EOK;
}
239
/** Report the number of usable data blocks (parity and metadata excluded). */
static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rnb = vol->data_blkno;
	return EOK;
}
247
[da0570a]248static errno_t hr_raid5_vol_usable(hr_volume_t *vol)
249{
250 if (vol->status == HR_VOL_ONLINE ||
[40bf2c6]251 vol->status == HR_VOL_DEGRADED ||
252 vol->status == HR_VOL_REBUILD)
[da0570a]253 return EOK;
[a0c3080]254 return EIO;
[da0570a]255}
256
257/*
258 * Returns (-1) if all extents are online,
259 * else returns index of first bad one.
260 */
261static ssize_t hr_raid5_get_bad_ext(hr_volume_t *vol)
262{
[65706f1]263 for (size_t i = 0; i < vol->extent_no; i++)
[da0570a]264 if (vol->extents[i].status != HR_EXT_ONLINE)
265 return i;
266 return -1;
267}
268
/** Re-derive the volume state from the extent states.
 *
 * 0 bad extents  -> ONLINE
 * 1 bad extent   -> DEGRADED (and spawn a rebuild fibril if a
 *                   hotspare is available)
 * >1 bad extents -> FAULTY (parity can no longer reconstruct data)
 *
 * Callers in this file hold states_lock for writing.
 *
 * @return EOK if the volume remains usable, EIO when FAULTY,
 *         ENOMEM if the rebuild fibril could not be created.
 */
static errno_t hr_raid5_update_vol_status(hr_volume_t *vol)
{
	hr_vol_status_t old_state = vol->status;
	size_t bad = 0;
	for (size_t i = 0; i < vol->extent_no; i++)
		if (vol->extents[i].status != HR_EXT_ONLINE)
			bad++;

	switch (bad) {
	case 0:
		if (old_state != HR_VOL_ONLINE)
			hr_update_vol_status(vol, HR_VOL_ONLINE);
		return EOK;
	case 1:
		/* don't interrupt an already running rebuild */
		if (old_state != HR_VOL_DEGRADED &&
		    old_state != HR_VOL_REBUILD) {

			hr_update_vol_status(vol, HR_VOL_DEGRADED);

			if (vol->hotspare_no > 0) {
				fid_t fib = fibril_create(hr_raid5_rebuild,
				    vol);
				if (fib == 0)
					return ENOMEM;
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
		return EOK;
	default:
		if (old_state != HR_VOL_FAULTY)
			hr_update_vol_status(vol, HR_VOL_FAULTY);
		return EIO;
	}
}
304
[aa7864b]305static void hr_raid5_handle_extent_error(hr_volume_t *vol, size_t extent,
306 errno_t rc)
307{
308 if (rc == ENOENT)
309 hr_update_ext_status(vol, extent, HR_EXT_MISSING);
310 else if (rc != EOK)
311 hr_update_ext_status(vol, extent, HR_EXT_FAILED);
312}
313
[dceb6e7]314static void xor(void *dst, const void *src, size_t size)
315{
316 size_t i;
317 uint64_t *d = dst;
318 const uint64_t *s = src;
319
320 for (i = 0; i < size / sizeof(uint64_t); ++i)
321 *d++ ^= *s++;
322}
323
[da0570a]324static errno_t hr_raid5_read_degraded(hr_volume_t *vol, uint64_t bad,
325 uint64_t block, void *data, size_t cnt)
[dceb6e7]326{
327 errno_t rc;
[da0570a]328 size_t i;
[dceb6e7]329 void *xorbuf;
330 void *buf;
[da0570a]331 uint64_t len = vol->bsize * cnt;
[dceb6e7]332
[da0570a]333 xorbuf = malloc(len);
[dceb6e7]334 if (xorbuf == NULL)
335 return ENOMEM;
336
[da0570a]337 buf = malloc(len);
[c7b4452]338 if (buf == NULL) {
339 free(xorbuf);
[dceb6e7]340 return ENOMEM;
[c7b4452]341 }
[dceb6e7]342
[da0570a]343 /* read all other extents in the stripe */
[8160e4c0]344 bool first = true;
[65706f1]345 for (i = 0; i < vol->extent_no; i++) {
[8160e4c0]346 if (i == bad)
[da0570a]347 continue;
[8160e4c0]348
349 if (first) {
350 rc = block_read_direct(vol->extents[i].svc_id, block,
351 cnt, xorbuf);
352 if (rc != EOK)
353 goto end;
354
355 first = false;
[da0570a]356 } else {
357 rc = block_read_direct(vol->extents[i].svc_id, block,
358 cnt, buf);
359 if (rc != EOK)
360 goto end;
361 xor(xorbuf, buf, len);
362 }
363 }
[978130a]364
[da0570a]365 memcpy(data, xorbuf, len);
366end:
367 free(xorbuf);
368 free(buf);
369 return rc;
370}
371
/** Write data to one extent while keeping stripe parity consistent.
 *
 * Three cases, depending on which extent (if any) is bad:
 *  - none bad, or parity bad: write data (skip parity if it is the
 *    bad extent), otherwise recompute and write parity too;
 *  - the target data extent is bad: only the new parity is written,
 *    computed from all remaining data extents XOR the new data;
 *  - some other extent is bad: classic read-modify-write — new parity
 *    = old data XOR old parity XOR new data.
 *
 * @param vol      Volume.
 * @param p_extent Index of the parity extent for this stripe.
 * @param extent   Index of the target data extent.
 * @param ba       Block address on the extents.
 * @param data     Data to write (cnt * vol->bsize bytes).
 * @param cnt      Number of blocks.
 *
 * @return EOK on success, ENOMEM, or a block layer error.
 */
static errno_t hr_raid5_write(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, aoff64_t ba, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	ssize_t bad = hr_raid5_get_bad_ext(vol);
	if (bad == -1 || (size_t)bad == p_extent) {
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			return rc;
		/*
		 * DEGRADED parity - skip parity write
		 */
		if ((size_t)bad == p_extent)
			return EOK;

		rc = hr_raid5_write_parity(vol, p_extent, extent, ba, data,
		    cnt);
		return rc;
	}

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	if (extent == (size_t)bad) {
		/*
		 * new parity = read other and xor in new data
		 *
		 * write new parity
		 */
		bool first = true;
		for (i = 0; i < vol->extent_no; i++) {
			if (i == (size_t)bad)
				continue;
			if (i == p_extent)
				continue;
			if (first) {
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, xorbuf);
				if (rc != EOK)
					goto end;

				first = false;
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, buf);
				if (rc != EOK)
					goto end;
				xor(xorbuf, buf, len);
			}
		}
		/* fold in the new data; bad extent itself is never written */
		xor(xorbuf, data, len);
		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
	} else {
		/*
		 * new parity = xor original data and old parity and new data
		 *
		 * write parity, new data
		 */
		rc = block_read_direct(vol->extents[extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_read_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    buf);
		if (rc != EOK)
			goto end;

		xor(xorbuf, buf, len);

		xor(xorbuf, data, len);

		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			goto end;
	}
end:
	free(xorbuf);
	free(buf);
	return rc;
}
473
/** Recompute and write the parity block for a full stripe.
 *
 * XORs the blocks of every data extent — substituting @a data for the
 * extent being written — and stores the result on @a p_extent.
 *
 * @param vol      Volume.
 * @param p_extent Index of the parity extent.
 * @param extent   Index of the data extent whose new contents is @a data.
 * @param block    Block address on the extents.
 * @param data     New data for @a extent (cnt * vol->bsize bytes).
 * @param cnt      Number of blocks.
 *
 * @return EOK on success, ENOMEM, or a block layer error.
 */
static errno_t hr_raid5_write_parity(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, uint64_t block, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	bool first = true;
	for (i = 0; i < vol->extent_no; i++) {
		if (i == p_extent)
			continue;

		if (first) {
			/* seed the accumulator */
			if (i == extent) {
				memcpy(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, xorbuf);
				if (rc != EOK)
					goto end;
			}

			first = false;
		} else {
			if (i == extent) {
				/* use the new data instead of on-disk data */
				xor(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, buf);
				if (rc != EOK)
					goto end;

				xor(xorbuf, buf, len);
			}
		}
	}

	rc = block_write_direct(vol->extents[p_extent].svc_id, block, cnt,
	    xorbuf);
end:
	free(xorbuf);
	free(buf);
	return rc;
}
530
[fad91b9]531static errno_t hr_raid5_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
[da0570a]532 size_t cnt, void *dst, const void *src, size_t size)
[dceb6e7]533{
534 hr_volume_t *vol = bd->srvs->sarg;
535 errno_t rc;
[da0570a]536 uint64_t phys_block, len;
[978130a]537 size_t left;
[da0570a]538 const uint8_t *data_write = src;
539 uint8_t *data_read = dst;
540
541 /* propagate sync */
542 if (type == HR_BD_SYNC && ba == 0 && cnt == 0) {
543 hr_sync_all_extents(vol);
544 rc = hr_raid5_update_vol_status(vol);
545 return rc;
546 }
[fad91b9]547
548 if (type == HR_BD_READ || type == HR_BD_WRITE)
549 if (size < cnt * vol->bsize)
550 return EINVAL;
[dceb6e7]551
552 rc = hr_check_ba_range(vol, cnt, ba);
553 if (rc != EOK)
554 return rc;
555
[37a9c1e]556 uint8_t layout = vol->layout;
[d7768d11]557 hr_level_t level = vol->level;
558
[978130a]559 uint64_t strip_size = vol->strip_size / vol->bsize; /* in blocks */
560 uint64_t stripe = (ba / strip_size); /* stripe number */
[d7768d11]561
562 /* parity extent */
563 uint64_t p_extent;
[37a9c1e]564 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]565 p_extent = 0;
[37a9c1e]566 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]567 p_extent = vol->extent_no - 1;
[37a9c1e]568 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_0R) {
[d7768d11]569 p_extent = (stripe / (vol->extent_no - 1)) % vol->extent_no;
570 } else if (level == HR_LVL_5 &&
[37a9c1e]571 (layout == HR_RLQ_RAID5_NR || layout == HR_RLQ_RAID5_NC)) {
[d7768d11]572 p_extent = (vol->extent_no - 1) -
573 (stripe / (vol->extent_no - 1)) % vol->extent_no;
574 } else {
575 return EINVAL;
576 }
577
[978130a]578 uint64_t extent;
[37a9c1e]579 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]580 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]581 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]582 extent = stripe % (vol->extent_no - 1);
583 } else if (level == HR_LVL_5 &&
[37a9c1e]584 (layout == HR_RLQ_RAID5_0R || layout == HR_RLQ_RAID5_NR)) {
[d7768d11]585 if ((stripe % (vol->extent_no - 1)) < p_extent)
586 extent = stripe % (vol->extent_no - 1);
587 else
588 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]589 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_NC) {
[eb31781]590 extent =
591 ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
592 vol->extent_no;
[d7768d11]593 } else {
594 return EINVAL;
595 }
596
[65706f1]597 uint64_t ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
[978130a]598 uint64_t strip_off = ba % strip_size; /* strip offset */
599
[abc2c4b]600 fibril_mutex_lock(&vol->lock);
[dceb6e7]601
[da0570a]602 rc = hr_raid5_vol_usable(vol);
603 if (rc != EOK) {
604 fibril_mutex_unlock(&vol->lock);
605 return EIO;
606 }
607
[fad91b9]608 left = cnt;
[a0c3080]609
[f1be66bf]610 fibril_rwlock_write_lock(&vol->states_lock);
[dceb6e7]611 while (left != 0) {
[978130a]612 phys_block = ext_stripe * strip_size + strip_off;
613 cnt = min(left, strip_size - strip_off);
[da0570a]614 len = vol->bsize * cnt;
[978130a]615 hr_add_ba_offset(vol, &phys_block);
[fad91b9]616 switch (type) {
617 case HR_BD_SYNC:
[da0570a]618 if (vol->extents[extent].status != HR_EXT_ONLINE)
619 break;
[fad91b9]620 rc = block_sync_cache(vol->extents[extent].svc_id,
[978130a]621 phys_block, cnt);
[da0570a]622 /* allow unsupported sync */
623 if (rc == ENOTSUP)
624 rc = EOK;
[fad91b9]625 break;
626 case HR_BD_READ:
[da0570a]627 retry_read:
628 ssize_t bad = hr_raid5_get_bad_ext(vol);
[521b387]629 if (bad > -1 && extent == (size_t)bad) {
[da0570a]630 rc = hr_raid5_read_degraded(vol, bad,
631 phys_block, data_read, cnt);
632 } else {
633 rc = block_read_direct(vol->extents[extent].svc_id,
634 phys_block, cnt, data_read);
635 }
636 data_read += len;
[fad91b9]637 break;
638 case HR_BD_WRITE:
[da0570a]639 retry_write:
640 rc = hr_raid5_write(vol, p_extent, extent, phys_block,
[978130a]641 data_write, cnt);
[da0570a]642 data_write += len;
[dceb6e7]643 break;
[fad91b9]644 default:
645 rc = EINVAL;
[da0570a]646 goto error;
[fad91b9]647 }
648
[da0570a]649 if (rc == ENOMEM)
[fad91b9]650 goto error;
651
[aa7864b]652 hr_raid5_handle_extent_error(vol, extent, rc);
[da0570a]653
654 if (rc != EOK) {
655 rc = hr_raid5_update_vol_status(vol);
656 if (rc == EOK) {
657 /*
658 * State changed from ONLINE -> DEGRADED,
659 * rewind and retry
660 */
661 if (type == HR_BD_WRITE) {
662 data_write -= len;
663 goto retry_write;
664 } else if (type == HR_BD_WRITE) {
665 data_read -= len;
666 goto retry_read;
667 }
668 } else {
669 rc = EIO;
670 goto error;
671 }
672 }
673
[978130a]674 left -= cnt;
675 strip_off = 0;
676 stripe++;
[d7768d11]677
678 ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
679
[37a9c1e]680 if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_0R) {
[eb31781]681 p_extent =
682 (stripe / (vol->extent_no - 1)) % vol->extent_no;
[d7768d11]683 } else if (level == HR_LVL_5 &&
[37a9c1e]684 (layout == HR_RLQ_RAID5_NR || layout == HR_RLQ_RAID5_NC)) {
[d7768d11]685 p_extent = (vol->extent_no - 1) -
686 (stripe / (vol->extent_no - 1)) % vol->extent_no;
687 }
688
[37a9c1e]689 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]690 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]691 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]692 extent = stripe % (vol->extent_no - 1);
693 } else if (level == HR_LVL_5 &&
[37a9c1e]694 (layout == HR_RLQ_RAID5_0R || layout == HR_RLQ_RAID5_NR)) {
[d7768d11]695 if ((stripe % (vol->extent_no - 1)) < p_extent)
696 extent = stripe % (vol->extent_no - 1);
697 else
698 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]699 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_NC) {
[eb31781]700 extent =
701 ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
702 vol->extent_no;
[d7768d11]703 }
[dceb6e7]704 }
705
[fad91b9]706error:
[bf0a791]707 (void)hr_raid5_update_vol_status(vol);
[f1be66bf]708 fibril_rwlock_write_unlock(&vol->states_lock);
[abc2c4b]709 fibril_mutex_unlock(&vol->lock);
[dceb6e7]710 return rc;
711}
712
[aa7864b]713static errno_t hr_raid5_rebuild(void *arg)
714{
715 HR_DEBUG("hr_raid5_rebuild()\n");
716
717 hr_volume_t *vol = arg;
718 errno_t rc = EOK;
719 void *buf = NULL, *xorbuf = NULL;
720
721 fibril_mutex_lock(&vol->lock);
[f1be66bf]722 fibril_rwlock_read_lock(&vol->extents_lock);
723 fibril_rwlock_write_lock(&vol->states_lock);
[aa7864b]724
725 if (vol->hotspare_no == 0) {
726 HR_WARN("hr_raid5_rebuild(): no free hotspares on \"%s\", "
727 "aborting rebuild\n", vol->devname);
728 /* retval isn't checked for now */
729 goto end;
730 }
731
[65706f1]732 size_t bad = vol->extent_no;
733 for (size_t i = 0; i < vol->extent_no; i++) {
[aa7864b]734 if (vol->extents[i].status == HR_EXT_FAILED) {
735 bad = i;
736 break;
737 }
738 }
739
[65706f1]740 if (bad == vol->extent_no) {
[aa7864b]741 HR_WARN("hr_raid5_rebuild(): no bad extent on \"%s\", "
742 "aborting rebuild\n", vol->devname);
743 /* retval isn't checked for now */
744 goto end;
745 }
746
747 size_t hotspare_idx = vol->hotspare_no - 1;
748
[a0c3080]749 hr_ext_status_t hs_state = vol->hotspares[hotspare_idx].status;
750 if (hs_state != HR_EXT_HOTSPARE) {
751 HR_ERROR("hr_raid5_rebuild(): invalid hotspare state \"%s\", "
752 "aborting rebuild\n", hr_get_ext_status_msg(hs_state));
753 rc = EINVAL;
754 goto end;
755 }
756
757 HR_DEBUG("hr_raid5_rebuild(): swapping in hotspare\n");
758
759 block_fini(vol->extents[bad].svc_id);
760
[aa7864b]761 vol->extents[bad].svc_id = vol->hotspares[hotspare_idx].svc_id;
[a0c3080]762 hr_update_ext_status(vol, bad, HR_EXT_HOTSPARE);
[aa7864b]763
764 vol->hotspares[hotspare_idx].svc_id = 0;
[f1be66bf]765 fibril_mutex_lock(&vol->hotspare_lock);
[a0c3080]766 hr_update_hotspare_status(vol, hotspare_idx, HR_EXT_MISSING);
[f1be66bf]767 fibril_mutex_unlock(&vol->hotspare_lock);
[aa7864b]768
[a0c3080]769 vol->hotspare_no--;
[aa7864b]770
[a0c3080]771 hr_extent_t *rebuild_ext = &vol->extents[bad];
[aa7864b]772
[ca7fa5b]773 HR_DEBUG("hr_raid5_rebuild(): starting rebuild on (%" PRIun ")\n",
[a0c3080]774 rebuild_ext->svc_id);
775
776 hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
777 hr_update_vol_status(vol, HR_VOL_REBUILD);
778
[aa7864b]779 uint64_t max_blks = DATA_XFER_LIMIT / vol->bsize;
[65706f1]780 uint64_t left = vol->data_blkno / (vol->extent_no - 1);
[aa7864b]781 buf = malloc(max_blks * vol->bsize);
782 xorbuf = malloc(max_blks * vol->bsize);
783
784 uint64_t ba = 0, cnt;
785 hr_add_ba_offset(vol, &ba);
[a0c3080]786
[aa7864b]787 while (left != 0) {
788 cnt = min(left, max_blks);
789
790 /*
791 * Almost the same as read_degraded,
792 * but we don't want to allocate new
793 * xorbuf each blk rebuild batch.
794 */
795 bool first = true;
[65706f1]796 for (size_t i = 0; i < vol->extent_no; i++) {
[aa7864b]797 if (i == bad)
798 continue;
[8160e4c0]799 if (first)
800 rc = block_read_direct(vol->extents[i].svc_id,
801 ba, cnt, xorbuf);
802 else
803 rc = block_read_direct(vol->extents[i].svc_id,
804 ba, cnt, buf);
[aa7864b]805 if (rc != EOK) {
806 hr_raid5_handle_extent_error(vol, i, rc);
[ca7fa5b]807 HR_ERROR("rebuild on \"%s\" (%" PRIun "), "
808 "failed due to a failed ONLINE extent, "
809 "number %zu\n",
[aa7864b]810 vol->devname, vol->svc_id, i);
811 goto end;
812 }
813
[8160e4c0]814 if (!first)
[aa7864b]815 xor(xorbuf, buf, cnt * vol->bsize);
[8160e4c0]816 else
817 first = false;
[aa7864b]818 }
819
[a0c3080]820 rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, xorbuf);
[aa7864b]821 if (rc != EOK) {
822 hr_raid5_handle_extent_error(vol, bad, rc);
[ca7fa5b]823 HR_ERROR("rebuild on \"%s\" (%" PRIun "), failed due to "
824 "the rebuilt extent number %zu failing\n",
[aa7864b]825 vol->devname, vol->svc_id, bad);
826 goto end;
827 }
828
829 ba += cnt;
830 left -= cnt;
[40bf2c6]831
832 /*
833 * Let other IO requests be served
834 * during rebuild.
835 */
[f1be66bf]836 fibril_rwlock_write_unlock(&vol->states_lock);
[40bf2c6]837 fibril_mutex_unlock(&vol->lock);
838 fibril_mutex_lock(&vol->lock);
[f1be66bf]839 fibril_rwlock_write_lock(&vol->states_lock);
[aa7864b]840 }
841
[ca7fa5b]842 HR_DEBUG("hr_raid5_rebuild(): rebuild finished on \"%s\" (%" PRIun "), "
843 "extent number %zu\n", vol->devname, vol->svc_id, hotspare_idx);
[aa7864b]844
845 hr_update_ext_status(vol, bad, HR_EXT_ONLINE);
[0277ec2]846
[50603405]847 rc = vol->meta_ops->save(vol, WITH_STATE_CALLBACK);
[0277ec2]848
[aa7864b]849end:
[bf0a791]850 (void)hr_raid5_update_vol_status(vol);
[aa7864b]851
[f1be66bf]852 fibril_rwlock_write_unlock(&vol->states_lock);
853 fibril_rwlock_read_unlock(&vol->extents_lock);
[aa7864b]854 fibril_mutex_unlock(&vol->lock);
855
856 if (buf != NULL)
857 free(buf);
858
859 if (xorbuf != NULL)
860 free(xorbuf);
861
862 return rc;
863}
864
[dceb6e7]865/** @}
866 */
Note: See TracBrowser for help on using the repository browser.