source: mainline/uspace/srv/bd/hr/raid5.c@ 7a80c63

Last change on this file since 7a80c63 was 7a80c63, checked in by Miroslav Cimerman <mc@…>, 3 months ago

hr: raid{0,1,5}.c: increment open() count

  • Property mode set to 100644
File size: 21.4 KB
RevLine 
[dceb6e7]1/*
2 * Copyright (c) 2024 Miroslav Cimerman
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup hr
30 * @{
31 */
32/**
33 * @file
34 */
35
36#include <abi/ipc/ipc.h>
37#include <bd_srv.h>
38#include <block.h>
39#include <errno.h>
40#include <hr.h>
41#include <io/log.h>
42#include <ipc/hr.h>
43#include <ipc/services.h>
44#include <loc.h>
[978130a]45#include <mem.h>
[dceb6e7]46#include <task.h>
47#include <stdio.h>
48#include <stdlib.h>
49#include <str_error.h>
50
51#include "superblock.h"
52#include "util.h"
53#include "var.h"
54
55extern loc_srv_t *hr_srv;
56
[733564a]57static errno_t hr_raid5_vol_usable(hr_volume_t *);
58static ssize_t hr_raid5_get_bad_ext(hr_volume_t *);
59static errno_t hr_raid5_update_vol_status(hr_volume_t *);
[aa7864b]60static void hr_raid5_handle_extent_error(hr_volume_t *, size_t, errno_t);
[733564a]61static void xor(void *, const void *, size_t);
62static errno_t hr_raid5_read_degraded(hr_volume_t *, uint64_t, uint64_t,
63 void *, size_t);
64static errno_t hr_raid5_write(hr_volume_t *, uint64_t, uint64_t, aoff64_t,
65 const void *, size_t);
66static errno_t hr_raid5_write_parity(hr_volume_t *, uint64_t, uint64_t,
67 uint64_t, const void *, size_t);
68static errno_t hr_raid5_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
69 void *, const void *, size_t);
[aa7864b]70static errno_t hr_raid5_rebuild(void *);
[733564a]71
72/* bdops */
[dceb6e7]73static errno_t hr_raid5_bd_open(bd_srvs_t *, bd_srv_t *);
74static errno_t hr_raid5_bd_close(bd_srv_t *);
75static errno_t hr_raid5_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
76 size_t);
77static errno_t hr_raid5_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
78static errno_t hr_raid5_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
79 const void *, size_t);
80static errno_t hr_raid5_bd_get_block_size(bd_srv_t *, size_t *);
81static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *, aoff64_t *);
82
/** Block device server operations implemented by RAID 4/5 volumes. */
static bd_ops_t hr_raid5_bd_ops = {
	.open = hr_raid5_bd_open,
	.close = hr_raid5_bd_close,
	.sync_cache = hr_raid5_bd_sync_cache,
	.read_blocks = hr_raid5_bd_read_blocks,
	.write_blocks = hr_raid5_bd_write_blocks,
	.get_block_size = hr_raid5_bd_get_block_size,
	.get_num_blocks = hr_raid5_bd_get_num_blocks
};
92
[733564a]93errno_t hr_raid5_create(hr_volume_t *new_volume)
94{
95 errno_t rc;
96
[d7768d11]97 assert(new_volume->level == HR_LVL_5 || new_volume->level == HR_LVL_4);
[733564a]98
[65706f1]99 if (new_volume->extent_no < 3) {
[d199a6f]100 HR_ERROR("RAID 5 array needs at least 3 devices\n");
[733564a]101 return EINVAL;
102 }
103
[f1be66bf]104 fibril_rwlock_write_lock(&new_volume->states_lock);
105
[733564a]106 rc = hr_raid5_update_vol_status(new_volume);
[f1be66bf]107 if (rc != EOK) {
108 fibril_rwlock_write_unlock(&new_volume->states_lock);
[733564a]109 return rc;
[f1be66bf]110 }
[733564a]111
112 bd_srvs_init(&new_volume->hr_bds);
113 new_volume->hr_bds.ops = &hr_raid5_bd_ops;
114 new_volume->hr_bds.sarg = new_volume;
115
116 rc = hr_register_volume(new_volume);
117
[f1be66bf]118 fibril_rwlock_write_unlock(&new_volume->states_lock);
119
[733564a]120 return rc;
121}
122
/** Initialize geometry of a RAID 4/5 volume from its extents.
 *
 * Checks that all extents agree on block size / count, then derives
 * the usable data size: metadata offset is reserved on every extent
 * and one extent's worth of blocks is consumed by parity.
 *
 * @param vol	Volume to initialize; level must be RAID 4 or RAID 5.
 *
 * @return EOK on success, error from extent verification otherwise.
 */
errno_t hr_raid5_init(hr_volume_t *vol)
{
	errno_t rc;
	size_t bsize;
	uint64_t total_blkno;

	assert(vol->level == HR_LVL_5 || vol->level == HR_LVL_4);

	rc = hr_check_devs(vol, &total_blkno, &bsize);
	if (rc != EOK)
		return rc;

	vol->nblocks = total_blkno;
	vol->bsize = bsize;
	vol->data_offset = HR_DATA_OFF;
	/* subtract per-extent metadata and one extent's worth of parity */
	vol->data_blkno = vol->nblocks - (vol->data_offset * vol->extent_no) -
	    (vol->nblocks / vol->extent_no);
	vol->strip_size = HR_STRIP_SIZE;

	return EOK;
}
144
[7b359f5]145void hr_raid5_status_event(hr_volume_t *vol)
146{
147 fibril_mutex_lock(&vol->lock);
[f1be66bf]148 fibril_rwlock_write_lock(&vol->states_lock);
[bf0a791]149 (void)hr_raid5_update_vol_status(vol);
[f1be66bf]150 fibril_rwlock_write_unlock(&vol->states_lock);
[7b359f5]151 fibril_mutex_unlock(&vol->lock);
152}
153
[aa7864b]154errno_t hr_raid5_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
155{
156 HR_DEBUG("hr_raid5_add_hotspare()\n");
157
158 fibril_mutex_lock(&vol->lock);
[f1be66bf]159 fibril_mutex_lock(&vol->hotspare_lock);
[aa7864b]160
161 if (vol->hotspare_no >= HR_MAX_HOTSPARES) {
162 HR_ERROR("hr_raid5_add_hotspare(): cannot add more hotspares "
163 "to \"%s\"\n", vol->devname);
164 fibril_mutex_unlock(&vol->lock);
165 return ELIMIT;
166 }
167
168 vol->hotspares[vol->hotspare_no].svc_id = hotspare;
[a0c3080]169
[aa7864b]170 vol->hotspare_no++;
171
[f1be66bf]172 hr_update_hotspare_status(vol, vol->hotspare_no - 1, HR_EXT_HOTSPARE);
173
[aa7864b]174 /*
175 * If the volume is degraded, start rebuild right away.
176 */
177 if (vol->status == HR_VOL_DEGRADED) {
178 HR_DEBUG("hr_raid5_add_hotspare(): volume in DEGRADED state, "
179 "spawning new rebuild fibril\n");
180 fid_t fib = fibril_create(hr_raid5_rebuild, vol);
[f1be66bf]181 if (fib == 0) {
182 fibril_mutex_unlock(&vol->hotspare_lock);
183 fibril_mutex_unlock(&vol->lock);
[a0c3080]184 return ENOMEM;
[f1be66bf]185 }
[aa7864b]186 fibril_start(fib);
187 fibril_detach(fib);
188 }
189
[f1be66bf]190 fibril_mutex_unlock(&vol->hotspare_lock);
[aa7864b]191 fibril_mutex_unlock(&vol->lock);
192
193 return EOK;
194}
195
[733564a]196static errno_t hr_raid5_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
197{
[7a80c63]198 HR_DEBUG("%s()\n", __func__);
199
200 hr_volume_t *vol = bd->srvs->sarg;
201
202 atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);
203
[733564a]204 return EOK;
205}
206
/** Block device close callback: decrements the volume's open count. */
static errno_t hr_raid5_bd_close(bd_srv_t *bd)
{
	HR_DEBUG("%s()\n", __func__);

	hr_volume_t *vol = bd->srvs->sarg;

	atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);

	return EOK;
}
217
/** Block device sync callback; forwards to the common op handler. */
static errno_t hr_raid5_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid5_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}
222
/** Block device read callback; forwards to the common op handler. */
static errno_t hr_raid5_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid5_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}
228
/** Block device write callback; forwards to the common op handler. */
static errno_t hr_raid5_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid5_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}
234
/** Report the volume's block size. */
static errno_t hr_raid5_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rsize = vol->bsize;
	return EOK;
}
242
/** Report the number of usable data blocks (parity excluded). */
static errno_t hr_raid5_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rnb = vol->data_blkno;
	return EOK;
}
250
[da0570a]251static errno_t hr_raid5_vol_usable(hr_volume_t *vol)
252{
253 if (vol->status == HR_VOL_ONLINE ||
[40bf2c6]254 vol->status == HR_VOL_DEGRADED ||
255 vol->status == HR_VOL_REBUILD)
[da0570a]256 return EOK;
[a0c3080]257 return EIO;
[da0570a]258}
259
260/*
261 * Returns (-1) if all extents are online,
262 * else returns index of first bad one.
263 */
264static ssize_t hr_raid5_get_bad_ext(hr_volume_t *vol)
265{
[65706f1]266 for (size_t i = 0; i < vol->extent_no; i++)
[da0570a]267 if (vol->extents[i].status != HR_EXT_ONLINE)
268 return i;
269 return -1;
270}
271
/** Re-evaluate the volume state from the individual extent states.
 *
 * - no failed extents:	volume becomes/stays ONLINE,
 * - one failed extent:	volume becomes DEGRADED (unless already
 *   DEGRADED/REBUILD); if a hotspare is present a rebuild fibril
 *   is spawned immediately,
 * - more failed:	volume becomes FAULTY — single parity cannot
 *   recover from multiple failures.
 *
 * NOTE(review): callers hold states_lock for writing — confirm, the
 * lock is not asserted here.
 *
 * @param vol	Volume to evaluate.
 *
 * @return EOK while the volume stays usable, ENOMEM if the rebuild
 *	   fibril cannot be created, EIO when the volume is faulty.
 */
static errno_t hr_raid5_update_vol_status(hr_volume_t *vol)
{
	hr_vol_status_t old_state = vol->status;
	size_t bad = 0;
	for (size_t i = 0; i < vol->extent_no; i++)
		if (vol->extents[i].status != HR_EXT_ONLINE)
			bad++;

	switch (bad) {
	case 0:
		if (old_state != HR_VOL_ONLINE)
			hr_update_vol_status(vol, HR_VOL_ONLINE);
		return EOK;
	case 1:
		if (old_state != HR_VOL_DEGRADED &&
		    old_state != HR_VOL_REBUILD) {

			hr_update_vol_status(vol, HR_VOL_DEGRADED);

			if (vol->hotspare_no > 0) {
				fid_t fib = fibril_create(hr_raid5_rebuild,
				    vol);
				if (fib == 0)
					return ENOMEM;
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
		return EOK;
	default:
		if (old_state != HR_VOL_FAULTY)
			hr_update_vol_status(vol, HR_VOL_FAULTY);
		return EIO;
	}
}
307
[aa7864b]308static void hr_raid5_handle_extent_error(hr_volume_t *vol, size_t extent,
309 errno_t rc)
310{
311 if (rc == ENOENT)
312 hr_update_ext_status(vol, extent, HR_EXT_MISSING);
313 else if (rc != EOK)
314 hr_update_ext_status(vol, extent, HR_EXT_FAILED);
315}
316
[dceb6e7]317static void xor(void *dst, const void *src, size_t size)
318{
319 size_t i;
320 uint64_t *d = dst;
321 const uint64_t *s = src;
322
323 for (i = 0; i < size / sizeof(uint64_t); ++i)
324 *d++ ^= *s++;
325}
326
[da0570a]327static errno_t hr_raid5_read_degraded(hr_volume_t *vol, uint64_t bad,
328 uint64_t block, void *data, size_t cnt)
[dceb6e7]329{
330 errno_t rc;
[da0570a]331 size_t i;
[dceb6e7]332 void *xorbuf;
333 void *buf;
[da0570a]334 uint64_t len = vol->bsize * cnt;
[dceb6e7]335
[da0570a]336 xorbuf = malloc(len);
[dceb6e7]337 if (xorbuf == NULL)
338 return ENOMEM;
339
[da0570a]340 buf = malloc(len);
[c7b4452]341 if (buf == NULL) {
342 free(xorbuf);
[dceb6e7]343 return ENOMEM;
[c7b4452]344 }
[dceb6e7]345
[da0570a]346 /* read all other extents in the stripe */
[8160e4c0]347 bool first = true;
[65706f1]348 for (i = 0; i < vol->extent_no; i++) {
[8160e4c0]349 if (i == bad)
[da0570a]350 continue;
[8160e4c0]351
352 if (first) {
353 rc = block_read_direct(vol->extents[i].svc_id, block,
354 cnt, xorbuf);
355 if (rc != EOK)
356 goto end;
357
358 first = false;
[da0570a]359 } else {
360 rc = block_read_direct(vol->extents[i].svc_id, block,
361 cnt, buf);
362 if (rc != EOK)
363 goto end;
364 xor(xorbuf, buf, len);
365 }
366 }
[978130a]367
[da0570a]368 memcpy(data, xorbuf, len);
369end:
370 free(xorbuf);
371 free(buf);
372 return rc;
373}
374
/** Write data blocks and keep parity consistent.
 *
 * Strategy depends on the volume's degradation state:
 * - all extents healthy: write data, then recompute + write parity,
 * - parity extent is the bad one: write data only (parity skipped),
 * - data extent is the bad one: recompute parity from all healthy
 *   data extents XOR the new data, write only parity,
 * - some other extent is bad: read-modify-write — parity ^= old
 *   data ^ new data, then write parity and data.
 *
 * @param vol		Volume.
 * @param p_extent	Parity extent index for this stripe.
 * @param extent	Data extent index for this write.
 * @param ba		Physical block address on the extents.
 * @param data		Data to write, cnt * bsize bytes.
 * @param cnt		Number of blocks.
 *
 * @return EOK on success, ENOMEM or an I/O error otherwise.
 */
static errno_t hr_raid5_write(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, aoff64_t ba, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	ssize_t bad = hr_raid5_get_bad_ext(vol);
	if (bad == -1 || (size_t)bad == p_extent) {
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			return rc;
		/*
		 * DEGRADED parity - skip parity write
		 */
		if ((size_t)bad == p_extent)
			return EOK;

		rc = hr_raid5_write_parity(vol, p_extent, extent, ba, data,
		    cnt);
		return rc;
	}

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	if (extent == (size_t)bad) {
		/*
		 * new parity = read other and xor in new data
		 *
		 * write new parity
		 */
		bool first = true;
		for (i = 0; i < vol->extent_no; i++) {
			if (i == (size_t)bad)
				continue;
			if (i == p_extent)
				continue;
			if (first) {
				/* seed the accumulator with the first extent */
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, xorbuf);
				if (rc != EOK)
					goto end;

				first = false;
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    ba, cnt, buf);
				if (rc != EOK)
					goto end;
				xor(xorbuf, buf, len);
			}
		}
		xor(xorbuf, data, len);
		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
	} else {
		/*
		 * new parity = xor original data and old parity and new data
		 *
		 * write parity, new data
		 */
		rc = block_read_direct(vol->extents[extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_read_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    buf);
		if (rc != EOK)
			goto end;

		xor(xorbuf, buf, len);

		xor(xorbuf, data, len);

		rc = block_write_direct(vol->extents[p_extent].svc_id, ba, cnt,
		    xorbuf);
		if (rc != EOK)
			goto end;
		rc = block_write_direct(vol->extents[extent].svc_id, ba, cnt,
		    data);
		if (rc != EOK)
			goto end;
	}
end:
	free(xorbuf);
	free(buf);
	return rc;
}
476
/** Recompute and write the parity block for a stripe.
 *
 * XORs together every data extent in the stripe — substituting the
 * caller-supplied new data for `extent` instead of reading it back —
 * and writes the result to the parity extent.
 *
 * @param vol		Volume.
 * @param p_extent	Parity extent index.
 * @param extent	Extent whose new contents are in @a data.
 * @param block		Physical block address on the extents.
 * @param data		New data of @a extent, cnt * bsize bytes.
 * @param cnt		Number of blocks.
 *
 * @return EOK on success, ENOMEM or an I/O error otherwise.
 */
static errno_t hr_raid5_write_parity(hr_volume_t *vol, uint64_t p_extent,
    uint64_t extent, uint64_t block, const void *data, size_t cnt)
{
	errno_t rc;
	size_t i;
	void *xorbuf;
	void *buf;
	uint64_t len = vol->bsize * cnt;

	xorbuf = malloc(len);
	if (xorbuf == NULL)
		return ENOMEM;

	buf = malloc(len);
	if (buf == NULL) {
		free(xorbuf);
		return ENOMEM;
	}

	bool first = true;
	for (i = 0; i < vol->extent_no; i++) {
		if (i == p_extent)
			continue;

		if (first) {
			/* seed the accumulator: new data or on-disk data */
			if (i == extent) {
				memcpy(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, xorbuf);
				if (rc != EOK)
					goto end;
			}

			first = false;
		} else {
			if (i == extent) {
				xor(xorbuf, data, len);
			} else {
				rc = block_read_direct(vol->extents[i].svc_id,
				    block, cnt, buf);
				if (rc != EOK)
					goto end;

				xor(xorbuf, buf, len);
			}
		}
	}

	rc = block_write_direct(vol->extents[p_extent].svc_id, block, cnt,
	    xorbuf);
end:
	free(xorbuf);
	free(buf);
	return rc;
}
533
[fad91b9]534static errno_t hr_raid5_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
[da0570a]535 size_t cnt, void *dst, const void *src, size_t size)
[dceb6e7]536{
537 hr_volume_t *vol = bd->srvs->sarg;
538 errno_t rc;
[da0570a]539 uint64_t phys_block, len;
[978130a]540 size_t left;
[da0570a]541 const uint8_t *data_write = src;
542 uint8_t *data_read = dst;
543
544 /* propagate sync */
545 if (type == HR_BD_SYNC && ba == 0 && cnt == 0) {
546 hr_sync_all_extents(vol);
547 rc = hr_raid5_update_vol_status(vol);
548 return rc;
549 }
[fad91b9]550
551 if (type == HR_BD_READ || type == HR_BD_WRITE)
552 if (size < cnt * vol->bsize)
553 return EINVAL;
[dceb6e7]554
555 rc = hr_check_ba_range(vol, cnt, ba);
556 if (rc != EOK)
557 return rc;
558
[37a9c1e]559 uint8_t layout = vol->layout;
[d7768d11]560 hr_level_t level = vol->level;
561
[978130a]562 uint64_t strip_size = vol->strip_size / vol->bsize; /* in blocks */
563 uint64_t stripe = (ba / strip_size); /* stripe number */
[d7768d11]564
565 /* parity extent */
566 uint64_t p_extent;
[37a9c1e]567 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]568 p_extent = 0;
[37a9c1e]569 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]570 p_extent = vol->extent_no - 1;
[37a9c1e]571 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_0R) {
[d7768d11]572 p_extent = (stripe / (vol->extent_no - 1)) % vol->extent_no;
573 } else if (level == HR_LVL_5 &&
[37a9c1e]574 (layout == HR_RLQ_RAID5_NR || layout == HR_RLQ_RAID5_NC)) {
[d7768d11]575 p_extent = (vol->extent_no - 1) -
576 (stripe / (vol->extent_no - 1)) % vol->extent_no;
577 } else {
578 return EINVAL;
579 }
580
[978130a]581 uint64_t extent;
[37a9c1e]582 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]583 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]584 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]585 extent = stripe % (vol->extent_no - 1);
586 } else if (level == HR_LVL_5 &&
[37a9c1e]587 (layout == HR_RLQ_RAID5_0R || layout == HR_RLQ_RAID5_NR)) {
[d7768d11]588 if ((stripe % (vol->extent_no - 1)) < p_extent)
589 extent = stripe % (vol->extent_no - 1);
590 else
591 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]592 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_NC) {
[eb31781]593 extent =
594 ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
595 vol->extent_no;
[d7768d11]596 } else {
597 return EINVAL;
598 }
599
[65706f1]600 uint64_t ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
[978130a]601 uint64_t strip_off = ba % strip_size; /* strip offset */
602
[abc2c4b]603 fibril_mutex_lock(&vol->lock);
[dceb6e7]604
[da0570a]605 rc = hr_raid5_vol_usable(vol);
606 if (rc != EOK) {
607 fibril_mutex_unlock(&vol->lock);
608 return EIO;
609 }
610
[fad91b9]611 left = cnt;
[a0c3080]612
[f1be66bf]613 fibril_rwlock_write_lock(&vol->states_lock);
[dceb6e7]614 while (left != 0) {
[978130a]615 phys_block = ext_stripe * strip_size + strip_off;
616 cnt = min(left, strip_size - strip_off);
[da0570a]617 len = vol->bsize * cnt;
[978130a]618 hr_add_ba_offset(vol, &phys_block);
[fad91b9]619 switch (type) {
620 case HR_BD_SYNC:
[da0570a]621 if (vol->extents[extent].status != HR_EXT_ONLINE)
622 break;
[fad91b9]623 rc = block_sync_cache(vol->extents[extent].svc_id,
[978130a]624 phys_block, cnt);
[da0570a]625 /* allow unsupported sync */
626 if (rc == ENOTSUP)
627 rc = EOK;
[fad91b9]628 break;
629 case HR_BD_READ:
[da0570a]630 retry_read:
631 ssize_t bad = hr_raid5_get_bad_ext(vol);
[521b387]632 if (bad > -1 && extent == (size_t)bad) {
[da0570a]633 rc = hr_raid5_read_degraded(vol, bad,
634 phys_block, data_read, cnt);
635 } else {
636 rc = block_read_direct(vol->extents[extent].svc_id,
637 phys_block, cnt, data_read);
638 }
639 data_read += len;
[fad91b9]640 break;
641 case HR_BD_WRITE:
[da0570a]642 retry_write:
643 rc = hr_raid5_write(vol, p_extent, extent, phys_block,
[978130a]644 data_write, cnt);
[da0570a]645 data_write += len;
[dceb6e7]646 break;
[fad91b9]647 default:
648 rc = EINVAL;
[da0570a]649 goto error;
[fad91b9]650 }
651
[da0570a]652 if (rc == ENOMEM)
[fad91b9]653 goto error;
654
[aa7864b]655 hr_raid5_handle_extent_error(vol, extent, rc);
[da0570a]656
657 if (rc != EOK) {
658 rc = hr_raid5_update_vol_status(vol);
659 if (rc == EOK) {
660 /*
661 * State changed from ONLINE -> DEGRADED,
662 * rewind and retry
663 */
664 if (type == HR_BD_WRITE) {
665 data_write -= len;
666 goto retry_write;
667 } else if (type == HR_BD_WRITE) {
668 data_read -= len;
669 goto retry_read;
670 }
671 } else {
672 rc = EIO;
673 goto error;
674 }
675 }
676
[978130a]677 left -= cnt;
678 strip_off = 0;
679 stripe++;
[d7768d11]680
681 ext_stripe = stripe / (vol->extent_no - 1); /* stripe level */
682
[37a9c1e]683 if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_0R) {
[eb31781]684 p_extent =
685 (stripe / (vol->extent_no - 1)) % vol->extent_no;
[d7768d11]686 } else if (level == HR_LVL_5 &&
[37a9c1e]687 (layout == HR_RLQ_RAID5_NR || layout == HR_RLQ_RAID5_NC)) {
[d7768d11]688 p_extent = (vol->extent_no - 1) -
689 (stripe / (vol->extent_no - 1)) % vol->extent_no;
690 }
691
[37a9c1e]692 if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_0) {
[d7768d11]693 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]694 } else if (level == HR_LVL_4 && layout == HR_RLQ_RAID4_N) {
[d7768d11]695 extent = stripe % (vol->extent_no - 1);
696 } else if (level == HR_LVL_5 &&
[37a9c1e]697 (layout == HR_RLQ_RAID5_0R || layout == HR_RLQ_RAID5_NR)) {
[d7768d11]698 if ((stripe % (vol->extent_no - 1)) < p_extent)
699 extent = stripe % (vol->extent_no - 1);
700 else
701 extent = (stripe % (vol->extent_no - 1)) + 1;
[37a9c1e]702 } else if (level == HR_LVL_5 && layout == HR_RLQ_RAID5_NC) {
[eb31781]703 extent =
704 ((stripe % (vol->extent_no - 1)) + p_extent + 1) %
705 vol->extent_no;
[d7768d11]706 }
[dceb6e7]707 }
708
[fad91b9]709error:
[bf0a791]710 (void)hr_raid5_update_vol_status(vol);
[f1be66bf]711 fibril_rwlock_write_unlock(&vol->states_lock);
[abc2c4b]712 fibril_mutex_unlock(&vol->lock);
[dceb6e7]713 return rc;
714}
715
[aa7864b]716static errno_t hr_raid5_rebuild(void *arg)
717{
718 HR_DEBUG("hr_raid5_rebuild()\n");
719
720 hr_volume_t *vol = arg;
721 errno_t rc = EOK;
722 void *buf = NULL, *xorbuf = NULL;
723
724 fibril_mutex_lock(&vol->lock);
[f1be66bf]725 fibril_rwlock_read_lock(&vol->extents_lock);
726 fibril_rwlock_write_lock(&vol->states_lock);
[aa7864b]727
728 if (vol->hotspare_no == 0) {
729 HR_WARN("hr_raid5_rebuild(): no free hotspares on \"%s\", "
730 "aborting rebuild\n", vol->devname);
731 /* retval isn't checked for now */
732 goto end;
733 }
734
[65706f1]735 size_t bad = vol->extent_no;
736 for (size_t i = 0; i < vol->extent_no; i++) {
[aa7864b]737 if (vol->extents[i].status == HR_EXT_FAILED) {
738 bad = i;
739 break;
740 }
741 }
742
[65706f1]743 if (bad == vol->extent_no) {
[aa7864b]744 HR_WARN("hr_raid5_rebuild(): no bad extent on \"%s\", "
745 "aborting rebuild\n", vol->devname);
746 /* retval isn't checked for now */
747 goto end;
748 }
749
750 size_t hotspare_idx = vol->hotspare_no - 1;
751
[a0c3080]752 hr_ext_status_t hs_state = vol->hotspares[hotspare_idx].status;
753 if (hs_state != HR_EXT_HOTSPARE) {
754 HR_ERROR("hr_raid5_rebuild(): invalid hotspare state \"%s\", "
755 "aborting rebuild\n", hr_get_ext_status_msg(hs_state));
756 rc = EINVAL;
757 goto end;
758 }
759
760 HR_DEBUG("hr_raid5_rebuild(): swapping in hotspare\n");
761
762 block_fini(vol->extents[bad].svc_id);
763
[aa7864b]764 vol->extents[bad].svc_id = vol->hotspares[hotspare_idx].svc_id;
[a0c3080]765 hr_update_ext_status(vol, bad, HR_EXT_HOTSPARE);
[aa7864b]766
767 vol->hotspares[hotspare_idx].svc_id = 0;
[f1be66bf]768 fibril_mutex_lock(&vol->hotspare_lock);
[a0c3080]769 hr_update_hotspare_status(vol, hotspare_idx, HR_EXT_MISSING);
[f1be66bf]770 fibril_mutex_unlock(&vol->hotspare_lock);
[aa7864b]771
[a0c3080]772 vol->hotspare_no--;
[aa7864b]773
[a0c3080]774 hr_extent_t *rebuild_ext = &vol->extents[bad];
[aa7864b]775
[a0c3080]776 rc = block_init(rebuild_ext->svc_id);
[aa7864b]777 if (rc != EOK) {
778 HR_ERROR("hr_raid5_rebuild(): initing (%lu) failed, "
[a0c3080]779 "aborting rebuild\n", rebuild_ext->svc_id);
[aa7864b]780 goto end;
781 }
782
[a0c3080]783 HR_DEBUG("hr_raid5_rebuild(): starting rebuild on (%lu)\n",
784 rebuild_ext->svc_id);
785
786 hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
787 hr_update_vol_status(vol, HR_VOL_REBUILD);
788
[aa7864b]789 uint64_t max_blks = DATA_XFER_LIMIT / vol->bsize;
[65706f1]790 uint64_t left = vol->data_blkno / (vol->extent_no - 1);
[aa7864b]791 buf = malloc(max_blks * vol->bsize);
792 xorbuf = malloc(max_blks * vol->bsize);
793
794 uint64_t ba = 0, cnt;
795 hr_add_ba_offset(vol, &ba);
[a0c3080]796
[aa7864b]797 while (left != 0) {
798 cnt = min(left, max_blks);
799
800 /*
801 * Almost the same as read_degraded,
802 * but we don't want to allocate new
803 * xorbuf each blk rebuild batch.
804 */
805 bool first = true;
[65706f1]806 for (size_t i = 0; i < vol->extent_no; i++) {
[aa7864b]807 if (i == bad)
808 continue;
[8160e4c0]809 if (first)
810 rc = block_read_direct(vol->extents[i].svc_id,
811 ba, cnt, xorbuf);
812 else
813 rc = block_read_direct(vol->extents[i].svc_id,
814 ba, cnt, buf);
[aa7864b]815 if (rc != EOK) {
816 hr_raid5_handle_extent_error(vol, i, rc);
817 HR_ERROR("rebuild on \"%s\" (%lu), failed due "
818 "to a failed ONLINE extent, number %lu\n",
819 vol->devname, vol->svc_id, i);
820 goto end;
821 }
822
[8160e4c0]823 if (!first)
[aa7864b]824 xor(xorbuf, buf, cnt * vol->bsize);
[8160e4c0]825 else
826 first = false;
[aa7864b]827 }
828
[a0c3080]829 rc = block_write_direct(rebuild_ext->svc_id, ba, cnt, xorbuf);
[aa7864b]830 if (rc != EOK) {
831 hr_raid5_handle_extent_error(vol, bad, rc);
832 HR_ERROR("rebuild on \"%s\" (%lu), failed due to "
833 "the rebuilt extent number %lu failing\n",
834 vol->devname, vol->svc_id, bad);
835 goto end;
836 }
837
838 ba += cnt;
839 left -= cnt;
[40bf2c6]840
841 /*
842 * Let other IO requests be served
843 * during rebuild.
844 */
[f1be66bf]845 fibril_rwlock_write_unlock(&vol->states_lock);
[40bf2c6]846 fibril_mutex_unlock(&vol->lock);
847 fibril_mutex_lock(&vol->lock);
[f1be66bf]848 fibril_rwlock_write_lock(&vol->states_lock);
[aa7864b]849 }
850
851 HR_DEBUG("hr_raid5_rebuild(): rebuild finished on \"%s\" (%lu), "
852 "extent number %lu\n", vol->devname, vol->svc_id, hotspare_idx);
853
854 hr_update_ext_status(vol, bad, HR_EXT_ONLINE);
855 /*
856 * For now write metadata at the end, because
857 * we don't sync metada accross extents yet.
858 */
859 hr_write_meta_to_ext(vol, bad);
860end:
[bf0a791]861 (void)hr_raid5_update_vol_status(vol);
[aa7864b]862
[f1be66bf]863 fibril_rwlock_write_unlock(&vol->states_lock);
864 fibril_rwlock_read_unlock(&vol->extents_lock);
[aa7864b]865 fibril_mutex_unlock(&vol->lock);
866
867 if (buf != NULL)
868 free(buf);
869
870 if (xorbuf != NULL)
871 free(xorbuf);
872
873 return rc;
874}
875
[dceb6e7]876/** @}
877 */
Note: See TracBrowser for help on using the repository browser.