source: mainline/uspace/srv/bd/hr/raid1.c@ bf0a791

Last change on this file since bf0a791 was bf0a791, checked in by Miroslav Cimerman <mc@…>, 7 months ago

hr: cstyle

  • Property mode set to 100644
File size: 12.6 KB
RevLine 
[94d84a0]1/*
2 * Copyright (c) 2024 Miroslav Cimerman
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup hr
30 * @{
31 */
32/**
33 * @file
34 */
35
36#include <bd_srv.h>
37#include <block.h>
38#include <errno.h>
39#include <hr.h>
40#include <io/log.h>
41#include <ipc/hr.h>
42#include <ipc/services.h>
43#include <loc.h>
44#include <task.h>
45#include <stdio.h>
46#include <stdlib.h>
47#include <str_error.h>
48
[6b8e89b0]49#include "superblock.h"
[da5c257]50#include "util.h"
[b0f1366]51#include "var.h"
[94d84a0]52
/* Location service server handle; defined elsewhere in this server. */
extern loc_srv_t *hr_srv;

/* Internal helpers (defined below). */
static errno_t hr_raid1_check_vol_status(hr_volume_t *);
static errno_t hr_raid1_update_vol_status(hr_volume_t *);
static void hr_raid1_handle_extent_error(hr_volume_t *, size_t, errno_t);
static errno_t hr_raid1_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
    void *, const void *, size_t);
static errno_t hr_raid1_rebuild(void *);

/* bdops */
static errno_t hr_raid1_bd_open(bd_srvs_t *, bd_srv_t *);
static errno_t hr_raid1_bd_close(bd_srv_t *);
static errno_t hr_raid1_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
    size_t);
static errno_t hr_raid1_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
static errno_t hr_raid1_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
    const void *, size_t);
static errno_t hr_raid1_bd_get_block_size(bd_srv_t *, size_t *);
static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *, aoff64_t *);

/* Block device operations vtable served by a RAID 1 volume. */
static bd_ops_t hr_raid1_bd_ops = {
	.open = hr_raid1_bd_open,
	.close = hr_raid1_bd_close,
	.sync_cache = hr_raid1_bd_sync_cache,
	.read_blocks = hr_raid1_bd_read_blocks,
	.write_blocks = hr_raid1_bd_write_blocks,
	.get_block_size = hr_raid1_bd_get_block_size,
	.get_num_blocks = hr_raid1_bd_get_num_blocks
};
82
[733564a]83errno_t hr_raid1_create(hr_volume_t *new_volume)
84{
85 errno_t rc;
86
87 assert(new_volume->level == HR_LVL_1);
88
89 if (new_volume->dev_no < 2) {
[d199a6f]90 HR_ERROR("RAID 1 array needs at least 2 devices\n");
[733564a]91 return EINVAL;
92 }
93
94 rc = hr_raid1_update_vol_status(new_volume);
95 if (rc != EOK)
96 return rc;
97
98 bd_srvs_init(&new_volume->hr_bds);
99 new_volume->hr_bds.ops = &hr_raid1_bd_ops;
100 new_volume->hr_bds.sarg = new_volume;
101
102 rc = hr_register_volume(new_volume);
103
104 return rc;
105}
106
107errno_t hr_raid1_init(hr_volume_t *vol)
108{
109 errno_t rc;
110 size_t bsize;
111 uint64_t total_blkno;
112
113 assert(vol->level == HR_LVL_1);
114
115 rc = hr_check_devs(vol, &total_blkno, &bsize);
116 if (rc != EOK)
117 return rc;
118
119 vol->nblocks = total_blkno / vol->dev_no;
120 vol->bsize = bsize;
121 vol->data_offset = HR_DATA_OFF;
122 vol->data_blkno = vol->nblocks - vol->data_offset;
123 vol->strip_size = 0;
124
125 return EOK;
126}
127
/*
 * Status event hook: re-evaluate the volume state under the volume
 * lock (e.g. after an extent changed state). The result is ignored
 * here; callers learn the new state through vol->status.
 */
void hr_raid1_status_event(hr_volume_t *vol)
{
	fibril_mutex_lock(&vol->lock);
	(void)hr_raid1_update_vol_status(vol);
	fibril_mutex_unlock(&vol->lock);
}
134
[5b320ac]135errno_t hr_raid1_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
136{
137 HR_DEBUG("hr_raid1_add_hotspare()\n");
138
139 fibril_mutex_lock(&vol->lock);
140
141 if (vol->hotspare_no >= HR_MAX_HOTSPARES) {
142 HR_ERROR("hr_raid1_add_hotspare(): cannot add more hotspares "
143 "to \"%s\"\n", vol->devname);
144 fibril_mutex_unlock(&vol->lock);
145 return ELIMIT;
146 }
147
148 vol->hotspares[vol->hotspare_no].svc_id = hotspare;
149 vol->hotspares[vol->hotspare_no].status = HR_EXT_HOTSPARE;
150 vol->hotspare_no++;
151
152 /*
153 * If the volume is degraded, start rebuild right away.
154 */
155 if (vol->status == HR_VOL_DEGRADED) {
156 HR_DEBUG("hr_raid1_add_hotspare(): volume in DEGRADED state, "
157 "spawning new rebuild fibril\n");
158 fid_t fib = fibril_create(hr_raid1_rebuild, vol);
159 if (fib == 0)
160 return EINVAL;
161 fibril_start(fib);
162 fibril_detach(fib);
163 }
164
165 fibril_mutex_unlock(&vol->lock);
166
167 return EOK;
168}
169
/* bd_ops: open a client session; nothing to set up, always EOK. */
static errno_t hr_raid1_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
{
	HR_DEBUG("hr_bd_open()\n");
	return EOK;
}
175
/* bd_ops: close a client session; nothing to tear down, always EOK. */
static errno_t hr_raid1_bd_close(bd_srv_t *bd)
{
	HR_DEBUG("hr_bd_close()\n");
	return EOK;
}
181
/* bd_ops: flush device caches; delegates to the common op dispatcher. */
static errno_t hr_raid1_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid1_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}
186
/* bd_ops: read cnt blocks at ba into buf; delegates to the dispatcher. */
static errno_t hr_raid1_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid1_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}
192
/* bd_ops: write cnt blocks at ba from data; delegates to the dispatcher. */
static errno_t hr_raid1_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid1_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}
198
199static errno_t hr_raid1_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
200{
201 hr_volume_t *vol = bd->srvs->sarg;
202
203 *rsize = vol->bsize;
204 return EOK;
205}
206
207static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
208{
209 hr_volume_t *vol = bd->srvs->sarg;
210
211 *rnb = vol->data_blkno;
212 return EOK;
213}
214
[d84773a]215static errno_t hr_raid1_check_vol_status(hr_volume_t *vol)
216{
217 if (vol->status == HR_VOL_ONLINE ||
[5b320ac]218 vol->status == HR_VOL_DEGRADED ||
219 vol->status == HR_VOL_REBUILD)
[d84773a]220 return EOK;
221 return EINVAL;
222}
223
224/*
225 * Update vol->status and return EOK if volume
226 * is usable
227 */
228static errno_t hr_raid1_update_vol_status(hr_volume_t *vol)
229{
230 hr_vol_status_t old_state = vol->status;
[5b320ac]231 size_t healthy = hr_count_extents(vol, HR_EXT_ONLINE);
[d84773a]232
233 if (healthy == 0) {
234 if (old_state != HR_VOL_FAULTY) {
[d199a6f]235 HR_WARN("RAID 1 needs at least 1 extent to be"
[5d96f427]236 "ONLINE, marking \"%s\" (%lu) volume as FAULTY",
[d84773a]237 vol->devname, vol->svc_id);
238 vol->status = HR_VOL_FAULTY;
239 }
240 return EINVAL;
241 } else if (healthy < vol->dev_no) {
[5b320ac]242 if (old_state != HR_VOL_DEGRADED &&
243 old_state != HR_VOL_REBUILD) {
[d199a6f]244 HR_WARN("RAID 1 array \"%s\" (%lu) has some "
[5b320ac]245 "unusable extent(s), marking volume as DEGRADED",
[d84773a]246 vol->devname, vol->svc_id);
[13ce552]247 vol->status = HR_VOL_DEGRADED;
[5b320ac]248 if (vol->hotspare_no > 0) {
249 fid_t fib = fibril_create(hr_raid1_rebuild,
250 vol);
251 if (fib == 0) {
252 return EINVAL;
253 }
254 fibril_start(fib);
255 fibril_detach(fib);
256 }
[d84773a]257 }
258 return EOK;
259 } else {
260 if (old_state != HR_VOL_ONLINE) {
[d199a6f]261 HR_WARN("RAID 1 array \"%s\" (%lu) has all extents "
[5d96f427]262 "active, marking volume as ONLINE",
[d84773a]263 vol->devname, vol->svc_id);
264 vol->status = HR_VOL_ONLINE;
265 }
266 return EOK;
267 }
268}
269
[f81960c5]270static void hr_raid1_handle_extent_error(hr_volume_t *vol, size_t extent,
[d84773a]271 errno_t rc)
272{
273 if (rc == ENOENT)
274 hr_update_ext_status(vol, extent, HR_EXT_MISSING);
275 else if (rc != EOK)
276 hr_update_ext_status(vol, extent, HR_EXT_FAILED);
277}
278
[fad91b9]279static errno_t hr_raid1_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
280 size_t cnt, void *data_read, const void *data_write, size_t size)
[94d84a0]281{
282 hr_volume_t *vol = bd->srvs->sarg;
283 errno_t rc;
284 size_t i;
285
[fad91b9]286 if (type == HR_BD_READ || type == HR_BD_WRITE)
287 if (size < cnt * vol->bsize)
288 return EINVAL;
289
[4a2a6b8b]290 rc = hr_check_ba_range(vol, cnt, ba);
291 if (rc != EOK)
[b0f1366]292 return rc;
[4a2a6b8b]293
294 hr_add_ba_offset(vol, &ba);
295
[abc2c4b]296 fibril_mutex_lock(&vol->lock);
[b0f1366]297
[d84773a]298 rc = hr_raid1_check_vol_status(vol);
299 if (rc != EOK) {
300 fibril_mutex_unlock(&vol->lock);
301 return EIO;
302 }
303
304 size_t successful = 0;
[fad91b9]305 switch (type) {
306 case HR_BD_SYNC:
307 for (i = 0; i < vol->dev_no; i++) {
[d84773a]308 if (vol->extents[i].status != HR_EXT_ONLINE)
309 continue;
[fad91b9]310 rc = block_sync_cache(vol->extents[i].svc_id, ba, cnt);
[182ffcc]311 if (rc != EOK && rc != ENOTSUP)
[f81960c5]312 hr_raid1_handle_extent_error(vol, i, rc);
[d84773a]313 else
314 successful++;
[fad91b9]315 }
316 break;
317 case HR_BD_READ:
318 for (i = 0; i < vol->dev_no; i++) {
[d84773a]319 if (vol->extents[i].status != HR_EXT_ONLINE)
320 continue;
[fad91b9]321 rc = block_read_direct(vol->extents[i].svc_id, ba, cnt,
322 data_read);
[d0f0744]323 if (rc != EOK) {
[f81960c5]324 hr_raid1_handle_extent_error(vol, i, rc);
[d0f0744]325 } else {
[d84773a]326 successful++;
[d0f0744]327 break;
328 }
[fad91b9]329 }
330 break;
331 case HR_BD_WRITE:
332 for (i = 0; i < vol->dev_no; i++) {
[b8409b9]333 if (vol->extents[i].status != HR_EXT_ONLINE ||
334 (vol->extents[i].status == HR_EXT_REBUILD &&
335 ba >= vol->rebuild_blk))
336 /*
337 * When the extent is being rebuilt,
338 * we only write to the part that is already
339 * rebuilt. If ba is more than vol->rebuild_blk,
340 * the write is going to be replicated later
341 * in the rebuild. TODO: test
342 */
[d84773a]343 continue;
[fad91b9]344 rc = block_write_direct(vol->extents[i].svc_id, ba, cnt,
345 data_write);
346 if (rc != EOK)
[f81960c5]347 hr_raid1_handle_extent_error(vol, i, rc);
[d84773a]348 else
349 successful++;
[fad91b9]350 }
351 break;
352 default:
353 rc = EINVAL;
[94d84a0]354 }
355
[d84773a]356 if (successful > 0)
357 rc = EOK;
358 else
359 rc = EIO;
360
[bf0a791]361 (void)hr_raid1_update_vol_status(vol);
[abc2c4b]362 fibril_mutex_unlock(&vol->lock);
[94d84a0]363 return rc;
364}
365
[5b320ac]366/*
367 * Put the last HOTSPARE extent in place
368 * of first DEGRADED, and start the rebuild.
369 */
370static errno_t hr_raid1_rebuild(void *arg)
371{
372 HR_DEBUG("hr_raid1_rebuild()\n");
373
374 hr_volume_t *vol = arg;
375 void *buf = NULL;
376 errno_t rc = EOK;
377
378 fibril_mutex_lock(&vol->lock);
379
380 if (vol->hotspare_no == 0) {
381 HR_WARN("hr_raid1_rebuild(): no free hotspares on \"%s\", "
382 "aborting rebuild\n", vol->devname);
383 /* retval isn't checked for now */
384 goto end;
385 }
386
387 size_t bad = vol->dev_no;
388 for (size_t i = 0; i < vol->dev_no; i++) {
389 if (vol->extents[i].status == HR_EXT_FAILED) {
390 bad = i;
391 break;
392 }
393 }
394
395 if (bad == vol->dev_no) {
396 HR_WARN("hr_raid1_rebuild(): no bad extent on \"%s\", "
397 "aborting rebuild\n", vol->devname);
398 /* retval isn't checked for now */
399 goto end;
400 }
401
402 block_fini(vol->extents[bad].svc_id);
403
404 size_t hotspare_idx = vol->hotspare_no - 1;
405
[b8409b9]406 vol->rebuild_blk = 0;
[5b320ac]407 vol->extents[bad].svc_id = vol->hotspares[hotspare_idx].svc_id;
408 hr_update_ext_status(vol, bad, HR_EXT_REBUILD);
409
410 vol->hotspares[hotspare_idx].svc_id = 0;
411 vol->hotspares[hotspare_idx].status = HR_EXT_MISSING;
412 vol->hotspare_no--;
413
414 HR_WARN("hr_raid1_rebuild(): changing volume \"%s\" (%lu) state "
415 "from %s to %s\n", vol->devname, vol->svc_id,
416 hr_get_vol_status_msg(vol->status),
417 hr_get_vol_status_msg(HR_VOL_REBUILD));
418 vol->status = HR_VOL_REBUILD;
419
420 hr_extent_t *hotspare = &vol->extents[bad];
421
422 HR_DEBUG("hr_raid1_rebuild(): initing (%lu)\n", hotspare->svc_id);
423
424 rc = block_init(hotspare->svc_id);
425 if (rc != EOK) {
426 HR_ERROR("hr_raid1_rebuild(): initing (%lu) failed, "
427 "aborting rebuild\n", hotspare->svc_id);
428 goto end;
429 }
430
431 size_t left = vol->data_blkno;
432 size_t max_blks = DATA_XFER_LIMIT / vol->bsize;
433 buf = malloc(max_blks * vol->bsize);
434
435 hr_extent_t *ext;
436
437 size_t cnt;
438 uint64_t ba = 0;
439 hr_add_ba_offset(vol, &ba);
440 while (left != 0) {
[b8409b9]441 vol->rebuild_blk = ba;
[5b320ac]442 cnt = min(max_blks, left);
443 for (size_t i = 0; i < vol->dev_no; i++) {
444 ext = &vol->extents[i];
445 if (ext->status == HR_EXT_ONLINE) {
446 rc = block_read_direct(ext->svc_id, ba, cnt,
447 buf);
448 if (rc != EOK) {
[f81960c5]449 hr_raid1_handle_extent_error(vol, i, rc);
[5b320ac]450 if (i + 1 < vol->dev_no) {
451 /* still might have one ONLINE */
452 continue;
453 } else {
454 HR_ERROR("rebuild on \"%s\" (%lu), failed due to "
455 "too many failed extents\n",
456 vol->devname, vol->svc_id);
457 goto end;
458 }
459 }
460 break;
461 }
462 }
463
464 rc = block_write_direct(hotspare->svc_id, ba, cnt, buf);
465 if (rc != EOK) {
[f81960c5]466 hr_raid1_handle_extent_error(vol, bad, rc);
467 HR_ERROR("rebuild on \"%s\" (%lu), failed due to "
468 "the rebuilt extent number %lu failing\n",
469 vol->devname, vol->svc_id, bad);
[5b320ac]470 goto end;
471
472 }
473
474 ba += cnt;
475 left -= cnt;
476 }
477
478 HR_DEBUG("hr_raid1_rebuild(): rebuild finished on \"%s\" (%lu), "
479 "extent number %lu\n", vol->devname, vol->svc_id, hotspare_idx);
480
[f81960c5]481 hr_update_ext_status(vol, bad, HR_EXT_ONLINE);
[5b320ac]482 /*
483 * For now write metadata at the end, because
484 * we don't sync metada accross extents yet.
485 */
486 hr_write_meta_to_ext(vol, bad);
487end:
[bf0a791]488 (void)hr_raid1_update_vol_status(vol);
[5b320ac]489
490 fibril_mutex_unlock(&vol->lock);
491
492 if (buf != NULL)
493 free(buf);
494
495 /* retval isn't checked anywhere for now */
496 return rc;
497}
498
[94d84a0]499/** @}
500 */
Note: See TracBrowser for help on using the repository browser.