source: mainline/uspace/srv/bd/hr/raid1.c@ 95ca19d

Last change on this file since 95ca19d was 95ca19d, checked in by Miroslav Cimerman <mc@…>, 5 weeks ago

hr: add --read-only volume flag

  • Property mode set to 100644
File size: 13.9 KB
/*
 * Copyright (c) 2025 Miroslav Cimerman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup hr
 * @{
 */
/**
 * @file
 */

#include <abi/ipc/ipc.h>
#include <assert.h>
#include <bd_srv.h>
#include <block.h>
#include <errno.h>
#include <hr.h>
#include <inttypes.h>
#include <io/log.h>
#include <ipc/hr.h>
#include <ipc/services.h>
#include <loc.h>
#include <task.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <str_error.h>

#include "fge.h"
#include "io.h"
#include "superblock.h"
#include "util.h"
#include "var.h"

static void hr_raid1_vol_state_eval_forced(hr_volume_t *);
static size_t hr_raid1_count_good_extents(hr_volume_t *, uint64_t, size_t,
    uint64_t);
static errno_t hr_raid1_bd_op(hr_bd_op_type_t, hr_volume_t *, aoff64_t, size_t,
    void *, const void *, size_t);
static errno_t hr_raid1_rebuild(void *);

/* bdops */
static errno_t hr_raid1_bd_open(bd_srvs_t *, bd_srv_t *);
static errno_t hr_raid1_bd_close(bd_srv_t *);
static errno_t hr_raid1_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
    size_t);
static errno_t hr_raid1_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
static errno_t hr_raid1_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
    const void *, size_t);
static errno_t hr_raid1_bd_get_block_size(bd_srv_t *, size_t *);
static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *, aoff64_t *);

static bd_ops_t hr_raid1_bd_ops = {
	.open = hr_raid1_bd_open,
	.close = hr_raid1_bd_close,
	.sync_cache = hr_raid1_bd_sync_cache,
	.read_blocks = hr_raid1_bd_read_blocks,
	.write_blocks = hr_raid1_bd_write_blocks,
	.get_block_size = hr_raid1_bd_get_block_size,
	.get_num_blocks = hr_raid1_bd_get_num_blocks
};

extern loc_srv_t *hr_srv;

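/** Set up a new RAID 1 volume.
 *
 * Validates the level and extent count, wires up the block device
 * ops and refuses creation if the evaluated volume state is unusable.
 */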
errno_t hr_raid1_create(hr_volume_t *new_volume)
{
	HR_DEBUG("%s()", __func__);

	if (new_volume->level != HR_LVL_1)
		return EINVAL;

	if (new_volume->extent_no < 2) {
		HR_ERROR("RAID 1 volume needs at least 2 devices\n");
		return EINVAL;
	}

	bd_srvs_init(&new_volume->hr_bds);
	new_volume->hr_bds.ops = &hr_raid1_bd_ops;
	new_volume->hr_bds.sarg = new_volume;

	hr_raid1_vol_state_eval_forced(new_volume);

	fibril_rwlock_read_lock(&new_volume->states_lock);
	hr_vol_state_t state = new_volume->state;
	fibril_rwlock_read_unlock(&new_volume->states_lock);
	if (state == HR_VOL_FAULTY || state == HR_VOL_NONE) {
		HR_NOTE("\"%s\": unusable state, not creating\n",
		    new_volume->devname);
		return EINVAL;
	}

	return EOK;
}

/*
 * Called only once in the volume's lifetime.
 */
errno_t hr_raid1_init(hr_volume_t *vol)
{
	HR_DEBUG("%s()", __func__);

	if (vol->level != HR_LVL_1)
		return EINVAL;

	vol->data_offset = vol->meta_ops->get_data_offset();
	vol->data_blkno = vol->truncated_blkno - vol->meta_ops->get_size();
	vol->strip_size = 0;

	return EOK;
}

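/** Add a hotspare extent and re-evaluate the volume state. */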
errno_t hr_raid1_add_hotspare(hr_volume_t *vol, service_id_t hotspare)
{
	HR_DEBUG("%s()", __func__);

	errno_t rc = hr_util_add_hotspare(vol, hotspare);

	hr_raid1_vol_state_eval(vol);

	return rc;
}

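/** Re-evaluate the volume state, but only if it was marked dirty.
 *
 * On a dirty state the metadata counter is incremented and saved
 * before the forced evaluation runs.
 */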
void hr_raid1_vol_state_eval(hr_volume_t *vol)
{
	HR_DEBUG("%s()", __func__);

	bool exp = true;
	if (!atomic_compare_exchange_strong(&vol->state_dirty, &exp, false))
		return;

	vol->meta_ops->inc_counter(vol);
	vol->meta_ops->save(vol, WITH_STATE_CALLBACK);

	hr_raid1_vol_state_eval_forced(vol);
}

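/** Extent state callback for failed I/O.
 *
 * Marks the extent MISSING on ENOENT and FAILED on any other error,
 * then flags the volume state as dirty.
 */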
void hr_raid1_ext_state_cb(hr_volume_t *vol, size_t extent, errno_t rc)
{
	HR_DEBUG("%s()", __func__);

	assert(fibril_rwlock_is_locked(&vol->extents_lock));

	if (rc == EOK)
		return;

	fibril_rwlock_write_lock(&vol->states_lock);

	switch (rc) {
	case ENOENT:
		hr_update_ext_state(vol, extent, HR_EXT_MISSING);
		break;
	default:
		hr_update_ext_state(vol, extent, HR_EXT_FAILED);
	}

	hr_mark_vol_state_dirty(vol);

	fibril_rwlock_write_unlock(&vol->states_lock);
}

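/** Unconditionally recompute the volume state.
 *
 * No healthy extents means FAULTY; fewer healthy extents than total
 * means DEGRADED, possibly spawning a rebuild fibril; all extents
 * healthy means OPTIMAL.
 */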
static void hr_raid1_vol_state_eval_forced(hr_volume_t *vol)
{
	HR_DEBUG("%s()", __func__);

	fibril_rwlock_read_lock(&vol->extents_lock);
	fibril_rwlock_read_lock(&vol->states_lock);

	hr_vol_state_t old_state = vol->state;
	size_t healthy = hr_count_extents(vol, HR_EXT_ONLINE);
	size_t invalid_no = hr_count_extents(vol, HR_EXT_INVALID);
	size_t rebuild_no = hr_count_extents(vol, HR_EXT_REBUILD);

	fibril_mutex_lock(&vol->hotspare_lock);
	size_t hs_no = vol->hotspare_no;
	fibril_mutex_unlock(&vol->hotspare_lock);

	fibril_rwlock_read_unlock(&vol->states_lock);
	fibril_rwlock_read_unlock(&vol->extents_lock);

	if (healthy == 0) {
		if (old_state != HR_VOL_FAULTY) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_state(vol, HR_VOL_FAULTY);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}
	} else if (healthy < vol->extent_no) {
		if (old_state != HR_VOL_REBUILD &&
		    old_state != HR_VOL_DEGRADED) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_state(vol, HR_VOL_DEGRADED);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}

		if (old_state != HR_VOL_REBUILD) {
			if (hs_no > 0 || invalid_no > 0 || rebuild_no > 0) {
				fid_t fib = fibril_create(hr_raid1_rebuild,
				    vol);
				if (fib == 0)
					return;
				fibril_start(fib);
				fibril_detach(fib);
			}
		}
	} else {
		if (old_state != HR_VOL_OPTIMAL) {
			fibril_rwlock_write_lock(&vol->states_lock);
			hr_update_vol_state(vol, HR_VOL_OPTIMAL);
			fibril_rwlock_write_unlock(&vol->states_lock);
		}
	}
}

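/** Open the volume's block device server session (bumps the open count). */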
static errno_t hr_raid1_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
{
	HR_DEBUG("%s()", __func__);

	hr_volume_t *vol = bd->srvs->sarg;

	atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);

	return EOK;
}

static errno_t hr_raid1_bd_close(bd_srv_t *bd)
{
	HR_DEBUG("%s()", __func__);

	hr_volume_t *vol = bd->srvs->sarg;

	atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);

	return EOK;
}

static errno_t hr_raid1_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	hr_volume_t *vol = bd->srvs->sarg;

	return hr_sync_extents(vol);
}

static errno_t hr_raid1_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	hr_volume_t *vol = bd->srvs->sarg;

	return hr_raid1_bd_op(HR_BD_READ, vol, ba, cnt, buf, NULL, size);
}

static errno_t hr_raid1_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	hr_volume_t *vol = bd->srvs->sarg;

	if (vol->vflags & HR_VOL_FLAG_READ_ONLY)
		return ENOTSUP;

	return hr_raid1_bd_op(HR_BD_WRITE, vol, ba, cnt, NULL, data, size);
}

static errno_t hr_raid1_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rsize = vol->bsize;
	return EOK;
}

static errno_t hr_raid1_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rnb = vol->data_blkno;
	return EOK;
}

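/** Count extents that a write to the given range can target.
 *
 * An extent qualifies if it is ONLINE, or in REBUILD with the rebuild
 * position at or past the first written block.
 */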
static size_t hr_raid1_count_good_extents(hr_volume_t *vol, uint64_t ba,
    size_t cnt, uint64_t rebuild_blk)
{
	assert(fibril_rwlock_is_locked(&vol->extents_lock));
	assert(fibril_rwlock_is_locked(&vol->states_lock));

	size_t count = 0;
	for (size_t i = 0; i < vol->extent_no; i++) {
		if (vol->extents[i].state == HR_EXT_ONLINE ||
		    (vol->extents[i].state == HR_EXT_REBUILD &&
		    rebuild_blk >= ba)) {
			count++;
		}
	}

	return count;
}

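/** Common backend for reads and writes.
 *
 * A read is served from the first usable extent; a write is fanned out
 * in parallel to all usable extents through the fibril group executor.
 * The operation succeeds if it succeeds on at least one extent.
 */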
static errno_t hr_raid1_bd_op(hr_bd_op_type_t type, hr_volume_t *vol,
    aoff64_t ba, size_t cnt, void *data_read, const void *data_write,
    size_t size)
{
	HR_DEBUG("%s()", __func__);

	hr_range_lock_t *rl = NULL;
	errno_t rc;
	size_t i;
	uint64_t rebuild_blk;

	if (size < cnt * vol->bsize)
		return EINVAL;

	fibril_rwlock_read_lock(&vol->states_lock);
	hr_vol_state_t vol_state = vol->state;
	fibril_rwlock_read_unlock(&vol->states_lock);

	if (vol_state == HR_VOL_FAULTY || vol_state == HR_VOL_NONE)
		return EIO;

	/* increment the metadata counter only on the first write */
	bool exp = false;
	if (type == HR_BD_WRITE &&
	    atomic_compare_exchange_strong(&vol->first_write, &exp, true)) {
		vol->meta_ops->inc_counter(vol);
		vol->meta_ops->save(vol, WITH_STATE_CALLBACK);
	}

	rc = hr_check_ba_range(vol, cnt, ba);
	if (rc != EOK)
		return rc;

	hr_add_data_offset(vol, &ba);

	/*
	 * The extent order has to stay locked for the whole duration
	 * of the I/O, so that the workers have consistent targets.
	 */
	fibril_rwlock_read_lock(&vol->extents_lock);

	size_t successful = 0;
	switch (type) {
	case HR_BD_READ:
		rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
		    memory_order_relaxed);

		for (i = 0; i < vol->extent_no; i++) {
			fibril_rwlock_read_lock(&vol->states_lock);
			hr_ext_state_t state = vol->extents[i].state;
			fibril_rwlock_read_unlock(&vol->states_lock);

			if (state != HR_EXT_ONLINE &&
			    (state != HR_EXT_REBUILD ||
			    ba + cnt - 1 >= rebuild_blk)) {
				continue;
			}

			rc = hr_read_direct(vol->extents[i].svc_id, ba, cnt,
			    data_read);
			if (rc != EOK) {
				hr_raid1_ext_state_cb(vol, i, rc);
			} else {
				successful++;
				break;
			}
		}
		break;
	case HR_BD_WRITE:
		rl = hr_range_lock_acquire(vol, ba, cnt);

		fibril_rwlock_read_lock(&vol->states_lock);

		rebuild_blk = atomic_load_explicit(&vol->rebuild_blk,
		    memory_order_relaxed);

		size_t good = hr_raid1_count_good_extents(vol, ba, cnt,
		    rebuild_blk);

		hr_fgroup_t *group = hr_fgroup_create(vol->fge, good);

		for (i = 0; i < vol->extent_no; i++) {
			if (vol->extents[i].state != HR_EXT_ONLINE &&
			    (vol->extents[i].state != HR_EXT_REBUILD ||
			    ba > rebuild_blk)) {
				/*
				 * When an extent is being rebuilt, we only
				 * write to the part that has already been
				 * rebuilt. If the I/O starts past
				 * vol->rebuild_blk, skip the extent; the
				 * write gets replicated to it later by the
				 * rebuild itself.
				 */
				continue;
			}

			hr_io_t *io = hr_fgroup_alloc(group);
			io->extent = i;
			io->data_write = data_write;
			io->data_read = data_read;
			io->ba = ba;
			io->cnt = cnt;
			io->type = type;
			io->vol = vol;

			hr_fgroup_submit(group, hr_io_worker, io);
		}

		fibril_rwlock_read_unlock(&vol->states_lock);

		(void)hr_fgroup_wait(group, &successful, NULL);

		hr_range_lock_release(rl);

		break;
	default:
		assert(0);
	}

	if (successful > 0)
		rc = EOK;
	else
		rc = EIO;

	fibril_rwlock_read_unlock(&vol->extents_lock);

	hr_raid1_vol_state_eval(vol);

	return rc;
}

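/** Rebuild fibril.
 *
 * Copies the volume's data onto the rebuild target extent in
 * DATA_XFER_LIMIT-sized chunks, range-locking each chunk and
 * periodically saving the rebuild position to the metadata.
 */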
static errno_t hr_raid1_rebuild(void *arg)
{
	HR_DEBUG("%s()", __func__);

	hr_volume_t *vol = arg;
	void *buf = NULL;
	size_t rebuild_idx;
	hr_extent_t *rebuild_ext = NULL;
	errno_t rc;

	if (vol->vflags & HR_VOL_FLAG_READ_ONLY)
		return ENOTSUP;
	if (!(vol->meta_ops->get_flags() & HR_METADATA_ALLOW_REBUILD))
		return ENOTSUP;

	rc = hr_init_rebuild(vol, &rebuild_idx);
	if (rc != EOK)
		return rc;

	rebuild_ext = &vol->extents[rebuild_idx];

	size_t left = vol->data_blkno - vol->rebuild_blk;
	size_t max_blks = DATA_XFER_LIMIT / vol->bsize;
	buf = hr_malloc_waitok(max_blks * vol->bsize);

	size_t cnt;
	uint64_t ba = vol->rebuild_blk;
	hr_add_data_offset(vol, &ba);

	/*
	 * This is not strictly necessary, because a rebuild protects
	 * itself: there can only be one REBUILD running at a time.
	 */
	fibril_rwlock_read_lock(&vol->extents_lock);

	/* increment the metadata counter only on the first write */
	bool exp = false;
	if (atomic_compare_exchange_strong(&vol->first_write, &exp, true)) {
		vol->meta_ops->inc_counter(vol);
		vol->meta_ops->save(vol, WITH_STATE_CALLBACK);
	}

	hr_range_lock_t *rl = NULL;

	HR_NOTE("\"%s\": REBUILD started on extent no. %zu at block %" PRIu64
	    ".\n", vol->devname, rebuild_idx, ba);

	uint64_t written = 0;
	unsigned int percent, old_percent = 100;
	while (left != 0) {
		cnt = min(max_blks, left);

		rl = hr_range_lock_acquire(vol, ba, cnt);

		atomic_store_explicit(&vol->rebuild_blk, ba,
		    memory_order_relaxed);

		rc = hr_raid1_bd_op(HR_BD_READ, vol, ba, cnt, buf, NULL,
		    cnt * vol->bsize);
		if (rc != EOK) {
			hr_range_lock_release(rl);
			goto end;
		}

		rc = hr_write_direct(rebuild_ext->svc_id, ba, cnt, buf);
		if (rc != EOK) {
			hr_raid1_ext_state_cb(vol, rebuild_idx, rc);
			hr_range_lock_release(rl);
			goto end;
		}

		percent = ((ba + cnt) * 100) / vol->data_blkno;
		if (percent != old_percent) {
			if (percent % 5 == 0)
				HR_DEBUG("\"%s\" REBUILD progress: %u%%\n",
				    vol->devname, percent);
		}

		if (written * vol->bsize > HR_REBUILD_SAVE_BYTES) {
			vol->meta_ops->save_ext(vol, rebuild_idx,
			    WITH_STATE_CALLBACK);
			written = 0;
		}

		hr_range_lock_release(rl);

		written += cnt;
		ba += cnt;
		left -= cnt;
		old_percent = percent;
	}

	HR_DEBUG("hr_raid1_rebuild(): rebuild finished on \"%s\" (%" PRIun "), "
	    "extent no. %zu\n", vol->devname, vol->svc_id, rebuild_idx);

	fibril_rwlock_write_lock(&vol->states_lock);

	hr_update_ext_state(vol, rebuild_idx, HR_EXT_ONLINE);

	atomic_store_explicit(&vol->rebuild_blk, 0, memory_order_relaxed);

	hr_mark_vol_state_dirty(vol);

	fibril_rwlock_write_unlock(&vol->states_lock);
end:
	fibril_rwlock_read_unlock(&vol->extents_lock);

	hr_raid1_vol_state_eval(vol);

	free(buf);

	return rc;
}

/** @}
 */