source: mainline/uspace/srv/bd/hr/raid0.c@56214383

Last change on this file since 56214383 was 8a65373, checked in by Miroslav Cimerman <mc@…>, 9 months ago

hr: move registering out of specific RAIDs

/*
 * Copyright (c) 2025 Miroslav Cimerman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup hr
 * @{
 */
/**
 * @file
 * @brief RAID level 0 (striping) volume driver.
 */

#include <abi/ipc/ipc.h>
#include <bd_srv.h>
#include <block.h>
#include <errno.h>
#include <hr.h>
#include <io/log.h>
#include <ipc/hr.h>
#include <ipc/services.h>
#include <loc.h>
#include <task.h>
#include <stdio.h>
#include <stdlib.h>
#include <str_error.h>

#include "io.h"
#include "superblock.h"
#include "util.h"
#include "var.h"

extern loc_srv_t *hr_srv;

static void hr_raid0_update_vol_status(hr_volume_t *);
static errno_t hr_raid0_bd_op(hr_bd_op_type_t, bd_srv_t *, aoff64_t, size_t,
    void *, const void *, size_t);

/* bdops */
static errno_t hr_raid0_bd_open(bd_srvs_t *, bd_srv_t *);
static errno_t hr_raid0_bd_close(bd_srv_t *);
static errno_t hr_raid0_bd_read_blocks(bd_srv_t *, aoff64_t, size_t, void *,
    size_t);
static errno_t hr_raid0_bd_sync_cache(bd_srv_t *, aoff64_t, size_t);
static errno_t hr_raid0_bd_write_blocks(bd_srv_t *, aoff64_t, size_t,
    const void *, size_t);
static errno_t hr_raid0_bd_get_block_size(bd_srv_t *, size_t *);
static errno_t hr_raid0_bd_get_num_blocks(bd_srv_t *, aoff64_t *);

static bd_ops_t hr_raid0_bd_ops = {
	.open = hr_raid0_bd_open,
	.close = hr_raid0_bd_close,
	.sync_cache = hr_raid0_bd_sync_cache,
	.read_blocks = hr_raid0_bd_read_blocks,
	.write_blocks = hr_raid0_bd_write_blocks,
	.get_block_size = hr_raid0_bd_get_block_size,
	.get_num_blocks = hr_raid0_bd_get_num_blocks
};

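/** Set up the block device server interface for a new RAID 0 volume.
 *
 * Fails unless the volume has at least two extents and is ONLINE.
 */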
errno_t hr_raid0_create(hr_volume_t *new_volume)
{
	assert(new_volume->level == HR_LVL_0);

	if (new_volume->extent_no < 2) {
		HR_ERROR("RAID 0 array needs at least 2 devices\n");
		return EINVAL;
	}

	hr_raid0_update_vol_status(new_volume);
	if (new_volume->status != HR_VOL_ONLINE)
		return EINVAL;

	bd_srvs_init(&new_volume->hr_bds);
	new_volume->hr_bds.ops = &hr_raid0_bd_ops;
	new_volume->hr_bds.sarg = new_volume;

	return EOK;
}

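/** Derive the volume geometry from its extents.
 *
 * Usable capacity is the total block count minus the data offset
 * reserved at the start of each extent; the strip size is fixed at
 * HR_STRIP_SIZE.
 */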
errno_t hr_raid0_init(hr_volume_t *vol)
{
	errno_t rc;
	size_t bsize;
	uint64_t total_blkno;

	assert(vol->level == HR_LVL_0);

	rc = hr_check_devs(vol, &total_blkno, &bsize);
	if (rc != EOK)
		return rc;

	vol->nblocks = total_blkno;
	vol->bsize = bsize;
	vol->data_offset = HR_DATA_OFF;
	vol->data_blkno = vol->nblocks - (vol->data_offset * vol->extent_no);
	vol->strip_size = HR_STRIP_SIZE;

	return EOK;
}

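/** State event handler: re-evaluate the volume state. */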
void hr_raid0_status_event(hr_volume_t *vol)
{
	hr_raid0_update_vol_status(vol);
}

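/*
 * bd_srv interface callbacks. Open and close only track the volume's
 * open count; reads, writes and syncs are funneled into hr_raid0_bd_op().
 */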
static errno_t hr_raid0_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
{
	HR_DEBUG("%s()", __func__);

	hr_volume_t *vol = bd->srvs->sarg;

	atomic_fetch_add_explicit(&vol->open_cnt, 1, memory_order_relaxed);

	return EOK;
}

static errno_t hr_raid0_bd_close(bd_srv_t *bd)
{
	HR_DEBUG("%s()", __func__);

	hr_volume_t *vol = bd->srvs->sarg;

	atomic_fetch_sub_explicit(&vol->open_cnt, 1, memory_order_relaxed);

	return EOK;
}

static errno_t hr_raid0_bd_sync_cache(bd_srv_t *bd, aoff64_t ba, size_t cnt)
{
	return hr_raid0_bd_op(HR_BD_SYNC, bd, ba, cnt, NULL, NULL, 0);
}

static errno_t hr_raid0_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return hr_raid0_bd_op(HR_BD_READ, bd, ba, cnt, buf, NULL, size);
}

static errno_t hr_raid0_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *data, size_t size)
{
	return hr_raid0_bd_op(HR_BD_WRITE, bd, ba, cnt, NULL, data, size);
}

static errno_t hr_raid0_bd_get_block_size(bd_srv_t *bd, size_t *rsize)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rsize = vol->bsize;
	return EOK;
}

static errno_t hr_raid0_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *rnb)
{
	hr_volume_t *vol = bd->srvs->sarg;

	*rnb = vol->data_blkno;
	return EOK;
}

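/** Recompute the volume state from the states of its extents.
 *
 * RAID 0 has no redundancy: the volume is ONLINE only while every
 * extent is ONLINE, otherwise it is marked FAULTY.
 */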
static void hr_raid0_update_vol_status(hr_volume_t *vol)
{
	fibril_rwlock_read_lock(&vol->states_lock);

	hr_vol_status_t old_state = vol->status;

	for (size_t i = 0; i < vol->extent_no; i++) {
		if (vol->extents[i].status != HR_EXT_ONLINE) {
			fibril_rwlock_read_unlock(&vol->states_lock);

			if (old_state != HR_VOL_FAULTY) {
				fibril_rwlock_write_lock(&vol->states_lock);
				hr_update_vol_status(vol, HR_VOL_FAULTY);
				fibril_rwlock_write_unlock(&vol->states_lock);
			}
			return;
		}
	}
	fibril_rwlock_read_unlock(&vol->states_lock);

	if (old_state != HR_VOL_ONLINE) {
		fibril_rwlock_write_lock(&vol->states_lock);
		hr_update_vol_status(vol, HR_VOL_ONLINE);
		fibril_rwlock_write_unlock(&vol->states_lock);
	}
}

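/** I/O error callback shared by all requests on the volume.
 *
 * Marks the affected extent MISSING (on ENOENT) or FAILED (on any
 * other error) and degrades the whole volume to FAULTY.
 */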
static void raid0_state_callback(hr_volume_t *vol, size_t extent, errno_t rc)
{
	if (rc == EOK)
		return;

	fibril_rwlock_write_lock(&vol->states_lock);

	switch (rc) {
	case ENOENT:
		hr_update_ext_status(vol, extent, HR_EXT_MISSING);
		break;
	default:
		hr_update_ext_status(vol, extent, HR_EXT_FAILED);
	}

	hr_update_vol_status(vol, HR_VOL_FAULTY);

	fibril_rwlock_write_unlock(&vol->states_lock);
}

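/** Entry point for READ, WRITE and SYNC requests on the volume.
 *
 * Splits the request along strip boundaries, submits one hr_io worker
 * per strip as a single request group (hr_fgroup) and waits for the
 * whole group to complete.
 */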
static errno_t hr_raid0_bd_op(hr_bd_op_type_t type, bd_srv_t *bd, aoff64_t ba,
    size_t cnt, void *dst, const void *src, size_t size)
{
	hr_volume_t *vol = bd->srvs->sarg;
	errno_t rc;
	uint64_t phys_block, len;
	size_t left;
	const uint8_t *data_write = src;
	uint8_t *data_read = dst;

	fibril_rwlock_read_lock(&vol->states_lock);
	if (vol->status != HR_VOL_ONLINE) {
		fibril_rwlock_read_unlock(&vol->states_lock);
		return EIO;
	}
	fibril_rwlock_read_unlock(&vol->states_lock);

	/* propagate sync */
	if (type == HR_BD_SYNC && ba == 0 && cnt == 0) {
		hr_fgroup_t *group = hr_fgroup_create(vol->fge,
		    vol->extent_no);
		if (group == NULL)
			return ENOMEM;

		for (size_t i = 0; i < vol->extent_no; i++) {
			hr_io_t *io = hr_fgroup_alloc(group);
			io->extent = i;
			io->ba = ba;
			io->cnt = cnt;
			io->type = type;
			io->vol = vol;
			io->state_callback = raid0_state_callback;

			hr_fgroup_submit(group, hr_io_worker, io);
		}

		size_t bad;
		rc = hr_fgroup_wait(group, NULL, &bad);
		if (rc == ENOMEM)
			return ENOMEM;

		if (bad > 0)
			return EIO;

		return EOK;
	}

	if (type == HR_BD_READ || type == HR_BD_WRITE)
		if (size < cnt * vol->bsize)
			return EINVAL;

	rc = hr_check_ba_range(vol, cnt, ba);
	if (rc != EOK)
		return rc;

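	/*
	 * Map the logical address onto the striped layout: strips are laid
	 * out round-robin across the extents, so strip number ba / strip_size
	 * lives on extent (strip_no % extent_no) at stripe
	 * (strip_no / extent_no); strip_off is the offset within that strip.
	 */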
	uint64_t strip_size = vol->strip_size / vol->bsize; /* in blocks */
	uint64_t strip_no = ba / strip_size;
	uint64_t extent = strip_no % vol->extent_no;
	uint64_t stripe = strip_no / vol->extent_no;
	uint64_t strip_off = ba % strip_size;

	left = cnt;

	/* calculate how many strips the I/O spans */
	size_t end_strip_no = (ba + cnt - 1) / strip_size;
	size_t span = end_strip_no - strip_no + 1;

	hr_fgroup_t *group = hr_fgroup_create(vol->fge, span);
	if (group == NULL)
		return ENOMEM;

	while (left != 0) {
		phys_block = stripe * strip_size + strip_off;
		cnt = min(left, strip_size - strip_off);
		len = vol->bsize * cnt;
		hr_add_ba_offset(vol, &phys_block);

		hr_io_t *io = hr_fgroup_alloc(group);
		io->extent = extent;
		io->data_write = data_write;
		io->data_read = data_read;
		io->ba = phys_block;
		io->cnt = cnt;
		io->type = type;
		io->vol = vol;
		io->state_callback = raid0_state_callback;

		hr_fgroup_submit(group, hr_io_worker, io);

		left -= cnt;
		if (left == 0)
			break;

		if (type == HR_BD_READ)
			data_read += len;
		else if (type == HR_BD_WRITE)
			data_write += len;

		strip_off = 0;
		extent++;
		if (extent >= vol->extent_no) {
			stripe++;
			extent = 0;
		}
	}

	size_t bad;
	rc = hr_fgroup_wait(group, NULL, &bad);
	if (rc == ENOMEM && type == HR_BD_READ)
		return ENOMEM;

	if (bad > 0)
		return EIO;

	return EOK;
}

/** @}
 */