source: mainline/uspace/srv/bd/hr/hr.c@ 9f15da1

Last change on this file since 9f15da1 was 401b9e42, checked in by Miroslav Cimerman <mc@…>, 7 months ago

hr: state_changed and pending_invalidation atomic flags

These flags are used to check whether there was a state
change, or if there is a pending invalidation, so that
we can avoid the slow code paths in a lockless fashion.

  • Property mode set to 100644
File size: 12.1 KB
Line 
1/*
2 * Copyright (c) 2025 Miroslav Cimerman
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup hr
30 * @{
31 */
32/**
33 * @file
34 */
35
36#include <async.h>
37#include <bd_srv.h>
38#include <errno.h>
39#include <hr.h>
40#include <io/log.h>
41#include <inttypes.h>
42#include <ipc/hr.h>
43#include <ipc/services.h>
44#include <loc.h>
45#include <task.h>
46#include <stdatomic.h>
47#include <stdio.h>
48#include <stdlib.h>
49#include <str.h>
50#include <str_error.h>
51
52#include "fge.h"
53#include "io.h"
54#include "superblock.h"
55#include "util.h"
56#include "var.h"
57
58loc_srv_t *hr_srv;
59
60static fibril_mutex_t hr_volumes_lock;
61static list_t hr_volumes;
62
63static service_id_t ctl_sid;
64
65static hr_volume_t *hr_get_volume(service_id_t svc_id)
66{
67 HR_DEBUG("hr_get_volume(): (%" PRIun ")\n", svc_id);
68
69 fibril_mutex_lock(&hr_volumes_lock);
70 list_foreach(hr_volumes, lvolumes, hr_volume_t, vol) {
71 if (vol->svc_id == svc_id) {
72 fibril_mutex_unlock(&hr_volumes_lock);
73 return vol;
74 }
75 }
76
77 fibril_mutex_unlock(&hr_volumes_lock);
78 return NULL;
79}
80
81static errno_t hr_remove_volume(service_id_t svc_id)
82{
83 HR_DEBUG("hr_remove_volume(): (%" PRIun ")\n", svc_id);
84
85 fibril_mutex_lock(&hr_volumes_lock);
86 list_foreach(hr_volumes, lvolumes, hr_volume_t, vol) {
87 if (vol->svc_id == svc_id) {
88 hr_fpool_destroy(vol->fge);
89 hr_fini_devs(vol);
90 list_remove(&vol->lvolumes);
91 free(vol);
92 fibril_mutex_unlock(&hr_volumes_lock);
93 return EOK;
94 }
95 }
96
97 fibril_mutex_unlock(&hr_volumes_lock);
98 return ENOENT;
99}
100
101static void hr_create_srv(ipc_call_t *icall, bool assemble)
102{
103 HR_DEBUG("hr_create_srv()\n");
104
105 errno_t rc;
106 size_t i, size;
107 hr_config_t *cfg;
108 hr_volume_t *new_volume;
109 ipc_call_t call;
110
111 if (!async_data_write_receive(&call, &size)) {
112 async_answer_0(&call, EREFUSED);
113 async_answer_0(icall, EREFUSED);
114 return;
115 }
116
117 if (size != sizeof(hr_config_t)) {
118 async_answer_0(&call, EINVAL);
119 async_answer_0(icall, EINVAL);
120 return;
121 }
122
123 cfg = calloc(1, sizeof(hr_config_t));
124 if (cfg == NULL) {
125 async_answer_0(&call, ENOMEM);
126 async_answer_0(icall, ENOMEM);
127 return;
128 }
129
130 rc = async_data_write_finalize(&call, cfg, size);
131 if (rc != EOK) {
132 free(cfg);
133 async_answer_0(&call, rc);
134 async_answer_0(icall, rc);
135 return;
136 }
137
138 /*
139 * If there was a missing device provided
140 * for creation of a new array, abort
141 */
142 if (!assemble) {
143 for (i = 0; i < cfg->dev_no; i++) {
144 if (cfg->devs[i] == 0) {
145 HR_ERROR("missing device provided for array "
146 "creation, aborting");
147 free(cfg);
148 async_answer_0(icall, EINVAL);
149 return;
150 }
151 }
152 }
153
154 new_volume = calloc(1, sizeof(hr_volume_t));
155 if (new_volume == NULL) {
156 free(cfg);
157 async_answer_0(icall, ENOMEM);
158 return;
159 }
160
161 hr_fpool_t *fge = hr_fpool_create(16, 32, sizeof(hr_io_t));
162 if (fge == NULL) {
163 free(new_volume);
164 free(cfg);
165 async_answer_0(icall, ENOMEM);
166 return;
167 }
168 new_volume->fge = fge;
169
170 str_cpy(new_volume->devname, HR_DEVNAME_LEN, cfg->devname);
171 for (i = 0; i < cfg->dev_no; i++)
172 new_volume->extents[i].svc_id = cfg->devs[i];
173 new_volume->level = cfg->level;
174 new_volume->extent_no = cfg->dev_no;
175
176 if (assemble) {
177 if (cfg->level != HR_LVL_UNKNOWN)
178 HR_WARN("level manually set when assembling, ingoring");
179 new_volume->level = HR_LVL_UNKNOWN;
180 }
181
182 rc = hr_init_devs(new_volume);
183 if (rc != EOK) {
184 free(cfg);
185 free(new_volume);
186 async_answer_0(icall, rc);
187 return;
188 }
189
190 if (assemble) {
191 /* just bsize needed for reading metadata later */
192 rc = hr_check_devs(new_volume, NULL, &new_volume->bsize);
193 if (rc != EOK)
194 goto error;
195
196 rc = hr_fill_vol_from_meta(new_volume);
197 if (rc != EOK)
198 goto error;
199 }
200
201 switch (new_volume->level) {
202 case HR_LVL_1:
203 if (!assemble)
204 new_volume->layout = 0x00; /* XXX: yet unused */
205 new_volume->hr_ops.create = hr_raid1_create;
206 new_volume->hr_ops.init = hr_raid1_init;
207 new_volume->hr_ops.status_event = hr_raid1_status_event;
208 new_volume->hr_ops.add_hotspare = hr_raid1_add_hotspare;
209 break;
210 case HR_LVL_0:
211 if (!assemble)
212 new_volume->layout = 0x00;
213 new_volume->hr_ops.create = hr_raid0_create;
214 new_volume->hr_ops.init = hr_raid0_init;
215 new_volume->hr_ops.status_event = hr_raid0_status_event;
216 break;
217 case HR_LVL_4:
218 if (!assemble)
219 new_volume->layout = HR_RLQ_RAID4_N;
220 new_volume->hr_ops.create = hr_raid5_create;
221 new_volume->hr_ops.init = hr_raid5_init;
222 new_volume->hr_ops.status_event = hr_raid5_status_event;
223 new_volume->hr_ops.add_hotspare = hr_raid5_add_hotspare;
224 break;
225 case HR_LVL_5:
226 if (!assemble)
227 new_volume->layout = HR_RLQ_RAID5_NR;
228 new_volume->hr_ops.create = hr_raid5_create;
229 new_volume->hr_ops.init = hr_raid5_init;
230 new_volume->hr_ops.status_event = hr_raid5_status_event;
231 new_volume->hr_ops.add_hotspare = hr_raid5_add_hotspare;
232 break;
233 default:
234 HR_ERROR("unkown level: %d, aborting\n", new_volume->level);
235 rc = EINVAL;
236 goto error;
237 }
238
239 if (!assemble) {
240 new_volume->hr_ops.init(new_volume);
241 if (rc != EOK)
242 goto error;
243
244 rc = hr_write_meta_to_vol(new_volume);
245 if (rc != EOK)
246 goto error;
247 }
248
249 fibril_mutex_initialize(&new_volume->lock); /* XXX: will remove this */
250
251 fibril_mutex_initialize(&new_volume->halt_lock);
252 new_volume->halt_please = false;
253
254 fibril_rwlock_initialize(&new_volume->extents_lock);
255 fibril_rwlock_initialize(&new_volume->states_lock);
256
257 fibril_mutex_initialize(&new_volume->hotspare_lock);
258
259 list_initialize(&new_volume->range_lock_list);
260 fibril_mutex_initialize(&new_volume->range_lock_list_lock);
261
262 fibril_mutex_initialize(&new_volume->deferred_list_lock);
263 list_initialize(&new_volume->deferred_invalidations_list);
264
265 atomic_init(&new_volume->rebuild_blk, 0);
266 atomic_init(&new_volume->state_changed, false);
267 atomic_init(&new_volume->pending_invalidation, false);
268
269 rc = new_volume->hr_ops.create(new_volume);
270 if (rc != EOK)
271 goto error;
272
273 fibril_mutex_lock(&hr_volumes_lock);
274 list_append(&new_volume->lvolumes, &hr_volumes);
275 fibril_mutex_unlock(&hr_volumes_lock);
276
277 if (assemble) {
278 HR_DEBUG("assembled volume \"%s\" (%" PRIun ")\n",
279 new_volume->devname, new_volume->svc_id);
280 } else {
281 HR_DEBUG("created volume \"%s\" (%" PRIun ")\n",
282 new_volume->devname, new_volume->svc_id);
283 }
284
285 free(cfg);
286 async_answer_0(icall, rc);
287 return;
288error:
289 free(cfg);
290 free(fge);
291 hr_fini_devs(new_volume);
292 free(new_volume);
293 async_answer_0(icall, rc);
294}
295
296static void hr_stop_srv(ipc_call_t *icall)
297{
298 HR_DEBUG("hr_stop_srv()\n");
299
300 errno_t rc = EOK;
301 service_id_t svc_id;
302 long fail_extent;
303 hr_volume_t *vol;
304
305 svc_id = ipc_get_arg1(icall);
306 fail_extent = (long)ipc_get_arg2(icall);
307
308 vol = hr_get_volume(svc_id);
309 if (vol == NULL) {
310 async_answer_0(icall, ENOENT);
311 return;
312 }
313
314 if (fail_extent == -1) {
315 rc = hr_remove_volume(svc_id);
316 if (rc != EOK) {
317 async_answer_0(icall, rc);
318 return;
319 }
320 rc = loc_service_unregister(hr_srv, svc_id);
321 } else {
322 fibril_rwlock_write_lock(&vol->states_lock);
323 fibril_rwlock_read_lock(&vol->extents_lock);
324
325 /* TODO: maybe expose extent state callbacks */
326 hr_update_ext_status(vol, fail_extent, HR_EXT_FAILED);
327 atomic_store(&vol->state_changed, true);
328
329 fibril_rwlock_read_unlock(&vol->extents_lock);
330 fibril_rwlock_write_unlock(&vol->states_lock);
331
332 vol->hr_ops.status_event(vol);
333 }
334 async_answer_0(icall, rc);
335}
336
337static void hr_add_hotspare_srv(ipc_call_t *icall)
338{
339 HR_DEBUG("hr_add_hotspare()\n");
340
341 errno_t rc = EOK;
342 service_id_t vol_svc_id;
343 service_id_t hotspare;
344 hr_volume_t *vol;
345
346 vol_svc_id = ipc_get_arg1(icall);
347 hotspare = ipc_get_arg2(icall);
348
349 vol = hr_get_volume(vol_svc_id);
350 if (vol == NULL) {
351 async_answer_0(icall, ENOENT);
352 return;
353 }
354
355 if (vol->hr_ops.add_hotspare == NULL) {
356 HR_DEBUG("hr_add_hotspare(): not supported on RAID level %d\n",
357 vol->level);
358 async_answer_0(icall, ENOTSUP);
359 return;
360 }
361
362 rc = vol->hr_ops.add_hotspare(vol, hotspare);
363
364 async_answer_0(icall, rc);
365}
366
367static void hr_print_status_srv(ipc_call_t *icall)
368{
369 HR_DEBUG("hr_status_srv()\n");
370
371 errno_t rc;
372 size_t vol_cnt = 0;
373 hr_vol_info_t info;
374 ipc_call_t call;
375 size_t size;
376
377 fibril_mutex_lock(&hr_volumes_lock);
378
379 vol_cnt = list_count(&hr_volumes);
380
381 if (!async_data_read_receive(&call, &size)) {
382 rc = EREFUSED;
383 goto error;
384 }
385
386 if (size != sizeof(size_t)) {
387 rc = EINVAL;
388 goto error;
389 }
390
391 rc = async_data_read_finalize(&call, &vol_cnt, size);
392 if (rc != EOK)
393 goto error;
394
395 list_foreach(hr_volumes, lvolumes, hr_volume_t, vol) {
396 memcpy(info.extents, vol->extents,
397 sizeof(hr_extent_t) * HR_MAX_EXTENTS);
398 memcpy(info.hotspares, vol->hotspares,
399 sizeof(hr_extent_t) * HR_MAX_HOTSPARES);
400 info.svc_id = vol->svc_id;
401 info.extent_no = vol->extent_no;
402 info.hotspare_no = vol->hotspare_no;
403 info.level = vol->level;
404 /* print usable number of blocks */
405 info.nblocks = vol->data_blkno;
406 info.strip_size = vol->strip_size;
407 info.bsize = vol->bsize;
408 info.status = vol->status;
409 info.layout = vol->layout;
410
411 if (!async_data_read_receive(&call, &size)) {
412 rc = EREFUSED;
413 goto error;
414 }
415
416 if (size != sizeof(hr_vol_info_t)) {
417 rc = EINVAL;
418 goto error;
419 }
420
421 rc = async_data_read_finalize(&call, &info, size);
422 if (rc != EOK)
423 goto error;
424 }
425
426 fibril_mutex_unlock(&hr_volumes_lock);
427 async_answer_0(icall, EOK);
428 return;
429error:
430 fibril_mutex_unlock(&hr_volumes_lock);
431 async_answer_0(&call, rc);
432 async_answer_0(icall, rc);
433}
434
435static void hr_ctl_conn(ipc_call_t *icall, void *arg)
436{
437 HR_DEBUG("hr_ctl_conn()\n");
438
439 async_accept_0(icall);
440
441 while (true) {
442 ipc_call_t call;
443 async_get_call(&call);
444 sysarg_t method = ipc_get_imethod(&call);
445
446 if (!method) {
447 async_answer_0(&call, EOK);
448 return;
449 }
450
451 switch (method) {
452 case HR_CREATE:
453 hr_create_srv(&call, false);
454 break;
455 case HR_ASSEMBLE:
456 hr_create_srv(&call, true);
457 break;
458 case HR_STOP:
459 hr_stop_srv(&call);
460 break;
461 case HR_ADD_HOTSPARE:
462 hr_add_hotspare_srv(&call);
463 break;
464 case HR_STATUS:
465 hr_print_status_srv(&call);
466 break;
467 default:
468 async_answer_0(&call, EINVAL);
469 }
470 }
471}
472
473static void hr_client_conn(ipc_call_t *icall, void *arg)
474{
475 HR_DEBUG("hr_client_conn()\n");
476
477 hr_volume_t *vol;
478
479 service_id_t svc_id = ipc_get_arg2(icall);
480
481 if (svc_id == ctl_sid) {
482 hr_ctl_conn(icall, arg);
483 } else {
484 HR_DEBUG("bd_conn()\n");
485 vol = hr_get_volume(svc_id);
486 if (vol == NULL)
487 async_answer_0(icall, EINVAL);
488 bd_conn(icall, &vol->hr_bds);
489 }
490}
491
492int main(int argc, char **argv)
493{
494 errno_t rc;
495
496 printf("%s: HelenRAID server\n", NAME);
497
498 rc = log_init(NAME);
499 if (rc != EOK) {
500 printf("%s: failed to initialize logging\n", NAME);
501 return 1;
502 }
503
504 fibril_mutex_initialize(&hr_volumes_lock);
505 list_initialize(&hr_volumes);
506
507 async_set_fallback_port_handler(hr_client_conn, NULL);
508
509 rc = loc_server_register(NAME, &hr_srv);
510 if (rc != EOK) {
511 HR_ERROR("failed registering server: %s", str_error(rc));
512 return EEXIST;
513 }
514
515 rc = loc_service_register(hr_srv, SERVICE_NAME_HR, &ctl_sid);
516 if (rc != EOK) {
517 HR_ERROR("failed registering service: %s", str_error(rc));
518 return EEXIST;
519 }
520
521 printf("%s: accepting connections\n", NAME);
522 task_retval(0);
523 async_manager();
524
525 return 0;
526}
527
528/** @}
529 */
Note: See TracBrowser for help on using the repository browser.