source: mainline/uspace/srv/bd/hr/hr.c@5fe0b9b5

Last change on this file since 5fe0b9b5 was d2da1be, checked in by Miroslav Cimerman <mc@…>, 5 months ago

hr: rename vol->state_changed → vol->state_dirty

  • Property mode set to 100644
File size: 11.9 KB
/*
 * Copyright (c) 2025 Miroslav Cimerman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup hr
 * @{
 */
/**
 * @file
 */

#include <async.h>
#include <bd_srv.h>
#include <errno.h>
#include <hr.h>
#include <io/log.h>
#include <inttypes.h>
#include <ipc/hr.h>
#include <ipc/services.h>
#include <loc.h>
#include <task.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <str.h>
#include <str_error.h>

#include "fge.h"
#include "io.h"
#include "superblock.h"
#include "util.h"
#include "var.h"
loc_srv_t *hr_srv;

static fibril_mutex_t hr_volumes_lock;
static list_t hr_volumes;

static service_id_t ctl_sid;
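
/** Find a volume by its service id.
 *
 * @param svc_id  Service id of the volume's block device.
 *
 * @return Pointer to the volume, or NULL if no such volume exists.
 */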
static hr_volume_t *hr_get_volume(service_id_t svc_id)
{
	HR_DEBUG("hr_get_volume(): (%" PRIun ")\n", svc_id);

	fibril_mutex_lock(&hr_volumes_lock);
	list_foreach(hr_volumes, lvolumes, hr_volume_t, vol) {
		if (vol->svc_id == svc_id) {
			fibril_mutex_unlock(&hr_volumes_lock);
			return vol;
		}
	}

	fibril_mutex_unlock(&hr_volumes_lock);
	return NULL;
}
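
/** Remove a volume from the volume list and free its resources.
 *
 * @param svc_id  Service id of the volume to remove.
 *
 * @return EOK on success, ENOENT if the volume does not exist.
 */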
static errno_t hr_remove_volume(service_id_t svc_id)
{
	HR_DEBUG("hr_remove_volume(): (%" PRIun ")\n", svc_id);

	fibril_mutex_lock(&hr_volumes_lock);
	list_foreach(hr_volumes, lvolumes, hr_volume_t, vol) {
		if (vol->svc_id == svc_id) {
			hr_fpool_destroy(vol->fge);
			hr_fini_devs(vol);
			list_remove(&vol->lvolumes);
			free(vol);
			fibril_mutex_unlock(&hr_volumes_lock);
			return EOK;
		}
	}

	fibril_mutex_unlock(&hr_volumes_lock);
	return ENOENT;
}
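
/** Handle a volume creation or assembly request.
 *
 * Reads an hr_config_t from the caller, allocates and initializes
 * a new volume and appends it to the volume list.
 *
 * @param icall     Call to answer.
 * @param assemble  True when assembling an existing array from metadata,
 *                  false when creating a new array.
 */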
static void hr_create_srv(ipc_call_t *icall, bool assemble)
{
	HR_DEBUG("hr_create_srv()\n");

	errno_t rc;
	size_t i, size;
	hr_config_t *cfg;
	hr_volume_t *new_volume;
	ipc_call_t call;

	if (!async_data_write_receive(&call, &size)) {
		async_answer_0(&call, EREFUSED);
		async_answer_0(icall, EREFUSED);
		return;
	}

	if (size != sizeof(hr_config_t)) {
		async_answer_0(&call, EINVAL);
		async_answer_0(icall, EINVAL);
		return;
	}

	cfg = calloc(1, sizeof(hr_config_t));
	if (cfg == NULL) {
		async_answer_0(&call, ENOMEM);
		async_answer_0(icall, ENOMEM);
		return;
	}

	rc = async_data_write_finalize(&call, cfg, size);
	if (rc != EOK) {
		free(cfg);
		async_answer_0(&call, rc);
		async_answer_0(icall, rc);
		return;
	}
	/*
	 * If a missing device was specified when creating
	 * a new array, abort.
	 */
	if (!assemble) {
		for (i = 0; i < cfg->dev_no; i++) {
			if (cfg->devs[i] == 0) {
				HR_ERROR("missing device provided for array "
				    "creation, aborting");
				free(cfg);
				async_answer_0(icall, EINVAL);
				return;
			}
		}
	}
	new_volume = calloc(1, sizeof(hr_volume_t));
	if (new_volume == NULL) {
		free(cfg);
		async_answer_0(icall, ENOMEM);
		return;
	}

	hr_fpool_t *fge = hr_fpool_create(16, 32, sizeof(hr_io_t));
	if (fge == NULL) {
		free(new_volume);
		free(cfg);
		async_answer_0(icall, ENOMEM);
		return;
	}
	new_volume->fge = fge;

	str_cpy(new_volume->devname, HR_DEVNAME_LEN, cfg->devname);
	for (i = 0; i < cfg->dev_no; i++)
		new_volume->extents[i].svc_id = cfg->devs[i];
	new_volume->level = cfg->level;
	new_volume->extent_no = cfg->dev_no;
	if (assemble) {
		if (cfg->level != HR_LVL_UNKNOWN)
			HR_WARN("level manually set when assembling, ignoring");
		new_volume->level = HR_LVL_UNKNOWN;
	}
	rc = hr_init_devs(new_volume);
	if (rc != EOK) {
		free(cfg);
		free(new_volume);
		async_answer_0(icall, rc);
		return;
	}

	if (assemble) {
		/* just bsize needed for reading metadata later */
		rc = hr_check_devs(new_volume, NULL, &new_volume->bsize);
		if (rc != EOK)
			goto error;

		rc = hr_fill_vol_from_meta(new_volume);
		if (rc != EOK)
			goto error;
	}
	switch (new_volume->level) {
	case HR_LVL_1:
		if (!assemble)
			new_volume->layout = 0x00; /* XXX: yet unused */
		new_volume->hr_ops.create = hr_raid1_create;
		new_volume->hr_ops.init = hr_raid1_init;
		new_volume->hr_ops.status_event = hr_raid1_status_event;
		new_volume->hr_ops.add_hotspare = hr_raid1_add_hotspare;
		break;
	case HR_LVL_0:
		if (!assemble)
			new_volume->layout = 0x00;
		new_volume->hr_ops.create = hr_raid0_create;
		new_volume->hr_ops.init = hr_raid0_init;
		new_volume->hr_ops.status_event = hr_raid0_status_event;
		break;
	case HR_LVL_4:
		if (!assemble)
			new_volume->layout = HR_RLQ_RAID4_N;
		new_volume->hr_ops.create = hr_raid5_create;
		new_volume->hr_ops.init = hr_raid5_init;
		new_volume->hr_ops.status_event = hr_raid5_status_event;
		new_volume->hr_ops.add_hotspare = hr_raid5_add_hotspare;
		break;
	case HR_LVL_5:
		if (!assemble)
			new_volume->layout = HR_RLQ_RAID5_NR;
		new_volume->hr_ops.create = hr_raid5_create;
		new_volume->hr_ops.init = hr_raid5_init;
		new_volume->hr_ops.status_event = hr_raid5_status_event;
		new_volume->hr_ops.add_hotspare = hr_raid5_add_hotspare;
		break;
	default:
		HR_ERROR("unknown level: %d, aborting\n", new_volume->level);
		rc = EINVAL;
		goto error;
	}
	if (!assemble) {
		rc = new_volume->hr_ops.init(new_volume);
		if (rc != EOK)
			goto error;

		rc = hr_write_meta_to_vol(new_volume);
		if (rc != EOK)
			goto error;
	}
	fibril_mutex_initialize(&new_volume->lock); /* XXX: will remove this */

	fibril_rwlock_initialize(&new_volume->extents_lock);
	fibril_rwlock_initialize(&new_volume->states_lock);

	fibril_mutex_initialize(&new_volume->hotspare_lock);

	list_initialize(&new_volume->range_lock_list);
	fibril_mutex_initialize(&new_volume->range_lock_list_lock);

	atomic_init(&new_volume->rebuild_blk, 0);
	atomic_init(&new_volume->state_dirty, false);

	rc = new_volume->hr_ops.create(new_volume);
	if (rc != EOK)
		goto error;

	fibril_mutex_lock(&hr_volumes_lock);
	list_append(&new_volume->lvolumes, &hr_volumes);
	fibril_mutex_unlock(&hr_volumes_lock);

	if (assemble) {
		HR_DEBUG("assembled volume \"%s\" (%" PRIun ")\n",
		    new_volume->devname, new_volume->svc_id);
	} else {
		HR_DEBUG("created volume \"%s\" (%" PRIun ")\n",
		    new_volume->devname, new_volume->svc_id);
	}

	free(cfg);
	async_answer_0(icall, rc);
	return;
error:
	free(cfg);
	free(fge);
	hr_fini_devs(new_volume);
	free(new_volume);
	async_answer_0(icall, rc);
}
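
/** Handle a volume stop / extent fail request.
 *
 * With fail_extent == -1 the volume is removed and its service
 * unregistered, otherwise the given extent is marked as failed.
 *
 * @param icall  Call to answer.
 */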
static void hr_stop_srv(ipc_call_t *icall)
{
	HR_DEBUG("hr_stop_srv()\n");

	errno_t rc = EOK;
	service_id_t svc_id;
	long fail_extent;
	hr_volume_t *vol;

	svc_id = ipc_get_arg1(icall);
	fail_extent = (long)ipc_get_arg2(icall);

	vol = hr_get_volume(svc_id);
	if (vol == NULL) {
		async_answer_0(icall, ENOENT);
		return;
	}

	if (fail_extent == -1) {
		rc = hr_remove_volume(svc_id);
		if (rc != EOK) {
			async_answer_0(icall, rc);
			return;
		}
		rc = loc_service_unregister(hr_srv, svc_id);
	} else {
		fibril_rwlock_write_lock(&vol->states_lock);
		fibril_rwlock_read_lock(&vol->extents_lock);

		/* TODO: maybe expose extent state callbacks */
		hr_update_ext_status(vol, fail_extent, HR_EXT_FAILED);
		hr_mark_vol_state_dirty(vol);

		fibril_rwlock_read_unlock(&vol->extents_lock);
		fibril_rwlock_write_unlock(&vol->states_lock);

		vol->hr_ops.status_event(vol);
	}
	async_answer_0(icall, rc);
}
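
/** Handle a hotspare addition request.
 *
 * Dispatches to the volume's add_hotspare operation if the RAID
 * level supports it.
 *
 * @param icall  Call to answer.
 */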
static void hr_add_hotspare_srv(ipc_call_t *icall)
{
	HR_DEBUG("hr_add_hotspare()\n");

	errno_t rc = EOK;
	service_id_t vol_svc_id;
	service_id_t hotspare;
	hr_volume_t *vol;

	vol_svc_id = ipc_get_arg1(icall);
	hotspare = ipc_get_arg2(icall);

	vol = hr_get_volume(vol_svc_id);
	if (vol == NULL) {
		async_answer_0(icall, ENOENT);
		return;
	}

	if (vol->hr_ops.add_hotspare == NULL) {
		HR_DEBUG("hr_add_hotspare(): not supported on RAID level %d\n",
		    vol->level);
		async_answer_0(icall, ENOTSUP);
		return;
	}

	rc = vol->hr_ops.add_hotspare(vol, hotspare);

	async_answer_0(icall, rc);
}
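
/** Handle a status request.
 *
 * Sends the volume count followed by an hr_vol_info_t for each
 * registered volume to the caller.
 *
 * @param icall  Call to answer.
 */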
static void hr_print_status_srv(ipc_call_t *icall)
{
	HR_DEBUG("hr_status_srv()\n");

	errno_t rc;
	size_t vol_cnt = 0;
	hr_vol_info_t info;
	ipc_call_t call;
	size_t size;

	fibril_mutex_lock(&hr_volumes_lock);

	vol_cnt = list_count(&hr_volumes);

	if (!async_data_read_receive(&call, &size)) {
		rc = EREFUSED;
		goto error;
	}

	if (size != sizeof(size_t)) {
		rc = EINVAL;
		goto error;
	}

	rc = async_data_read_finalize(&call, &vol_cnt, size);
	if (rc != EOK)
		goto error;

	list_foreach(hr_volumes, lvolumes, hr_volume_t, vol) {
		memcpy(info.extents, vol->extents,
		    sizeof(hr_extent_t) * HR_MAX_EXTENTS);
		memcpy(info.hotspares, vol->hotspares,
		    sizeof(hr_extent_t) * HR_MAX_HOTSPARES);
		info.svc_id = vol->svc_id;
		info.extent_no = vol->extent_no;
		info.hotspare_no = vol->hotspare_no;
		info.level = vol->level;
		/* print usable number of blocks */
		info.nblocks = vol->data_blkno;
		info.strip_size = vol->strip_size;
		info.bsize = vol->bsize;
		info.status = vol->status;
		info.layout = vol->layout;

		if (!async_data_read_receive(&call, &size)) {
			rc = EREFUSED;
			goto error;
		}

		if (size != sizeof(hr_vol_info_t)) {
			rc = EINVAL;
			goto error;
		}

		rc = async_data_read_finalize(&call, &info, size);
		if (rc != EOK)
			goto error;
	}

	fibril_mutex_unlock(&hr_volumes_lock);
	async_answer_0(icall, EOK);
	return;
error:
	fibril_mutex_unlock(&hr_volumes_lock);
	async_answer_0(&call, rc);
	async_answer_0(icall, rc);
}
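
/** Control service connection handler.
 *
 * Accepts the connection and dispatches HR_* control requests
 * until the client hangs up.
 */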
static void hr_ctl_conn(ipc_call_t *icall, void *arg)
{
	HR_DEBUG("hr_ctl_conn()\n");

	async_accept_0(icall);

	while (true) {
		ipc_call_t call;
		async_get_call(&call);
		sysarg_t method = ipc_get_imethod(&call);

		if (!method) {
			async_answer_0(&call, EOK);
			return;
		}

		switch (method) {
		case HR_CREATE:
			hr_create_srv(&call, false);
			break;
		case HR_ASSEMBLE:
			hr_create_srv(&call, true);
			break;
		case HR_STOP:
			hr_stop_srv(&call);
			break;
		case HR_ADD_HOTSPARE:
			hr_add_hotspare_srv(&call);
			break;
		case HR_STATUS:
			hr_print_status_srv(&call);
			break;
		default:
			async_answer_0(&call, EINVAL);
		}
	}
}
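
/** Client connection handler.
 *
 * Routes connections to the control service to hr_ctl_conn(), and
 * connections to a volume's block device to bd_conn().
 */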
static void hr_client_conn(ipc_call_t *icall, void *arg)
{
	HR_DEBUG("hr_client_conn()\n");

	hr_volume_t *vol;

	service_id_t svc_id = ipc_get_arg2(icall);

	if (svc_id == ctl_sid) {
		hr_ctl_conn(icall, arg);
	} else {
		HR_DEBUG("bd_conn()\n");
		vol = hr_get_volume(svc_id);
		if (vol == NULL) {
			async_answer_0(icall, EINVAL);
			return;
		}
		bd_conn(icall, &vol->hr_bds);
	}
}
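
/** HelenRAID server entry point.
 *
 * Initializes logging, registers the server and its control
 * service and enters the async manager loop.
 */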
int main(int argc, char **argv)
{
	errno_t rc;

	printf("%s: HelenRAID server\n", NAME);

	rc = log_init(NAME);
	if (rc != EOK) {
		printf("%s: failed to initialize logging\n", NAME);
		return 1;
	}

	fibril_mutex_initialize(&hr_volumes_lock);
	list_initialize(&hr_volumes);

	async_set_fallback_port_handler(hr_client_conn, NULL);

	rc = loc_server_register(NAME, &hr_srv);
	if (rc != EOK) {
		HR_ERROR("failed registering server: %s", str_error(rc));
		return EEXIST;
	}

	rc = loc_service_register(hr_srv, SERVICE_NAME_HR, &ctl_sid);
	if (rc != EOK) {
		HR_ERROR("failed registering service: %s", str_error(rc));
		return EEXIST;
	}

	printf("%s: accepting connections\n", NAME);
	task_retval(0);
	async_manager();

	return 0;
}

/** @}
 */