source: mainline/uspace/srv/bd/hr/hr.c@ 5b320ac

Last change on this file since 5b320ac was 5b320ac, checked in by Miroslav Cimerman <mc@…>, 9 months ago

hr: hotspares + RAID1 rebuild

  • Property mode set to 100644
File size: 10.6 KB
/*
 * Copyright (c) 2024 Miroslav Cimerman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup hr
 * @{
 */
/**
 * @file HelenRAID server.
 */

#include <async.h>
#include <bd_srv.h>
#include <errno.h>
#include <hr.h>
#include <io/log.h>
#include <inttypes.h>
#include <ipc/hr.h>
#include <ipc/services.h>
#include <loc.h>
#include <task.h>
#include <stdio.h>
#include <stdlib.h>
#include <str.h>
#include <str_error.h>

#include "superblock.h"
#include "util.h"
#include "var.h"

loc_srv_t *hr_srv;

static fibril_mutex_t hr_volumes_lock;
static list_t hr_volumes;

static service_id_t ctl_sid;

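/** Look up a volume by its block device service id (NULL if not found). */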
static hr_volume_t *hr_get_volume(service_id_t svc_id)
{
	HR_DEBUG("hr_get_volume(): (%" PRIun ")\n", svc_id);

	fibril_mutex_lock(&hr_volumes_lock);
	list_foreach(hr_volumes, lvolumes, hr_volume_t, vol) {
		if (vol->svc_id == svc_id) {
			fibril_mutex_unlock(&hr_volumes_lock);
			return vol;
		}
	}

	fibril_mutex_unlock(&hr_volumes_lock);
	return NULL;
}

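/** Unlink a volume from the volume list, finalize its devices and free it. */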
static errno_t hr_remove_volume(service_id_t svc_id)
{
	HR_DEBUG("hr_remove_volume(): (%" PRIun ")\n", svc_id);

	fibril_mutex_lock(&hr_volumes_lock);
	list_foreach(hr_volumes, lvolumes, hr_volume_t, vol) {
		if (vol->svc_id == svc_id) {
			hr_fini_devs(vol);
			list_remove(&vol->lvolumes);
			free(vol);
			fibril_mutex_unlock(&hr_volumes_lock);
			return EOK;
		}
	}

	fibril_mutex_unlock(&hr_volumes_lock);
	return ENOENT;
}

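/** Handle HR_CREATE and HR_ASSEMBLE requests.
 *
 * Receives an hr_config_t from the client, sets up a new volume and
 * dispatches to the RAID level specific create routine.
 *
 * @param assemble false when creating a new array, true when assembling
 *                 an existing one from on-disk metadata
 */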
static void hr_create_srv(ipc_call_t *icall, bool assemble)
{
	HR_DEBUG("hr_create_srv()\n");

	errno_t rc;
	size_t i, size;
	hr_config_t *cfg;
	hr_volume_t *new_volume;
	ipc_call_t call;

	if (!async_data_write_receive(&call, &size)) {
		async_answer_0(&call, EREFUSED);
		async_answer_0(icall, EREFUSED);
		return;
	}

	if (size != sizeof(hr_config_t)) {
		async_answer_0(&call, EINVAL);
		async_answer_0(icall, EINVAL);
		return;
	}

	cfg = calloc(1, sizeof(hr_config_t));
	if (cfg == NULL) {
		async_answer_0(&call, ENOMEM);
		async_answer_0(icall, ENOMEM);
		return;
	}

	rc = async_data_write_finalize(&call, cfg, size);
	if (rc != EOK) {
		free(cfg);
		async_answer_0(&call, rc);
		async_answer_0(icall, rc);
		return;
	}

	/*
	 * Abort if a missing device was provided for the
	 * creation of a new array.
	 */
	if (!assemble) {
		for (i = 0; i < cfg->dev_no; i++) {
			if (cfg->devs[i] == 0) {
				HR_ERROR("missing device provided for array "
				    "creation, aborting\n");
				free(cfg);
				async_answer_0(icall, EINVAL);
				return;
			}
		}
	}

	new_volume = calloc(1, sizeof(hr_volume_t));
	if (new_volume == NULL) {
		free(cfg);
		async_answer_0(icall, ENOMEM);
		return;
	}

	str_cpy(new_volume->devname, HR_DEVNAME_LEN, cfg->devname);
	for (i = 0; i < cfg->dev_no; i++)
		new_volume->extents[i].svc_id = cfg->devs[i];
	new_volume->level = cfg->level;
	new_volume->dev_no = cfg->dev_no;

	if (assemble) {
		if (cfg->level != HR_LVL_UNKNOWN)
			HR_WARN("level manually set when assembling, ignoring");
		new_volume->level = HR_LVL_UNKNOWN;
	}

	rc = hr_init_devs(new_volume);
	if (rc != EOK) {
		free(cfg);
		free(new_volume);
		async_answer_0(icall, rc);
		return;
	}

	if (assemble) {
		/* just bsize needed for reading metadata later */
		rc = hr_check_devs(new_volume, NULL, &new_volume->bsize);
		if (rc != EOK)
			goto error;

		rc = hr_fill_vol_from_meta(new_volume);
		if (rc != EOK)
			goto error;
	}

	switch (new_volume->level) {
	case HR_LVL_1:
		new_volume->hr_ops.create = hr_raid1_create;
		new_volume->hr_ops.init = hr_raid1_init;
		new_volume->hr_ops.status_event = hr_raid1_status_event;
		new_volume->hr_ops.add_hotspare = hr_raid1_add_hotspare;
		break;
	case HR_LVL_0:
		new_volume->hr_ops.create = hr_raid0_create;
		new_volume->hr_ops.init = hr_raid0_init;
		new_volume->hr_ops.status_event = hr_raid0_status_event;
		break;
	case HR_LVL_4:
		new_volume->hr_ops.create = hr_raid4_create;
		new_volume->hr_ops.init = hr_raid4_init;
		new_volume->hr_ops.status_event = hr_raid4_status_event;
		break;
	case HR_LVL_5:
		new_volume->hr_ops.create = hr_raid5_create;
		new_volume->hr_ops.init = hr_raid5_init;
		new_volume->hr_ops.status_event = hr_raid5_status_event;
		break;
	default:
		HR_ERROR("unknown level: %d, aborting\n", new_volume->level);
		rc = EINVAL;
		goto error;
	}

	if (!assemble) {
		rc = new_volume->hr_ops.init(new_volume);
		if (rc != EOK)
			goto error;

		rc = hr_write_meta_to_vol(new_volume);
		if (rc != EOK)
			goto error;
	}

	fibril_mutex_initialize(&new_volume->lock);

	rc = new_volume->hr_ops.create(new_volume);
	if (rc != EOK)
		goto error;

	fibril_mutex_lock(&hr_volumes_lock);
	list_append(&new_volume->lvolumes, &hr_volumes);
	fibril_mutex_unlock(&hr_volumes_lock);

	if (assemble) {
		HR_DEBUG("assembled volume \"%s\" (%" PRIun ")\n",
		    new_volume->devname, new_volume->svc_id);
	} else {
		HR_DEBUG("created volume \"%s\" (%" PRIun ")\n",
		    new_volume->devname, new_volume->svc_id);
	}

	free(cfg);
	async_answer_0(icall, rc);
	return;
error:
	free(cfg);
	hr_fini_devs(new_volume);
	free(new_volume);
	async_answer_0(icall, rc);
}

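/** Handle HR_STOP requests.
 *
 * A fail_extent of -1 removes and unregisters the whole volume; any
 * other value marks that extent as FAILED and signals a status event.
 */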
static void hr_stop_srv(ipc_call_t *icall)
{
	HR_DEBUG("hr_stop_srv()\n");

	errno_t rc = EOK;
	service_id_t svc_id;
	long fail_extent;
	hr_volume_t *vol;

	svc_id = ipc_get_arg1(icall);
	fail_extent = (long) ipc_get_arg2(icall);

	vol = hr_get_volume(svc_id);
	if (vol == NULL) {
		async_answer_0(icall, ENOENT);
		return;
	}

	if (fail_extent == -1) {
		rc = hr_remove_volume(svc_id);
		if (rc != EOK) {
			async_answer_0(icall, rc);
			return;
		}
		rc = loc_service_unregister(hr_srv, svc_id);
	} else {
		/* fibril safe for now */
		fibril_mutex_lock(&vol->lock);
		hr_update_ext_status(vol, fail_extent, HR_EXT_FAILED);
		fibril_mutex_unlock(&vol->lock);

		vol->hr_ops.status_event(vol);
	}
	async_answer_0(icall, rc);
}

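/** Handle HR_ADD_HOTSPARE requests, if the volume's RAID level supports hotspares. */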
static void hr_add_hotspare_srv(ipc_call_t *icall)
{
	HR_DEBUG("hr_add_hotspare_srv()\n");

	errno_t rc = EOK;
	service_id_t vol_svc_id;
	service_id_t hotspare;
	hr_volume_t *vol;

	vol_svc_id = ipc_get_arg1(icall);
	hotspare = ipc_get_arg2(icall);

	vol = hr_get_volume(vol_svc_id);
	if (vol == NULL) {
		async_answer_0(icall, ENOENT);
		return;
	}

	if (vol->hr_ops.add_hotspare == NULL) {
		HR_DEBUG("hr_add_hotspare_srv(): not supported on RAID level %d\n",
		    vol->level);
		async_answer_0(icall, ENOTSUP);
		return;
	}

	rc = vol->hr_ops.add_hotspare(vol, hotspare);

	async_answer_0(icall, rc);
}

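/** Handle HR_STATUS requests: send the volume count, then one hr_vol_info_t per volume. */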
static void hr_print_status_srv(ipc_call_t *icall)
{
	HR_DEBUG("hr_print_status_srv()\n");

	errno_t rc;
	size_t vol_cnt = 0;
	hr_vol_info_t info;
	ipc_call_t call;
	size_t size;

	fibril_mutex_lock(&hr_volumes_lock);

	vol_cnt = list_count(&hr_volumes);

	if (!async_data_read_receive(&call, &size)) {
		rc = EREFUSED;
		goto error;
	}

	if (size != sizeof(size_t)) {
		rc = EINVAL;
		goto error;
	}

	rc = async_data_read_finalize(&call, &vol_cnt, size);
	if (rc != EOK)
		goto error;

	list_foreach(hr_volumes, lvolumes, hr_volume_t, vol) {
		memcpy(info.extents, vol->extents,
		    sizeof(hr_extent_t) * HR_MAX_EXTENTS);
		memcpy(info.hotspares, vol->hotspares,
		    sizeof(hr_extent_t) * HR_MAX_HOTSPARES);
		info.svc_id = vol->svc_id;
		info.extent_no = vol->dev_no;
		info.hotspare_no = vol->hotspare_no;
		info.level = vol->level;
		/* report the usable number of blocks */
		info.nblocks = vol->data_blkno;
		info.strip_size = vol->strip_size;
		info.bsize = vol->bsize;
		info.status = vol->status;

		if (!async_data_read_receive(&call, &size)) {
			rc = EREFUSED;
			goto error;
		}

		if (size != sizeof(hr_vol_info_t)) {
			rc = EINVAL;
			goto error;
		}

		rc = async_data_read_finalize(&call, &info, size);
		if (rc != EOK)
			goto error;
	}

	fibril_mutex_unlock(&hr_volumes_lock);
	async_answer_0(icall, EOK);
	return;
error:
	fibril_mutex_unlock(&hr_volumes_lock);
	async_answer_0(&call, rc);
	async_answer_0(icall, rc);
}

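/** Serve a connection to the hr control service. */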
static void hr_ctl_conn(ipc_call_t *icall, void *arg)
{
	HR_DEBUG("hr_ctl_conn()\n");

	async_accept_0(icall);

	while (true) {
		ipc_call_t call;
		async_get_call(&call);
		sysarg_t method = ipc_get_imethod(&call);

		if (!method) {
			async_answer_0(&call, EOK);
			return;
		}

		switch (method) {
		case HR_CREATE:
			hr_create_srv(&call, false);
			break;
		case HR_ASSEMBLE:
			hr_create_srv(&call, true);
			break;
		case HR_STOP:
			hr_stop_srv(&call);
			break;
		case HR_ADD_HOTSPARE:
			hr_add_hotspare_srv(&call);
			break;
		case HR_STATUS:
			hr_print_status_srv(&call);
			break;
		default:
			async_answer_0(&call, EINVAL);
		}
	}
}

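/** Dispatch an incoming connection to the control service or to the block device interface of the addressed volume. */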
static void hr_client_conn(ipc_call_t *icall, void *arg)
{
	HR_DEBUG("hr_client_conn()\n");

	hr_volume_t *vol;

	service_id_t svc_id = ipc_get_arg2(icall);

	if (svc_id == ctl_sid) {
		hr_ctl_conn(icall, arg);
	} else {
		HR_DEBUG("bd_conn()\n");
		vol = hr_get_volume(svc_id);
		if (vol == NULL) {
			async_answer_0(icall, EINVAL);
			return;
		}
		bd_conn(icall, &vol->hr_bds);
	}
}

int main(int argc, char **argv)
{
	errno_t rc;

	printf("%s: HelenRAID server\n", NAME);

	rc = log_init(NAME);
	if (rc != EOK) {
		printf("%s: failed to initialize logging\n", NAME);
		return 1;
	}

	fibril_mutex_initialize(&hr_volumes_lock);
	list_initialize(&hr_volumes);

	async_set_fallback_port_handler(hr_client_conn, NULL);

	rc = loc_server_register(NAME, &hr_srv);
	if (rc != EOK) {
		HR_ERROR("failed registering server: %s", str_error(rc));
		return EEXIST;
	}

	rc = loc_service_register(hr_srv, SERVICE_NAME_HR, &ctl_sid);
	if (rc != EOK) {
		HR_ERROR("failed registering service: %s", str_error(rc));
		return EEXIST;
	}

	printf("%s: accepting connections\n", NAME);
	task_retval(0);
	async_manager();

	return 0;
}

/** @}
 */