source: mainline/uspace/srv/bd/hr/fge.c@723f1d9

Last change on this file was 723f1d9, checked in by Miroslav Cimerman <mc@…>, 7 months ago:

hr: fge: fgroup_create(): assert worker count > 0

/*
 * Copyright (c) 2024 Miroslav Cimerman
 * Copyright (c) 2024 Vojtech Horky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup hr
 * @{
 */
/**
 * @file
 * @brief Fibril group executor
 *
 * Fibril pool with pre-allocated storage allowing
 * execution of groups consisting of multiple work
 * units.
 */
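
/*
 * Illustrative usage sketch (not part of the upstream sources: the work
 * unit argument type my_wu_arg_t, its fields and the pool sizes are made
 * up for the example, and error handling is omitted):
 *
 *	typedef struct {
 *		size_t idx;
 *	} my_wu_arg_t;
 *
 *	static errno_t my_wu(void *arg)
 *	{
 *		my_wu_arg_t *a = arg;
 *		(void)a;	// do one unit of work here
 *		return EOK;
 *	}
 *
 *	hr_fpool_t *pool = hr_fpool_create(4, 16, sizeof(my_wu_arg_t));
 *	hr_fgroup_t *group = hr_fgroup_create(pool, wu_cnt);
 *	for (size_t i = 0; i < wu_cnt; i++) {
 *		my_wu_arg_t *a = hr_fgroup_alloc(group);
 *		a->idx = i;
 *		hr_fgroup_submit(group, my_wu, a);
 *	}
 *	size_t okay, failed;
 *	errno_t rc = hr_fgroup_wait(group, &okay, &failed);
 *	...
 *	hr_fpool_destroy(pool);
 */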

#include <adt/bitmap.h>
#include <adt/circ_buf.h>
#include <assert.h>
#include <errno.h>
#include <fibril_synch.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <types/common.h>

#include "fge.h"

struct fge_fibril_data;
typedef struct fge_fibril_data fge_fibril_data_t;
struct wu_queue;
typedef struct wu_queue wu_queue_t;

static void *hr_fpool_make_storage(hr_fpool_t *, ssize_t *);
static void hr_fpool_group_epilogue(hr_fpool_t *);
static errno_t fge_fibril(void *);
static errno_t wu_queue_init(wu_queue_t *, size_t);
static void wu_queue_push(wu_queue_t *, fge_fibril_data_t *);
static void wu_queue_pop(wu_queue_t *, fge_fibril_data_t *);
static ssize_t hr_fpool_get_free_slot(hr_fpool_t *);

typedef struct fge_fibril_data {
	hr_wu_t wu; /* user-provided work unit fcn pointer */
	void *arg;
	hr_fgroup_t *group;
	ssize_t memslot; /* index to pool bitmap slot */
} fge_fibril_data_t;

typedef struct wu_queue {
	fibril_mutex_t lock;
	fibril_condvar_t not_empty;
	fibril_condvar_t not_full;
	fge_fibril_data_t *fexecs;
	circ_buf_t cbuf;
} wu_queue_t;

struct hr_fpool {
	fibril_mutex_t lock;
	fibril_condvar_t all_wus_done;
	bitmap_t bitmap;
	wu_queue_t queue;
	fid_t *fibrils;
	uint8_t *wu_storage;
	size_t fibril_cnt;
	size_t max_wus;
	size_t active_groups;
	bool stop;
	size_t wu_size;
	size_t wu_storage_free_count;
};

struct hr_fgroup {
	hr_fpool_t *pool;
	size_t wu_cnt; /* total wu count */
	size_t submitted;
	size_t reserved_cnt; /* no. of reserved wu storage slots */
	size_t reserved_avail;
	size_t *memslots; /* indices to pool bitmap */
	void *own_mem;
	size_t own_used;
	errno_t final_errno;
	size_t finished_okay;
	size_t finished_fail;
	fibril_mutex_t lock;
	fibril_condvar_t all_done;
};

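/** Create a fibril pool.
 *
 * The worker fibrils are created and started immediately.
 *
 * @param fibril_cnt      number of worker fibrils
 * @param max_wus         number of pre-allocated work unit storage slots
 * @param wu_storage_size size of one work unit storage slot in bytes
 *
 * @return pool on success, NULL on allocation failure
 */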
hr_fpool_t *hr_fpool_create(size_t fibril_cnt, size_t max_wus,
    size_t wu_storage_size)
{
	assert(max_wus > 0 && wu_storage_size > 0);

	void *bitmap_data = NULL;

	hr_fpool_t *result = calloc(1, sizeof(hr_fpool_t));
	if (result == NULL)
		return NULL;

	result->fibrils = malloc(sizeof(fid_t) * fibril_cnt);
	if (result->fibrils == NULL)
		goto bad;

	result->wu_storage = malloc(wu_storage_size * max_wus);
	if (result->wu_storage == NULL)
		goto bad;

	bitmap_data = calloc(1, bitmap_size(max_wus));
	if (bitmap_data == NULL)
		goto bad;
	bitmap_initialize(&result->bitmap, max_wus, bitmap_data);

	if (wu_queue_init(&result->queue, max_wus) != EOK)
		goto bad;

	fibril_mutex_initialize(&result->lock);
	fibril_condvar_initialize(&result->all_wus_done);

	result->max_wus = max_wus;
	result->fibril_cnt = fibril_cnt;
	result->wu_size = wu_storage_size;
	result->wu_storage_free_count = max_wus;
	result->stop = false;
	result->active_groups = 0;

	for (size_t i = 0; i < fibril_cnt; i++) {
		result->fibrils[i] = fibril_create(fge_fibril, result);
		fibril_start(result->fibrils[i]);
	}

	return result;
bad:
	if (result->queue.fexecs != NULL)
		free(result->queue.fexecs);
	if (bitmap_data != NULL)
		free(bitmap_data);
	if (result->wu_storage != NULL)
		free(result->wu_storage);
	if (result->fibrils != NULL)
		free(result->fibrils);
	free(result);

	return NULL;
}

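/** Destroy a fibril pool.
 *
 * Sets the stop flag, blocks until all active groups
 * have finished and then frees all pool resources.
 *
 * @param pool pool to destroy
 */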
void hr_fpool_destroy(hr_fpool_t *pool)
{
	fibril_mutex_lock(&pool->lock);
	pool->stop = true;
	while (pool->active_groups > 0)
		fibril_condvar_wait(&pool->all_wus_done, &pool->lock);

	fibril_mutex_unlock(&pool->lock);

	free(pool->bitmap.bits);
	free(pool->queue.fexecs);
	free(pool->wu_storage);
	free(pool->fibrils);
	free(pool);
}

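/** Create a work unit group.
 *
 * Reserves pre-allocated pool storage for as many of the
 * wu_cnt work units as possible; storage for the remainder
 * is allocated privately for this group.
 *
 * @param parent pool to execute the group in
 * @param wu_cnt total number of work units in the group
 *
 * @return group on success, NULL on allocation failure
 */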
hr_fgroup_t *hr_fgroup_create(hr_fpool_t *parent, size_t wu_cnt)
{
	assert(wu_cnt > 0);

	hr_fgroup_t *result = malloc(sizeof(hr_fgroup_t));
	if (result == NULL)
		return NULL;

	result->reserved_cnt = 0;
	result->own_mem = NULL;
	result->memslots = NULL;

	fibril_mutex_lock(&parent->lock);

	parent->active_groups++;

	if (parent->wu_storage_free_count >= wu_cnt) {
		parent->wu_storage_free_count -= wu_cnt;
		result->reserved_cnt = wu_cnt;
	} else {
		/*
		 * Could be more conservative with memory here and
		 * allocate space only for one work unit and execute
		 * work units sequentially, as originally intended with
		 * the fallback storage.
		 */
		size_t taking = parent->wu_storage_free_count;
		result->own_mem = malloc(parent->wu_size * (wu_cnt - taking));
		if (result->own_mem == NULL)
			goto bad;
		result->reserved_cnt = taking;
		parent->wu_storage_free_count = 0;
	}

	if (result->reserved_cnt > 0) {
		result->memslots =
		    malloc(sizeof(size_t) * result->reserved_cnt);
		if (result->memslots == NULL)
			goto bad;
	}

	fibril_mutex_unlock(&parent->lock);

	result->pool = parent;
	result->wu_cnt = wu_cnt;
	result->submitted = 0;
	result->reserved_avail = result->reserved_cnt;
	result->own_used = 0;
	result->final_errno = EOK;
	result->finished_okay = 0;
	result->finished_fail = 0;

	fibril_mutex_initialize(&result->lock);
	fibril_condvar_initialize(&result->all_done);

	return result;

bad:
	parent->wu_storage_free_count += result->reserved_cnt;
	fibril_mutex_unlock(&parent->lock);

	if (result->memslots != NULL)
		free(result->memslots);
	if (result->own_mem != NULL)
		free(result->own_mem);
	free(result);

	return NULL;
}

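/** Get storage for a work unit.
 *
 * Intended to be called once per work unit, before the
 * matching hr_fgroup_submit(); the returned memory is meant
 * to hold the argument passed to the submitted work unit.
 *
 * @param group group the work unit belongs to
 *
 * @return pointer to storage of the size given to hr_fpool_create()
 */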
void *hr_fgroup_alloc(hr_fgroup_t *group)
{
	void *storage;

	fibril_mutex_lock(&group->lock);

	if (group->reserved_avail > 0) {
		ssize_t memslot;
		storage = hr_fpool_make_storage(group->pool, &memslot);
		assert(storage != NULL);
		group->reserved_avail--;
		group->memslots[group->submitted] = memslot;
	} else {
		storage =
		    group->own_mem + group->pool->wu_size * group->own_used;
		group->own_used++;
	}

	fibril_mutex_unlock(&group->lock);

	return storage;
}

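/** Submit a work unit to the group.
 *
 * The work unit is queued for execution by one of the
 * pool's worker fibrils.
 *
 * @param group group to submit to
 * @param wu    work unit function
 * @param arg   argument handed to the work unit function
 */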
void hr_fgroup_submit(hr_fgroup_t *group, hr_wu_t wu, void *arg)
{
	fibril_mutex_lock(&group->lock);
	assert(group->submitted < group->wu_cnt);

	fge_fibril_data_t executor;
	executor.wu = wu;
	executor.arg = arg;
	executor.group = group;

	if (group->submitted < group->reserved_cnt)
		executor.memslot = group->memslots[group->submitted];
	else
		executor.memslot = -1;

	group->submitted++;
	fibril_mutex_unlock(&group->lock);

	wu_queue_push(&group->pool->queue, &executor);
}

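/** Wait for an entire group to finish.
 *
 * All wu_cnt work units must have been submitted before
 * calling this. The group is freed before returning and
 * must not be accessed afterwards.
 *
 * @param group   group to wait on
 * @param rokay   if not NULL, receives the number of successful work units
 * @param rfailed if not NULL, receives the number of failed work units
 *
 * @return EOK if all work units returned EOK, EIO otherwise
 */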
errno_t hr_fgroup_wait(hr_fgroup_t *group, size_t *rokay, size_t *rfailed)
{
	assert(group->submitted == group->wu_cnt);

	fibril_mutex_lock(&group->lock);
	while (true) {
		size_t finished = group->finished_fail + group->finished_okay;
		if (group->wu_cnt == finished)
			break;

		fibril_condvar_wait(&group->all_done, &group->lock);
	}

	if (rokay)
		*rokay = group->finished_okay;
	if (rfailed)
		*rfailed = group->finished_fail;

	errno_t rc = EOK;
	if (group->finished_okay != group->wu_cnt)
		rc = EIO;

	fibril_mutex_unlock(&group->lock);

	hr_fpool_group_epilogue(group->pool);

	if (group->memslots != NULL)
		free(group->memslots);
	if (group->own_mem != NULL)
		free(group->own_mem);
	free(group);

	return rc;
}

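/** Grab a free pre-allocated storage slot from the pool.
 *
 * @param pool     pool to take the slot from
 * @param rmemslot if not NULL, receives the bitmap index of the slot
 *
 * @return pointer to the reserved slot
 */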
static void *hr_fpool_make_storage(hr_fpool_t *pool, ssize_t *rmemslot)
{
	fibril_mutex_lock(&pool->lock);
	ssize_t memslot = hr_fpool_get_free_slot(pool);
	assert(memslot != -1);

	bitmap_set(&pool->bitmap, memslot, 1);

	fibril_mutex_unlock(&pool->lock);

	if (rmemslot)
		*rmemslot = memslot;

	return pool->wu_storage + pool->wu_size * memslot;
}

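/** Decrement the pool's active group count and wake up
 * hr_fpool_destroy() when no active groups remain.
 */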
static void hr_fpool_group_epilogue(hr_fpool_t *pool)
{
	fibril_mutex_lock(&pool->lock);

	pool->active_groups--;
	if (pool->active_groups == 0)
		fibril_condvar_signal(&pool->all_wus_done);

	fibril_mutex_unlock(&pool->lock);
}

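/** Worker fibril main loop.
 *
 * Pops work units from the shared queue and executes them
 * until the pool is stopped and the queue has been drained.
 *
 * @param arg the pool (hr_fpool_t *) the fibril belongs to
 *
 * @return EOK
 */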
static errno_t fge_fibril(void *arg)
{
	hr_fpool_t *pool = arg;
	while (true) {
		fge_fibril_data_t executor;
		fibril_mutex_lock(&pool->lock);

		/* wait for work or for a stop request */
		while (circ_buf_nused(&pool->queue.cbuf) == 0 && !pool->stop) {
			fibril_condvar_wait(&pool->queue.not_empty,
			    &pool->lock);
		}

		/* exit only after the queue has been drained */
		if (pool->stop && circ_buf_nused(&pool->queue.cbuf) == 0) {
			fibril_mutex_unlock(&pool->lock);
			break;
		}

		wu_queue_pop(&pool->queue, &executor);

		fibril_mutex_unlock(&pool->lock);

		hr_fgroup_t *group = executor.group;

		/* execute the work unit */
		errno_t rc = executor.wu(executor.arg);

		if (rc == EOK) {
			fibril_mutex_lock(&group->lock);
			group->finished_okay++;
			fibril_mutex_unlock(&group->lock);
		} else {
			fibril_mutex_lock(&group->lock);
			group->finished_fail++;
			fibril_mutex_unlock(&group->lock);
		}

		/* return the pool storage slot, if one was used */
		fibril_mutex_lock(&pool->lock);
		if (executor.memslot > -1) {
			bitmap_set(&pool->bitmap, executor.memslot, 0);
			pool->wu_storage_free_count++;
		}

		fibril_mutex_lock(&group->lock);
		size_t finished = group->finished_fail + group->finished_okay;
		fibril_mutex_unlock(&group->lock);
		if (finished == group->wu_cnt)
			fibril_condvar_signal(&group->all_done);

		fibril_mutex_unlock(&pool->lock);
	}
	return EOK;
}

static errno_t wu_queue_init(wu_queue_t *queue, size_t nmemb)
{
	queue->fexecs = malloc(sizeof(fge_fibril_data_t) * nmemb);
	if (queue->fexecs == NULL)
		return ENOMEM;

	circ_buf_init(&queue->cbuf, queue->fexecs, nmemb,
	    sizeof(fge_fibril_data_t));

	fibril_mutex_initialize(&queue->lock);
	fibril_condvar_initialize(&queue->not_empty);
	fibril_condvar_initialize(&queue->not_full);

	return EOK;
}

static void wu_queue_push(wu_queue_t *queue, fge_fibril_data_t *executor)
{
	fibril_mutex_lock(&queue->lock);

	while (circ_buf_push(&queue->cbuf, executor) == EAGAIN)
		fibril_condvar_wait(&queue->not_full, &queue->lock);

	fibril_condvar_signal(&queue->not_empty);

	fibril_mutex_unlock(&queue->lock);
}

static void wu_queue_pop(wu_queue_t *queue, fge_fibril_data_t *executor)
{
	fibril_mutex_lock(&queue->lock);

	while (circ_buf_pop(&queue->cbuf, executor) == EAGAIN)
		fibril_condvar_wait(&queue->not_empty, &queue->lock);

	fibril_condvar_signal(&queue->not_full);

	fibril_mutex_unlock(&queue->lock);
}

static ssize_t hr_fpool_get_free_slot(hr_fpool_t *pool)
{
	bitmap_t *bitmap = &pool->bitmap;
	for (size_t i = 0; i < pool->max_wus; i++)
		if (!bitmap_get(bitmap, i))
			return i;
	return -1;
}

/** @}
 */