source: mainline/uspace/srv/bd/hr/fge.c@d574c11

Last change on this file since d574c11 was d574c11, checked in by Miroslav Cimerman <mc@…>, 2 months ago

hr: fge: use malloc_waitok()

/*
 * Copyright (c) 2025 Miroslav Cimerman
 * Copyright (c) 2024 Vojtech Horky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup hr
 * @{
 */
/**
 * @file
 * @brief Fibril group executor
 *
 * A fibril pool with pre-allocated storage that executes
 * groups consisting of multiple work units.
 */
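
/*
 * Illustrative usage sketch (my_wu, my_data_t and the counts below
 * are hypothetical, not part of this file's API):
 *
 *	static errno_t my_wu(void *arg)
 *	{
 *		my_data_t *data = arg;
 *		... process one work unit ...
 *		return EOK;
 *	}
 *
 *	hr_fpool_t *pool = hr_fpool_create(4, 16, sizeof(my_data_t));
 *	hr_fgroup_t *group = hr_fgroup_create(pool, 2);
 *	for (size_t i = 0; i < 2; i++) {
 *		my_data_t *data = hr_fgroup_alloc(group);
 *		... fill *data ...
 *		hr_fgroup_submit(group, my_wu, data);
 *	}
 *	errno_t rc = hr_fgroup_wait(group, NULL, NULL);
 */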

#include <adt/bitmap.h>
#include <adt/circ_buf.h>
#include <assert.h>
#include <errno.h>
#include <fibril_synch.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <types/common.h>

#include "fge.h"
#include "util.h"

static void *hr_fpool_make_storage(hr_fpool_t *, ssize_t *);
static void hr_fpool_group_epilogue(hr_fpool_t *);
static errno_t fge_fibril(void *);
static errno_t wu_queue_init(wu_queue_t *, size_t);
static void wu_queue_push(wu_queue_t *, fge_fibril_data_t *);
static void wu_queue_pop(wu_queue_t *, fge_fibril_data_t *);
static ssize_t hr_fpool_get_free_slot(hr_fpool_t *);

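/** Create a fibril pool.
 *
 * Spawns @a fibril_cnt executor fibrils and pre-allocates
 * @a max_wus work unit storage slots of @a wu_storage_size
 * bytes each.
 *
 * @return Pointer to the new pool, or NULL on allocation failure.
 */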
hr_fpool_t *hr_fpool_create(size_t fibril_cnt, size_t max_wus,
    size_t wu_storage_size)
{
	assert(max_wus > 0 && wu_storage_size > 0);

	void *bitmap_data = NULL;

	hr_fpool_t *result = calloc(1, sizeof(hr_fpool_t));
	if (result == NULL)
		return NULL;

	result->fibrils = malloc(sizeof(fid_t) * fibril_cnt);
	if (result->fibrils == NULL)
		goto bad;

	result->wu_storage = malloc(wu_storage_size * max_wus);
	if (result->wu_storage == NULL)
		goto bad;

	bitmap_data = calloc(1, bitmap_size(max_wus));
	if (bitmap_data == NULL)
		goto bad;
	bitmap_initialize(&result->bitmap, max_wus, bitmap_data);

	if (wu_queue_init(&result->queue, max_wus) != EOK)
		goto bad;

	fibril_mutex_initialize(&result->lock);
	fibril_condvar_initialize(&result->all_wus_done);

	result->max_wus = max_wus;
	result->fibril_cnt = fibril_cnt;
	result->wu_size = wu_storage_size;
	result->wu_storage_free_count = max_wus;
	result->stop = false;
	result->active_groups = 0;

	for (size_t i = 0; i < fibril_cnt; i++) {
		result->fibrils[i] = fibril_create(fge_fibril, result);
		fibril_start(result->fibrils[i]);
		/* fibril_detach(result->fibrils[i]); */
	}

	return result;
bad:
	if (result->queue.fexecs != NULL)
		free(result->queue.fexecs);
	if (bitmap_data != NULL)
		free(bitmap_data);
	if (result->wu_storage != NULL)
		free(result->wu_storage);
	if (result->fibrils != NULL)
		free(result->fibrils);
	free(result);

	return NULL;
}

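/** Destroy a fibril pool.
 *
 * Sets the stop flag, waits until no active groups remain
 * and frees all pool resources.
 */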
void hr_fpool_destroy(hr_fpool_t *pool)
{
	fibril_mutex_lock(&pool->lock);
	pool->stop = true;
	while (pool->active_groups > 0)
		fibril_condvar_wait(&pool->all_wus_done, &pool->lock);

	fibril_mutex_unlock(&pool->lock);

	free(pool->bitmap.bits);
	free(pool->queue.fexecs);
	free(pool->wu_storage);
	free(pool->fibrils);
	free(pool);
}

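/** Create a work unit group on top of a pool.
 *
 * Reserves as many of the pool's pre-allocated storage slots as
 * are free; any remainder of the @a wu_cnt work units is backed
 * by memory owned by the group itself.
 *
 * @param parent Pool to execute the group on
 * @param wu_cnt Total number of work units the group will submit
 */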
hr_fgroup_t *hr_fgroup_create(hr_fpool_t *parent, size_t wu_cnt)
{
	assert(wu_cnt > 0);

	hr_fgroup_t *result = malloc_waitok(sizeof(hr_fgroup_t));

	result->reserved_cnt = 0;
	result->own_mem = NULL;
	result->memslots = NULL;

	fibril_mutex_lock(&parent->lock);

	parent->active_groups++;

	if (parent->wu_storage_free_count >= wu_cnt) {
		parent->wu_storage_free_count -= wu_cnt;
		result->reserved_cnt = wu_cnt;
	} else {
		/*
		 * We could be more conservative with memory here and
		 * allocate space for only one work unit, executing
		 * work units sequentially as originally intended with
		 * the fallback storage.
		 */
		size_t taking = parent->wu_storage_free_count;
		result->own_mem = malloc_waitok(parent->wu_size * (wu_cnt - taking));
		result->reserved_cnt = taking;
		parent->wu_storage_free_count = 0;
	}

	if (result->reserved_cnt > 0) {
		result->memslots =
		    malloc_waitok(sizeof(size_t) * result->reserved_cnt);
	}

	fibril_mutex_unlock(&parent->lock);

	result->pool = parent;
	result->wu_cnt = wu_cnt;
	result->submitted = 0;
	result->reserved_avail = result->reserved_cnt;
	result->own_used = 0;
	result->final_errno = EOK;
	result->finished_okay = 0;
	result->finished_fail = 0;

	fibril_mutex_initialize(&result->lock);
	fibril_condvar_initialize(&result->all_done);

	return result;
}

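/** Get storage for the next work unit.
 *
 * Hands out a reserved pool slot while the group's reservation
 * lasts, then falls back to the group's own memory. Expected to
 * be called before each hr_fgroup_submit().
 */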
void *hr_fgroup_alloc(hr_fgroup_t *group)
{
	void *storage;

	fibril_mutex_lock(&group->lock);

	assert(group->submitted < group->wu_cnt);

	if (group->reserved_avail > 0) {
		ssize_t memslot;
		storage = hr_fpool_make_storage(group->pool, &memslot);
		assert(storage != NULL);
		group->reserved_avail--;
		group->memslots[group->submitted] = memslot;
	} else {
		assert(group->own_mem != NULL);
		storage =
		    group->own_mem + group->pool->wu_size * group->own_used;
		group->own_used++;
	}

	fibril_mutex_unlock(&group->lock);

	return storage;
}

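/** Submit a work unit for execution.
 *
 * @param group Group the work unit belongs to
 * @param wu    Work unit function
 * @param arg   Argument for @a wu, typically storage obtained
 *              from hr_fgroup_alloc()
 */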
void hr_fgroup_submit(hr_fgroup_t *group, hr_wu_t wu, void *arg)
{
	fibril_mutex_lock(&group->lock);
	assert(group->submitted < group->wu_cnt);

	fge_fibril_data_t executor;
	executor.wu = wu;
	executor.arg = arg;
	executor.group = group;

	if (group->submitted < group->reserved_cnt)
		executor.memslot = group->memslots[group->submitted];
	else
		executor.memslot = -1;

	group->submitted++;
	fibril_mutex_unlock(&group->lock);

	wu_queue_push(&group->pool->queue, &executor);
}

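/** Wait until all of the group's submitted work units finish.
 *
 * Frees the group, which must not be used afterwards.
 *
 * @param rokay   If not NULL, set to the number of successful work units
 * @param rfailed If not NULL, set to the number of failed work units
 *
 * @return ENOMEM if any work unit failed with ENOMEM, EOK otherwise
 */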
errno_t hr_fgroup_wait(hr_fgroup_t *group, size_t *rokay, size_t *rfailed)
{
	fibril_mutex_lock(&group->lock);
	assert(group->submitted <= group->wu_cnt);

	while (true) {
		size_t finished = group->finished_fail + group->finished_okay;
		if (finished == group->submitted)
			break;

		fibril_condvar_wait(&group->all_done, &group->lock);
	}

	if (rokay)
		*rokay = group->finished_okay;
	if (rfailed)
		*rfailed = group->finished_fail;

	errno_t rc = group->final_errno;

	fibril_mutex_unlock(&group->lock);

	hr_fpool_group_epilogue(group->pool);

	if (group->memslots != NULL)
		free(group->memslots);
	if (group->own_mem != NULL)
		free(group->own_mem);
	free(group);

	return rc;
}

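/** Claim a free pool storage slot.
 *
 * Marks the slot as used in the pool bitmap and optionally returns
 * its index through @a rmemslot.
 */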
static void *hr_fpool_make_storage(hr_fpool_t *pool, ssize_t *rmemslot)
{
	fibril_mutex_lock(&pool->lock);
	ssize_t memslot = hr_fpool_get_free_slot(pool);
	assert(memslot != -1);

	bitmap_set(&pool->bitmap, memslot, 1);

	fibril_mutex_unlock(&pool->lock);

	if (rmemslot)
		*rmemslot = memslot;

	return pool->wu_storage + pool->wu_size * memslot;
}

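/** Retire a finished group.
 *
 * Decreases the pool's active group count and signals
 * hr_fpool_destroy() once the last group is gone.
 */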
static void hr_fpool_group_epilogue(hr_fpool_t *pool)
{
	fibril_mutex_lock(&pool->lock);

	pool->active_groups--;
	if (pool->active_groups == 0)
		fibril_condvar_signal(&pool->all_wus_done);

	fibril_mutex_unlock(&pool->lock);
}

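/** Executor fibril main loop.
 *
 * Pops work units off the pool's queue and executes them until the
 * pool is stopped and the queue is drained. Records per-group
 * success/failure counts and releases any pool storage slot the
 * work unit used.
 */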
static errno_t fge_fibril(void *arg)
{
	hr_fpool_t *pool = arg;
	while (true) {
		fge_fibril_data_t executor;
		fibril_mutex_lock(&pool->lock);

		while (circ_buf_nused(&pool->queue.cbuf) == 0 && !pool->stop) {
			fibril_condvar_wait(&pool->queue.not_empty,
			    &pool->lock);
		}

		if (pool->stop && circ_buf_nused(&pool->queue.cbuf) == 0) {
			fibril_mutex_unlock(&pool->lock);
			break;
		}

		wu_queue_pop(&pool->queue, &executor);

		fibril_mutex_unlock(&pool->lock);

		hr_fgroup_t *group = executor.group;

		errno_t rc = executor.wu(executor.arg);

		if (rc == EOK) {
			fibril_mutex_lock(&group->lock);
			group->finished_okay++;
			fibril_mutex_unlock(&group->lock);
		} else {
			fibril_mutex_lock(&group->lock);
			group->finished_fail++;
			if (rc == ENOMEM)
				group->final_errno = ENOMEM;
			fibril_mutex_unlock(&group->lock);
		}

		fibril_mutex_lock(&pool->lock);
		if (executor.memslot > -1) {
			bitmap_set(&pool->bitmap, executor.memslot, 0);
			pool->wu_storage_free_count++;
		}

		fibril_mutex_lock(&group->lock);
		size_t finished = group->finished_fail + group->finished_okay;
		if (finished == group->submitted)
			fibril_condvar_signal(&group->all_done);
		fibril_mutex_unlock(&group->lock);

		fibril_mutex_unlock(&pool->lock);
	}
	return EOK;
}

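/** Initialize a work unit queue with capacity for @a nmemb entries. */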
static errno_t wu_queue_init(wu_queue_t *queue, size_t nmemb)
{
	queue->fexecs = malloc(sizeof(fge_fibril_data_t) * nmemb);
	if (queue->fexecs == NULL)
		return ENOMEM;

	circ_buf_init(&queue->cbuf, queue->fexecs, nmemb,
	    sizeof(fge_fibril_data_t));

	fibril_mutex_initialize(&queue->lock);
	fibril_condvar_initialize(&queue->not_empty);
	fibril_condvar_initialize(&queue->not_full);

	return EOK;
}

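/** Push a work unit to the queue, blocking while the queue is full. */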
static void wu_queue_push(wu_queue_t *queue, fge_fibril_data_t *executor)
{
	fibril_mutex_lock(&queue->lock);

	while (circ_buf_push(&queue->cbuf, executor) == EAGAIN)
		fibril_condvar_wait(&queue->not_full, &queue->lock);

	fibril_condvar_signal(&queue->not_empty);

	fibril_mutex_unlock(&queue->lock);
}

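/** Pop a work unit from the queue, blocking while the queue is empty. */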
static void wu_queue_pop(wu_queue_t *queue, fge_fibril_data_t *executor)
{
	fibril_mutex_lock(&queue->lock);

	while (circ_buf_pop(&queue->cbuf, executor) == EAGAIN)
		fibril_condvar_wait(&queue->not_empty, &queue->lock);

	fibril_condvar_signal(&queue->not_full);

	fibril_mutex_unlock(&queue->lock);
}

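/** Return the index of the first free storage slot, or -1 if none. */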
static ssize_t hr_fpool_get_free_slot(hr_fpool_t *pool)
{
	bitmap_t *bitmap = &pool->bitmap;
	for (size_t i = 0; i < pool->max_wus; i++)
		if (!bitmap_get(bitmap, i))
			return i;
	return -1;
}

/** @}
 */