source: mainline/kernel/generic/src/cap/cap.c@ 45226285

Last change on this file since 45226285 was 0db0df2, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 3 months ago

Hash table improvements

Implement hash_table_foreach macro, analogous to list_foreach.

Remove superfluous argument to hash_table_find_next().
(If the user needs to recheck the part of the list already
checked by hash_table_find(), they can just rerun that function.)

Add hash argument to hash_table_ops_t::key_equal.
The big change here is that users with big keys can store the hash
value alongside the key in their entries, and for the low low cost of
sizeof(size_t) bytes eliminate a bunch of expensive key comparisons.

Also added a hash function for strings and arbitrary data.
Found this one by asking ChatGPT, because the latency of accesses
to my book collection is currently a couple of hours.

+ Some drive-by unused #include removal.
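
To illustrate the key_equal change described above: a user whose keys are
expensive to compare can cache the hash in the entry and test it before the
full comparison. The entry type, field names and callback below are
hypothetical, sketched against the kernel hash table interface used in this
file:

    typedef struct {
        ht_link_t link;
        size_t hash;   /* hash of name, computed once when the entry is inserted */
        char *name;    /* the actual, potentially long, key */
    } my_entry_t;

    static bool my_key_equal(const void *key, size_t hash, const ht_link_t *item)
    {
        my_entry_t *e = hash_table_get_inst(item, my_entry_t, link);
        /* The cheap hash comparison weeds out almost all mismatches before
         * the expensive string comparison is attempted. */
        return e->hash == hash && str_cmp(e->name, (const char *) key) == 0;
    }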

/*
 * Copyright (c) 2017 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic
 * @{
 */
/** @file
 */

/*
 * HelenOS capabilities are task-local names for references to kernel objects.
 * Kernel objects are reference-counted wrappers for a select group of objects
 * allocated in and by the kernel that can be made accessible to userspace in a
 * controlled way via integer handles.
 *
 * A kernel object (kobject_t) encapsulates one of the following raw objects:
 *
 * - IPC call
 * - IPC phone
 * - IRQ object
 * - System wait queue
 *
 * A capability (cap_t) is either free, allocated or published. Free
 * capabilities can be allocated, which reserves the capability handle in the
 * task-local capability space. Allocated capabilities can be published, which
 * associates them with an existing kernel object. Userspace can only access
 * published capabilities.
 *
 * A published capability may get unpublished, which disassociates it from the
 * underlying kernel object and puts it back into the allocated state. An
 * allocated capability can be freed to become available for future use.
 *
 * There is a 1:1 correspondence between a kernel object (kobject_t) and the
 * actual raw object it encapsulates. A kernel object (kobject_t) may have
 * multiple references. Implicit references come from the capabilities (cap_t)
 * that point to the kernel object, possibly from several different tasks.
 * Explicit references are created by looking up a capability handle with
 * kobject_get(), by duplicating an existing reference with kobject_add_ref(),
 * or by unpublishing a capability and disassociating it from its kobject_t
 * with cap_unpublish().
 *
 * A holder of an explicit reference to a kernel object may revoke access to it
 * from all capabilities that point to it by calling cap_revoke().
 *
 * As kernel objects are reference-counted, they get automatically destroyed
 * when their last reference is dropped in kobject_put(). The idea is that
 * whenever a kernel object is inserted into some sort of a container (e.g. a
 * list or hash table), its reference count should be incremented via
 * kobject_get() or kobject_add_ref(). When the kernel object is removed from
 * the container, the reference count should go down via a call to
 * kobject_put().
 */
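
/*
 * Illustrative sketch of the typical life cycle described above, as seen by
 * kernel code that hands an object out to userspace. The kobj variable, the
 * task pointer and the use of a phone object are assumptions made for the
 * example; error handling is trimmed:
 *
 *	cap_handle_t handle;
 *	if (cap_alloc(task, &handle) != EOK)	// free -> allocated
 *		return ENOMEM;
 *	cap_publish(task, handle, kobj);	// allocated -> published
 *
 *	// ... later, when taking the object away from userspace ...
 *	kobject_t *k = cap_unpublish(task, handle, KOBJECT_TYPE_PHONE);
 *	if (k)
 *		kobject_put(k);			// drop the reference returned above
 *	cap_free(task, handle);			// allocated -> free
 */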

#include <cap/cap.h>
#include <abi/cap.h>
#include <proc/task.h>
#include <synch/mutex.h>
#include <abi/errno.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <synch/syswaitq.h>
#include <ipc/ipcrsc.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>

#define CAPS_START ((intptr_t) CAP_NIL + 1)
#define CAPS_SIZE (INT_MAX - (int) CAPS_START)
#define CAPS_LAST (CAPS_SIZE - 1)

static slab_cache_t *cap_cache;
static slab_cache_t *kobject_cache;

kobject_ops_t *kobject_ops[KOBJECT_TYPE_MAX] = {
	[KOBJECT_TYPE_CALL] = &call_kobject_ops,
	[KOBJECT_TYPE_IRQ] = &irq_kobject_ops,
	[KOBJECT_TYPE_PHONE] = &phone_kobject_ops,
	[KOBJECT_TYPE_WAITQ] = &waitq_kobject_ops
};

static size_t caps_hash(const ht_link_t *item)
{
	cap_t *cap = hash_table_get_inst(item, cap_t, caps_link);
	return hash_mix(cap_handle_raw(cap->handle));
}

static size_t caps_key_hash(const void *key)
{
	const cap_handle_t *handle = key;
	return hash_mix(cap_handle_raw(*handle));
}

static bool caps_key_equal(const void *key, size_t hash, const ht_link_t *item)
{
	const cap_handle_t *handle = key;
	cap_t *cap = hash_table_get_inst(item, cap_t, caps_link);
	return *handle == cap->handle;
}

static const hash_table_ops_t caps_ops = {
	.hash = caps_hash,
	.key_hash = caps_key_hash,
	.key_equal = caps_key_equal
};

void caps_init(void)
{
	cap_cache = slab_cache_create("cap_t", sizeof(cap_t), 0, NULL,
	    NULL, 0);
	kobject_cache = slab_cache_create("kobject_t", sizeof(kobject_t), 0,
	    NULL, NULL, 0);
}

/** Allocate the capability info structure
 *
 * @param task Task for which to allocate the info structure.
 */
errno_t caps_task_alloc(task_t *task)
{
	task->cap_info = (cap_info_t *) malloc(sizeof(cap_info_t));
	if (!task->cap_info)
		return ENOMEM;
	task->cap_info->handles = ra_arena_create();
	if (!task->cap_info->handles)
		goto error_handles;
	if (!ra_span_add(task->cap_info->handles, CAPS_START, CAPS_SIZE))
		goto error_span;
	if (!hash_table_create(&task->cap_info->caps, 0, 0, &caps_ops))
		goto error_span;
	return EOK;

error_span:
	ra_arena_destroy(task->cap_info->handles);
error_handles:
	free(task->cap_info);
	return ENOMEM;
}

/** Initialize the capability info structure
 *
 * @param task Task for which to initialize the info structure.
 */
void caps_task_init(task_t *task)
{
	mutex_initialize(&task->cap_info->lock, MUTEX_RECURSIVE);

	for (kobject_type_t t = 0; t < KOBJECT_TYPE_MAX; t++)
		list_initialize(&task->cap_info->type_list[t]);
}

/** Deallocate the capability info structure
 *
 * @param task Task from which to deallocate the info structure.
 */
void caps_task_free(task_t *task)
{
	hash_table_destroy(&task->cap_info->caps);
	ra_arena_destroy(task->cap_info->handles);
	free(task->cap_info);
}

/** Invoke callback function on task's capabilities of given type
 *
 * @param task Task where the invocation should take place.
 * @param type Kernel object type of the task's capabilities that will be
 *             subject to the callback invocation.
 * @param cb   Callback function.
 * @param arg  Argument for the callback function.
 *
 * @return True if the callback was called on all matching capabilities.
 * @return False if the callback was applied only partially.
 */
bool caps_apply_to_kobject_type(task_t *task, kobject_type_t type,
    bool (*cb)(cap_t *, void *), void *arg)
{
	bool done = true;

	mutex_lock(&task->cap_info->lock);
	list_foreach_safe(task->cap_info->type_list[type], cur, next) {
		cap_t *cap = list_get_instance(cur, cap_t, type_link);
		done = cb(cap, arg);
		if (!done)
			break;
	}
	mutex_unlock(&task->cap_info->lock);

	return done;
}
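
/*
 * Illustrative sketch of a caller of the function above (the callback and
 * variable names are made up). The callback runs with the task's cap_info
 * lock held and stops the iteration by returning false:
 *
 *	static bool count_cb(cap_t *cap, void *arg)
 *	{
 *		size_t *cnt = (size_t *) arg;
 *		(*cnt)++;
 *		return true;	// continue with the next capability
 *	}
 *
 *	size_t phones = 0;
 *	bool all = caps_apply_to_kobject_type(task, KOBJECT_TYPE_PHONE,
 *	    count_cb, &phones);
 */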

/** Initialize capability and associate it with its handle
 *
 * @param cap    Address of the capability.
 * @param task   Backlink to the owning task.
 * @param handle Capability handle.
 */
static void cap_initialize(cap_t *cap, task_t *task, cap_handle_t handle)
{
	cap->state = CAP_STATE_FREE;
	cap->task = task;
	cap->handle = handle;
	link_initialize(&cap->kobj_link);
	link_initialize(&cap->type_link);
}

/** Get capability using capability handle
 *
 * @param task   Task whose capability to get.
 * @param handle Capability handle of the desired capability.
 * @param state  State in which the capability must be.
 *
 * @return Address of the desired capability if it exists and its state matches.
 * @return NULL if no such capability exists or it's in a different state.
 */
static cap_t *cap_get(task_t *task, cap_handle_t handle, cap_state_t state)
{
	assert(mutex_locked(&task->cap_info->lock));

	if ((cap_handle_raw(handle) < CAPS_START) ||
	    (cap_handle_raw(handle) > CAPS_LAST))
		return NULL;
	ht_link_t *link = hash_table_find(&task->cap_info->caps, &handle);
	if (!link)
		return NULL;
	cap_t *cap = hash_table_get_inst(link, cap_t, caps_link);
	if (cap->state != state)
		return NULL;
	return cap;
}

/** Allocate new capability
 *
 * @param task        Task for which to allocate the new capability.
 * @param[out] handle New capability handle on success.
 *
 * @return EOK on success.
 * @return ENOMEM if the capability or its handle could not be allocated.
 */
errno_t cap_alloc(task_t *task, cap_handle_t *handle)
{
	mutex_lock(&task->cap_info->lock);
	cap_t *cap = slab_alloc(cap_cache, FRAME_ATOMIC);
	if (!cap) {
		mutex_unlock(&task->cap_info->lock);
		return ENOMEM;
	}
	uintptr_t hbase;
	if (!ra_alloc(task->cap_info->handles, 1, 1, &hbase)) {
		slab_free(cap_cache, cap);
		mutex_unlock(&task->cap_info->lock);
		return ENOMEM;
	}
	cap_initialize(cap, task, (cap_handle_t) hbase);
	hash_table_insert(&task->cap_info->caps, &cap->caps_link);

	cap->state = CAP_STATE_ALLOCATED;
	*handle = cap->handle;
	mutex_unlock(&task->cap_info->lock);

	return EOK;
}

/** Publish allocated capability
 *
 * The kernel object is moved into the capability. In other words, its reference
 * is handed over to the capability. Once published, userspace can access and
 * manipulate the capability.
 *
 * @param task   Task in which to publish the capability.
 * @param handle Capability handle.
 * @param kobj   Kernel object.
 */
void
cap_publish(task_t *task, cap_handle_t handle, kobject_t *kobj)
{
	mutex_lock(&kobj->caps_list_lock);
	mutex_lock(&task->cap_info->lock);
	cap_t *cap = cap_get(task, handle, CAP_STATE_ALLOCATED);
	assert(cap);
	cap->state = CAP_STATE_PUBLISHED;
	/* Hand over kobj's reference to cap */
	cap->kobject = kobj;
	list_append(&cap->kobj_link, &kobj->caps_list);
	list_append(&cap->type_link, &task->cap_info->type_list[kobj->type]);
	mutex_unlock(&task->cap_info->lock);
	mutex_unlock(&kobj->caps_list_lock);
}
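
/*
 * Illustrative sketch of the producer side pairing cap_alloc() with
 * cap_publish(). The wq object and its prior creation are assumptions made
 * for the example; error handling is trimmed:
 *
 *	kobject_t *kobj = kobject_alloc(0);
 *	kobject_initialize(kobj, KOBJECT_TYPE_WAITQ, wq);
 *
 *	cap_handle_t handle;
 *	if (cap_alloc(task, &handle) != EOK) {
 *		kobject_put(kobj);	// destroys wq via the type's destroy op
 *		return ENOMEM;
 *	}
 *	cap_publish(task, handle, kobj);	// the initial reference now belongs to the capability
 */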

static void cap_unpublish_unsafe(cap_t *cap)
{
	cap->kobject = NULL;
	list_remove(&cap->kobj_link);
	list_remove(&cap->type_link);
	cap->state = CAP_STATE_ALLOCATED;
}

/** Unpublish published capability
 *
 * The kernel object is moved out of the capability. In other words, the
 * capability's reference to the object is handed over to the kernel object
 * pointer returned by this function. Once unpublished, the capability does not
 * refer to any kernel object anymore.
 *
 * @param task   Task in which to unpublish the capability.
 * @param handle Capability handle.
 * @param type   Kernel object type of the object associated with the
 *               capability.
 *
 * @return Pointer and explicit reference to the kobject that was associated
 *         with the capability.
 * @return NULL if the handle does not refer to a published capability of the
 *         given type.
 */
kobject_t *cap_unpublish(task_t *task, cap_handle_t handle, kobject_type_t type)
{
	kobject_t *kobj = NULL;

restart:
	mutex_lock(&task->cap_info->lock);
	cap_t *cap = cap_get(task, handle, CAP_STATE_PUBLISHED);
	if (cap) {
		if (cap->kobject->type == type) {
			/* Hand over cap's reference to kobj */
			kobj = cap->kobject;
			if (mutex_trylock(&kobj->caps_list_lock) != EOK) {
				mutex_unlock(&task->cap_info->lock);
				kobj = NULL;
				goto restart;
			}
			cap_unpublish_unsafe(cap);
			mutex_unlock(&kobj->caps_list_lock);
		}
	}
	mutex_unlock(&task->cap_info->lock);

	return kobj;
}
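
/*
 * Illustrative sketch of tearing down a published object: the reference that
 * cap_unpublish() hands back must eventually be dropped by the caller
 * (handle and task are assumed to come from the surrounding code):
 *
 *	kobject_t *kobj = cap_unpublish(task, handle, KOBJECT_TYPE_IRQ);
 *	if (!kobj)
 *		return ENOENT;
 *	// ... dismantle kobj->raw as needed ...
 *	kobject_put(kobj);	// may be the last reference
 *	cap_free(task, handle);
 */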

/** Revoke access to kobject from all existing capabilities
 *
 * All published capabilities associated with the kobject are unpublished (i.e.
 * their new state is set to CAP_STATE_ALLOCATED) and no longer point to the
 * kobject. The kobject's reference count is decreased accordingly.
 *
 * Note that the caller is supposed to hold an explicit reference to the kobject
 * so that the kobject is guaranteed to exist when this function returns.
 *
 * @param kobj Pointer and explicit reference to the kobject whose capabilities
 *             are about to be unpublished.
 */
void cap_revoke(kobject_t *kobj)
{
	mutex_lock(&kobj->caps_list_lock);
	list_foreach_safe(kobj->caps_list, cur, hlp) {
		cap_t *cap = list_get_instance(cur, cap_t, kobj_link);
		mutex_lock(&cap->task->cap_info->lock);
		cap_unpublish_unsafe(cap);
		/* Drop the reference for the unpublished capability */
		kobject_put(kobj);
		mutex_unlock(&cap->task->cap_info->lock);
	}
	mutex_unlock(&kobj->caps_list_lock);
}
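
/*
 * Illustrative sketch: a subsystem holding an explicit reference can cut off
 * userspace access to the object and then drop its own reference (kobj is
 * assumed to come from the surrounding code):
 *
 *	cap_revoke(kobj);	// all capabilities pointing to kobj are unpublished
 *	kobject_put(kobj);	// drop the caller's explicit reference
 */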

/** Free allocated capability
 *
 * @param task   Task in which to free the capability.
 * @param handle Capability handle.
 */
void cap_free(task_t *task, cap_handle_t handle)
{
	assert(cap_handle_raw(handle) >= CAPS_START);
	assert(cap_handle_raw(handle) <= CAPS_LAST);

	mutex_lock(&task->cap_info->lock);
	cap_t *cap = cap_get(task, handle, CAP_STATE_ALLOCATED);

	assert(cap);

	hash_table_remove_item(&task->cap_info->caps, &cap->caps_link);
	ra_free(task->cap_info->handles, cap_handle_raw(handle), 1);
	slab_free(cap_cache, cap);
	mutex_unlock(&task->cap_info->lock);
}

kobject_t *kobject_alloc(unsigned int flags)
{
	return slab_alloc(kobject_cache, flags);
}

void kobject_free(kobject_t *kobj)
{
	slab_free(kobject_cache, kobj);
}

/** Initialize kernel object
 *
 * @param kobj Kernel object to initialize.
 * @param type Type of the kernel object.
 * @param raw  Raw pointer to the encapsulated object.
 */
void kobject_initialize(kobject_t *kobj, kobject_type_t type, void *raw)
{
	atomic_store(&kobj->refcnt, 1);

	mutex_initialize(&kobj->caps_list_lock, MUTEX_PASSIVE);
	list_initialize(&kobj->caps_list);

	kobj->type = type;
	kobj->raw = raw;
}

/** Get new reference to kernel object from capability
 *
 * @param task   Task from which to get the reference.
 * @param handle Capability handle.
 * @param type   Kernel object type of the object associated with the
 *               capability referenced by handle.
 *
 * @return Kernel object with incremented reference count on success.
 * @return NULL if there is no matching capability or kernel object.
 */
kobject_t *
kobject_get(struct task *task, cap_handle_t handle, kobject_type_t type)
{
	kobject_t *kobj = NULL;

	mutex_lock(&task->cap_info->lock);
	cap_t *cap = cap_get(task, handle, CAP_STATE_PUBLISHED);
	if (cap) {
		if (cap->kobject->type == type) {
			kobj = cap->kobject;
			atomic_inc(&kobj->refcnt);
		}
	}
	mutex_unlock(&task->cap_info->lock);

	return kobj;
}
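
/*
 * Illustrative sketch of the get/put pairing (handle and task are assumed to
 * come from the surrounding code):
 *
 *	kobject_t *kobj = kobject_get(task, handle, KOBJECT_TYPE_IRQ);
 *	if (!kobj)
 *		return ENOENT;
 *	// ... use kobj->raw while the reference is held ...
 *	kobject_put(kobj);
 */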

/** Record new reference
 *
 * @param kobj Kernel object from which the new reference is created.
 */
void kobject_add_ref(kobject_t *kobj)
{
	atomic_inc(&kobj->refcnt);
}

/** Drop reference to kernel object
 *
 * The encapsulated object and the kobject_t wrapper are both destroyed when the
 * last reference is dropped.
 *
 * @param kobj Kernel object whose reference to drop.
 */
void kobject_put(kobject_t *kobj)
{
	if (atomic_postdec(&kobj->refcnt) == 1) {
		KOBJECT_OP(kobj)->destroy(kobj->raw);
		kobject_free(kobj);
	}
}

/** @}
 */