source: mainline/kernel/generic/src/cap/cap.c@ a4e78743

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since a4e78743 was 5e801dc, checked in by GitHub <noreply@…>, 7 years ago

Indicate and enforce constness of hash table key in certain functions (#158)

The assumption here is that modifying a key in the hash/equal functions is something completely unexpected, and not something you would ever want to do intentionally, so it makes sense to disallow it entirely to get that extra level of checking.

  • Property mode set to 100644
File size: 14.6 KB
Line 
1/*
2 * Copyright (c) 2017 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup kernel_generic
30 * @{
31 */
32/** @file
33 */
34
35/*
36 * HelenOS capabilities are task-local names for references to kernel objects.
37 * Kernel objects are reference-counted wrappers for a select group of objects
38 * allocated in and by the kernel that can be made accessible to userspace in a
39 * controlled way via integer handles.
40 *
41 * A kernel object (kobject_t) encapsulates one of the following raw objects:
42 *
43 * - IPC call
44 * - IPC phone
45 * - IRQ object
46 *
47 * A capability (cap_t) is either free, allocated or published. Free
48 * capabilities can be allocated, which reserves the capability handle in the
49 * task-local capability space. Allocated capabilities can be published, which
50 * associates them with an existing kernel object. Userspace can only access
51 * published capabilities.
52 *
53 * A published capability may get unpublished, which disassociates it from the
54 * underlying kernel object and puts it back into the allocated state. An
55 * allocated capability can be freed to become available for future use.
56 *
57 * There is a 1:1 correspondence between a kernel object (kobject_t) and the
58 * actual raw object it encapsulates. A kernel object (kobject_t) may have
59 * multiple references, either implicit from one or more capabilities (cap_t),
60 * even from capabilities in different tasks, or explicit as a result of
61 * creating a new reference from a capability handle using kobject_get(), or
62 * creating a new reference from an already existing reference by
63 * kobject_add_ref() or as a result of unpublishing a capability and
64 * disassociating it from its kobject_t using cap_unpublish().
65 *
66 * A holder of an explicit reference to a kernel object may revoke access to it
67 * from all capabilities that point to it by calling cap_revoke().
68 *
69 * As kernel objects are reference-counted, they get automatically destroyed
70 * when their last reference is dropped in kobject_put(). The idea is that
71 * whenever a kernel object is inserted into some sort of a container (e.g. a
72 * list or hash table), its reference count should be incremented via
73 * kobject_get() or kobject_add_ref(). When the kernel object is removed from
74 * the container, the reference count should go down via a call to
75 * kobject_put().
76 */
77
78#include <cap/cap.h>
79#include <abi/cap.h>
80#include <proc/task.h>
81#include <synch/mutex.h>
82#include <abi/errno.h>
83#include <mm/slab.h>
84#include <adt/list.h>
85
86#include <limits.h>
87#include <stdint.h>
88#include <stdlib.h>
89
#define CAPS_START (CAP_NIL + 1)
#define CAPS_SIZE (INT_MAX - CAPS_START)
/*
 * Highest valid capability handle. The handle arena spans
 * [CAPS_START, CAPS_START + CAPS_SIZE), so the last allocatable handle is
 * CAPS_START + CAPS_SIZE - 1. The previous definition (CAPS_SIZE - 1) was off
 * by CAPS_START, causing cap_get()/cap_free() to reject the topmost handle
 * that ra_alloc() could legitimately hand out.
 */
#define CAPS_LAST (CAPS_START + CAPS_SIZE - 1)
93
94static slab_cache_t *cap_cache;
95static slab_cache_t *kobject_cache;
96
97static size_t caps_hash(const ht_link_t *item)
98{
99 cap_t *cap = hash_table_get_inst(item, cap_t, caps_link);
100 return hash_mix(cap_handle_raw(cap->handle));
101}
102
103static size_t caps_key_hash(const void *key)
104{
105 const cap_handle_t *handle = key;
106 return hash_mix(cap_handle_raw(*handle));
107}
108
109static bool caps_key_equal(const void *key, const ht_link_t *item)
110{
111 const cap_handle_t *handle = key;
112 cap_t *cap = hash_table_get_inst(item, cap_t, caps_link);
113 return *handle == cap->handle;
114}
115
/*
 * Operations for the per-task capability hash table, keyed by capability
 * handle. Only hash/key_hash/key_equal are supplied; the remaining callbacks
 * are left NULL by the designated initializer.
 */
static hash_table_ops_t caps_ops = {
	.hash = caps_hash,
	.key_hash = caps_key_hash,
	.key_equal = caps_key_equal
};
121
122void caps_init(void)
123{
124 cap_cache = slab_cache_create("cap_t", sizeof(cap_t), 0, NULL,
125 NULL, 0);
126 kobject_cache = slab_cache_create("kobject_t", sizeof(kobject_t), 0,
127 NULL, NULL, 0);
128}
129
130/** Allocate the capability info structure
131 *
132 * @param task Task for which to allocate the info structure.
133 */
134errno_t caps_task_alloc(task_t *task)
135{
136 task->cap_info = (cap_info_t *) malloc(sizeof(cap_info_t));
137 if (!task->cap_info)
138 return ENOMEM;
139 task->cap_info->handles = ra_arena_create();
140 if (!task->cap_info->handles)
141 goto error_handles;
142 if (!ra_span_add(task->cap_info->handles, CAPS_START, CAPS_SIZE))
143 goto error_span;
144 if (!hash_table_create(&task->cap_info->caps, 0, 0, &caps_ops))
145 goto error_span;
146 return EOK;
147
148error_span:
149 ra_arena_destroy(task->cap_info->handles);
150error_handles:
151 free(task->cap_info);
152 return ENOMEM;
153}
154
155/** Initialize the capability info structure
156 *
157 * @param task Task for which to initialize the info structure.
158 */
159void caps_task_init(task_t *task)
160{
161 mutex_initialize(&task->cap_info->lock, MUTEX_RECURSIVE);
162
163 for (kobject_type_t t = 0; t < KOBJECT_TYPE_MAX; t++)
164 list_initialize(&task->cap_info->type_list[t]);
165}
166
167/** Deallocate the capability info structure
168 *
169 * @param task Task from which to deallocate the info structure.
170 */
171void caps_task_free(task_t *task)
172{
173 hash_table_destroy(&task->cap_info->caps);
174 ra_arena_destroy(task->cap_info->handles);
175 free(task->cap_info);
176}
177
178/** Invoke callback function on task's capabilites of given type
179 *
180 * @param task Task where the invocation should take place.
181 * @param type Kernel object type of the task's capabilities that will be
182 * subject to the callback invocation.
183 * @param cb Callback function.
184 * @param arg Argument for the callback function.
185 *
186 * @return True if the callback was called on all matching capabilities.
187 * @return False if the callback was applied only partially.
188 */
189bool caps_apply_to_kobject_type(task_t *task, kobject_type_t type,
190 bool (*cb)(cap_t *, void *), void *arg)
191{
192 bool done = true;
193
194 mutex_lock(&task->cap_info->lock);
195 list_foreach_safe(task->cap_info->type_list[type], cur, next) {
196 cap_t *cap = list_get_instance(cur, cap_t, type_link);
197 done = cb(cap, arg);
198 if (!done)
199 break;
200 }
201 mutex_unlock(&task->cap_info->lock);
202
203 return done;
204}
205
206/** Initialize capability and associate it with its handle
207 *
208 * @param cap Address of the capability.
209 * @param task Backling to the owning task.
210 * @param handle Capability handle.
211 */
212static void cap_initialize(cap_t *cap, task_t *task, cap_handle_t handle)
213{
214 cap->state = CAP_STATE_FREE;
215 cap->task = task;
216 cap->handle = handle;
217 link_initialize(&cap->kobj_link);
218 link_initialize(&cap->type_link);
219}
220
221/** Get capability using capability handle
222 *
223 * @param task Task whose capability to get.
224 * @param handle Capability handle of the desired capability.
225 * @param state State in which the capability must be.
226 *
227 * @return Address of the desired capability if it exists and its state matches.
228 * @return NULL if no such capability exists or it's in a different state.
229 */
230static cap_t *cap_get(task_t *task, cap_handle_t handle, cap_state_t state)
231{
232 assert(mutex_locked(&task->cap_info->lock));
233
234 if ((cap_handle_raw(handle) < CAPS_START) ||
235 (cap_handle_raw(handle) > CAPS_LAST))
236 return NULL;
237 ht_link_t *link = hash_table_find(&task->cap_info->caps, &handle);
238 if (!link)
239 return NULL;
240 cap_t *cap = hash_table_get_inst(link, cap_t, caps_link);
241 if (cap->state != state)
242 return NULL;
243 return cap;
244}
245
246/** Allocate new capability
247 *
248 * @param task Task for which to allocate the new capability.
249 *
250 * @param[out] handle New capability handle on success.
251 *
252 * @return An error code in case of error.
253 */
254errno_t cap_alloc(task_t *task, cap_handle_t *handle)
255{
256 mutex_lock(&task->cap_info->lock);
257 cap_t *cap = slab_alloc(cap_cache, FRAME_ATOMIC);
258 if (!cap) {
259 mutex_unlock(&task->cap_info->lock);
260 return ENOMEM;
261 }
262 uintptr_t hbase;
263 if (!ra_alloc(task->cap_info->handles, 1, 1, &hbase)) {
264 slab_free(cap_cache, cap);
265 mutex_unlock(&task->cap_info->lock);
266 return ENOMEM;
267 }
268 cap_initialize(cap, task, (cap_handle_t) hbase);
269 hash_table_insert(&task->cap_info->caps, &cap->caps_link);
270
271 cap->state = CAP_STATE_ALLOCATED;
272 *handle = cap->handle;
273 mutex_unlock(&task->cap_info->lock);
274
275 return EOK;
276}
277
/** Publish allocated capability
 *
 * The kernel object is moved into the capability. In other words, its reference
 * is handed over to the capability. Once published, userspace can access and
 * manipulate the capability.
 *
 * @param task   Task in which to publish the capability.
 * @param handle Capability handle of a capability in the allocated state.
 * @param kobj   Kernel object.
 */
void
cap_publish(task_t *task, cap_handle_t handle, kobject_t *kobj)
{
	/*
	 * Lock ordering: the kobject's caps_list_lock is taken before the
	 * task's cap_info lock. cap_unpublish() needs the locks in the
	 * opposite order and therefore uses trylock to avoid deadlocking
	 * against this function.
	 */
	mutex_lock(&kobj->caps_list_lock);
	mutex_lock(&task->cap_info->lock);
	/* The handle must name an existing capability in the allocated state. */
	cap_t *cap = cap_get(task, handle, CAP_STATE_ALLOCATED);
	assert(cap);
	cap->state = CAP_STATE_PUBLISHED;
	/* Hand over kobj's reference to cap */
	cap->kobject = kobj;
	/* Make the capability reachable from the kobject and the type list. */
	list_append(&cap->kobj_link, &kobj->caps_list);
	list_append(&cap->type_link, &task->cap_info->type_list[kobj->type]);
	mutex_unlock(&task->cap_info->lock);
	mutex_unlock(&kobj->caps_list_lock);
}
303
304static void cap_unpublish_unsafe(cap_t *cap)
305{
306 cap->kobject = NULL;
307 list_remove(&cap->kobj_link);
308 list_remove(&cap->type_link);
309 cap->state = CAP_STATE_ALLOCATED;
310}
311
/** Unpublish published capability
 *
 * The kernel object is moved out of the capability. In other words, the
 * capability's reference to the objects is handed over to the kernel object
 * pointer returned by this function. Once unpublished, the capability does not
 * refer to any kernel object anymore.
 *
 * @param task   Task in which to unpublish the capability.
 * @param handle Capability handle.
 * @param type   Kernel object type of the object associated with the
 *               capability.
 *
 * @return Pointer and explicit reference to the kobject that was associated
 *         with the capability.
 * @return NULL if there is no published capability of the given type under
 *         this handle.
 */
kobject_t *cap_unpublish(task_t *task, cap_handle_t handle, kobject_type_t type)
{
	kobject_t *kobj = NULL;

restart:
	mutex_lock(&task->cap_info->lock);
	cap_t *cap = cap_get(task, handle, CAP_STATE_PUBLISHED);
	if (cap) {
		if (cap->kobject->type == type) {
			/* Hand over cap's reference to kobj */
			kobj = cap->kobject;
			/*
			 * Lock ordering: cap_publish() takes the kobject's
			 * caps_list_lock before the task's cap_info lock. Here
			 * the cap_info lock is already held, so the
			 * caps_list_lock can only be trylock'd; on failure,
			 * drop everything and restart from scratch to avoid
			 * deadlock.
			 */
			if (!mutex_trylock(&kobj->caps_list_lock)) {
				mutex_unlock(&task->cap_info->lock);
				kobj = NULL;
				goto restart;
			}
			cap_unpublish_unsafe(cap);
			mutex_unlock(&kobj->caps_list_lock);
		}
	}
	mutex_unlock(&task->cap_info->lock);

	return kobj;
}
351
/** Revoke access to kobject from all existing capabilities
 *
 * All published capabilities associated with the kobject are unpublished (i.e.
 * their new state is set to CAP_STATE_ALLOCATED) and no longer point to the
 * kobject. Kobject's reference count is decreased accordingly.
 *
 * Note that the caller is supposed to hold an explicit reference to the kobject
 * so that the kobject is guaranteed to exist when this function returns.
 *
 * @param kobj Pointer and explicit reference to the kobject capabilities of
 *             which are about to be unpublished.
 */
void cap_revoke(kobject_t *kobj)
{
	/*
	 * Lock ordering: kobj->caps_list_lock before each owning task's
	 * cap_info lock — the same order as in cap_publish().
	 */
	mutex_lock(&kobj->caps_list_lock);
	/* Safe iterator: cap_unpublish_unsafe() removes cur from the list. */
	list_foreach_safe(kobj->caps_list, cur, hlp) {
		cap_t *cap = list_get_instance(cur, cap_t, kobj_link);
		mutex_lock(&cap->task->cap_info->lock);
		cap_unpublish_unsafe(cap);
		/*
		 * Drop the reference for the unpublished capability. This
		 * cannot destroy kobj because the caller still holds an
		 * explicit reference (see the function contract above).
		 */
		kobject_put(kobj);
		mutex_unlock(&cap->task->cap_info->lock);
	}
	mutex_unlock(&kobj->caps_list_lock);
}
377
378/** Free allocated capability
379 *
380 * @param task Task in which to free the capability.
381 * @param handle Capability handle.
382 */
383void cap_free(task_t *task, cap_handle_t handle)
384{
385 assert(cap_handle_raw(handle) >= CAPS_START);
386 assert(cap_handle_raw(handle) <= CAPS_LAST);
387
388 mutex_lock(&task->cap_info->lock);
389 cap_t *cap = cap_get(task, handle, CAP_STATE_ALLOCATED);
390
391 assert(cap);
392
393 hash_table_remove_item(&task->cap_info->caps, &cap->caps_link);
394 ra_free(task->cap_info->handles, cap_handle_raw(handle), 1);
395 slab_free(cap_cache, cap);
396 mutex_unlock(&task->cap_info->lock);
397}
398
399kobject_t *kobject_alloc(unsigned int flags)
400{
401 return slab_alloc(kobject_cache, flags);
402}
403
404void kobject_free(kobject_t *kobj)
405{
406 slab_free(kobject_cache, kobj);
407}
408
409/** Initialize kernel object
410 *
411 * @param kobj Kernel object to initialize.
412 * @param type Type of the kernel object.
413 * @param raw Raw pointer to the encapsulated object.
414 * @param ops Pointer to kernel object operations for the respective type.
415 */
416void kobject_initialize(kobject_t *kobj, kobject_type_t type, void *raw,
417 kobject_ops_t *ops)
418{
419 atomic_store(&kobj->refcnt, 1);
420
421 mutex_initialize(&kobj->caps_list_lock, MUTEX_PASSIVE);
422 list_initialize(&kobj->caps_list);
423
424 kobj->type = type;
425 kobj->raw = raw;
426 kobj->ops = ops;
427}
428
429/** Get new reference to kernel object from capability
430 *
431 * @param task Task from which to get the reference.
432 * @param handle Capability handle.
433 * @param type Kernel object type of the object associated with the
434 * capability referenced by handle.
435 *
436 * @return Kernel object with incremented reference count on success.
437 * @return NULL if there is no matching capability or kernel object.
438 */
439kobject_t *
440kobject_get(struct task *task, cap_handle_t handle, kobject_type_t type)
441{
442 kobject_t *kobj = NULL;
443
444 mutex_lock(&task->cap_info->lock);
445 cap_t *cap = cap_get(task, handle, CAP_STATE_PUBLISHED);
446 if (cap) {
447 if (cap->kobject->type == type) {
448 kobj = cap->kobject;
449 atomic_inc(&kobj->refcnt);
450 }
451 }
452 mutex_unlock(&task->cap_info->lock);
453
454 return kobj;
455}
456
/** Record new reference
 *
 * Creates a new reference from an already existing reference (see the file-top
 * overview). The caller must already hold a valid reference to @a kobj.
 *
 * @param kobj Kernel object from which the new reference is created.
 */
void kobject_add_ref(kobject_t *kobj)
{
	atomic_inc(&kobj->refcnt);
}
465
466/** Drop reference to kernel object
467 *
468 * The encapsulated object and the kobject_t wrapper are both destroyed when the
469 * last reference is dropped.
470 *
471 * @param kobj Kernel object whose reference to drop.
472 */
473void kobject_put(kobject_t *kobj)
474{
475 if (atomic_postdec(&kobj->refcnt) == 1) {
476 kobj->ops->destroy(kobj->raw);
477 kobject_free(kobj);
478 }
479}
480
481/** @}
482 */
Note: See TracBrowser for help on using the repository browser.