source: mainline/kernel/generic/src/cap/cap.c@ 58f4c0f

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 58f4c0f was c1f68b0, checked in by Jakub Jermar <jakub@…>, 8 years ago

Use recursive mutex to protect task_t::cap_info

This makes it possible to use the mutex-protected capability APIs even
inside caps_apply_to_kobject_type() callbacks. Now there is no need to
provide eg. cap_unpublish_locked() and cap_free_locked(). Likewise,
ipc_irq_unsubscribe() can be used when the task's cap_info is already
locked by the current thread inside of a callback.

  • Property mode set to 100644
File size: 13.0 KB
Line 
1/*
2 * Copyright (c) 2017 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup generic
30 * @{
31 */
32/** @file
33 */
34
35/*
36 * HelenOS capabilities are task-local names for references to kernel objects.
37 * Kernel objects are reference-counted wrappers for a select group of objects
38 * allocated in and by the kernel that can be made accessible to userspace in a
39 * controlled way via integer handles.
40 *
41 * A kernel object (kobject_t) encapsulates one of the following raw objects:
42 *
43 * - IPC phone
44 * - IRQ object
45 *
46 * A capability (cap_t) is either free, allocated or published. Free
47 * capabilities can be allocated, which reserves the capability handle in the
48 * task-local capability space. Allocated capabilities can be published, which
49 * associates them with an existing kernel object. Userspace can only access
50 * published capabilities.
51 *
52 * A published capability may get unpublished, which disassociates it from the
53 * underlying kernel object and puts it back into the allocated state. An
54 * allocated capability can be freed to become available for future use.
55 *
56 * There is a 1:1 correspondence between a kernel object (kobject_t) and the
57 * actual raw object it encapsulates. A kernel object (kobject_t) may have
58 * multiple references, either implicit from one or more capabilities (cap_t),
59 * even from capabilities in different tasks, or explicit as a result of
60 * creating a new reference from a capability handle using kobject_get(), or
61 * creating a new reference from an already existing reference by
62 * kobject_add_ref() or as a result of unpublishing a capability and
63 * disassociating it from its kobject_t using cap_unpublish().
64 *
65 * As kernel objects are reference-counted, they get automatically destroyed
66 * when their last reference is dropped in kobject_put(). The idea is that
67 * whenever a kernel object is inserted into some sort of a container (e.g. a
68 * list or hash table), its reference count should be incremented via
69 * kobject_get() or kobject_add_ref(). When the kernel object is removed from
70 * the container, the reference count should go down via a call to
71 * kobject_put().
72 */
73
74#include <cap/cap.h>
75#include <proc/task.h>
76#include <synch/mutex.h>
77#include <abi/errno.h>
78#include <mm/slab.h>
79#include <adt/list.h>
80
81#include <stdint.h>
82
/* Size of the per-task capability handle space (handles range [0, MAX_CAPS)). */
/* NOTE(review): INT_MAX is declared in <limits.h> in standard C; presumably the
 * kernel's <stdint.h> pulls it in here — confirm. */
#define MAX_CAPS INT_MAX

/* Slab cache backing all cap_t allocations; created once in caps_init(). */
static slab_cache_t *cap_slab;
86
87static size_t caps_hash(const ht_link_t *item)
88{
89 cap_t *cap = hash_table_get_inst(item, cap_t, caps_link);
90 return hash_mix(cap->handle);
91}
92
93static size_t caps_key_hash(void *key)
94{
95 cap_handle_t *handle = (cap_handle_t *) key;
96 return hash_mix(*handle);
97}
98
99static bool caps_key_equal(void *key, const ht_link_t *item)
100{
101 cap_handle_t *handle = (cap_handle_t *) key;
102 cap_t *cap = hash_table_get_inst(item, cap_t, caps_link);
103 return *handle == cap->handle;
104}
105
/* Operations for the per-task capability hash table, keyed by cap_handle_t. */
static hash_table_ops_t caps_ops = {
	.hash = caps_hash,
	.key_hash = caps_key_hash,
	.key_equal = caps_key_equal
};
111
112void caps_init(void)
113{
114 cap_slab = slab_cache_create("cap_t", sizeof(cap_t), 0, NULL,
115 NULL, 0);
116}
117
118/** Allocate the capability info structure
119 *
120 * @param task Task for which to allocate the info structure.
121 */
122int caps_task_alloc(task_t *task)
123{
124 task->cap_info = (cap_info_t *) malloc(sizeof(cap_info_t),
125 FRAME_ATOMIC);
126 if (!task->cap_info)
127 return ENOMEM;
128 task->cap_info->handles = ra_arena_create();
129 if (!task->cap_info->handles)
130 goto error_handles;
131 if (!ra_span_add(task->cap_info->handles, 0, MAX_CAPS))
132 goto error_span;
133 if (!hash_table_create(&task->cap_info->caps, 0, 0, &caps_ops))
134 goto error_span;
135 return EOK;
136
137error_span:
138 ra_arena_destroy(task->cap_info->handles);
139error_handles:
140 free(task->cap_info);
141 return ENOMEM;
142}
143
144/** Initialize the capability info structure
145 *
146 * @param task Task for which to initialize the info structure.
147 */
148void caps_task_init(task_t *task)
149{
150 mutex_initialize(&task->cap_info->lock, MUTEX_RECURSIVE);
151
152 for (kobject_type_t t = 0; t < KOBJECT_TYPE_MAX; t++)
153 list_initialize(&task->cap_info->type_list[t]);
154}
155
156/** Deallocate the capability info structure
157 *
158 * @param task Task from which to deallocate the info structure.
159 */
160void caps_task_free(task_t *task)
161{
162 hash_table_destroy(&task->cap_info->caps);
163 ra_arena_destroy(task->cap_info->handles);
164 free(task->cap_info);
165}
166
167/** Invoke callback function on task's capabilites of given type
168 *
169 * @param task Task where the invocation should take place.
170 * @param type Kernel object type of the task's capabilities that will be
171 * subject to the callback invocation.
172 * @param cb Callback function.
173 * @param arg Argument for the callback function.
174 *
175 * @return True if the callback was called on all matching capabilities.
176 * @return False if the callback was applied only partially.
177 */
178bool caps_apply_to_kobject_type(task_t *task, kobject_type_t type,
179 bool (*cb)(cap_t *, void *), void *arg)
180{
181 bool done = true;
182
183 mutex_lock(&task->cap_info->lock);
184 list_foreach_safe(task->cap_info->type_list[type], cur, next) {
185 cap_t *cap = list_get_instance(cur, cap_t, type_link);
186 done = cb(cap, arg);
187 if (!done)
188 break;
189 }
190 mutex_unlock(&task->cap_info->lock);
191
192 return done;
193}
194
195/** Initialize capability and associate it with its handle
196 *
197 * @param cap Address of the capability.
198 * @param task Backling to the owning task.
199 * @param handle Capability handle.
200 */
201static void cap_initialize(cap_t *cap, task_t *task, cap_handle_t handle)
202{
203 cap->state = CAP_STATE_FREE;
204 cap->task = task;
205 cap->handle = handle;
206 link_initialize(&cap->type_link);
207}
208
209/** Get capability using capability handle
210 *
211 * @param task Task whose capability to get.
212 * @param handle Capability handle of the desired capability.
213 * @param state State in which the capability must be.
214 *
215 * @return Address of the desired capability if it exists and its state matches.
216 * @return NULL if no such capability exists or it's in a different state.
217 */
218static cap_t *cap_get(task_t *task, cap_handle_t handle, cap_state_t state)
219{
220 assert(mutex_locked(&task->cap_info->lock));
221
222 if ((handle < 0) || (handle >= MAX_CAPS))
223 return NULL;
224 ht_link_t *link = hash_table_find(&task->cap_info->caps, &handle);
225 if (!link)
226 return NULL;
227 cap_t *cap = hash_table_get_inst(link, cap_t, caps_link);
228 if (cap->state != state)
229 return NULL;
230 return cap;
231}
232
233static bool cap_reclaimer(ht_link_t *link, void *arg)
234{
235 cap_t **result = (cap_t **) arg;
236 cap_t *cap = hash_table_get_inst(link, cap_t, caps_link);
237
238 if (cap->state == CAP_STATE_PUBLISHED && cap->kobject->ops->reclaim &&
239 cap->kobject->ops->reclaim(cap->kobject)) {
240 kobject_t *kobj = cap_unpublish(cap->task, cap->handle,
241 cap->kobject->type);
242 kobject_put(kobj);
243 cap_initialize(cap, cap->task, cap->handle);
244 *result = cap;
245 return false;
246 }
247
248 return true;
249}
250
251/** Allocate new capability
252 *
253 * @param task Task for which to allocate the new capability.
254 *
255 * @return New capability handle on success.
256 * @return Negative error code in case of error.
257 */
258cap_handle_t cap_alloc(task_t *task)
259{
260 cap_t *cap = NULL;
261 cap_handle_t handle;
262
263 /*
264 * First of all, see if we can reclaim a capability. Note that this
265 * feature is only temporary and capability reclamaition will eventually
266 * be phased out.
267 */
268 mutex_lock(&task->cap_info->lock);
269 hash_table_apply(&task->cap_info->caps, cap_reclaimer, &cap);
270
271 /*
272 * If we don't have a capability by now, try to allocate a new one.
273 */
274 if (!cap) {
275 cap = slab_alloc(cap_slab, FRAME_ATOMIC);
276 if (!cap) {
277 mutex_unlock(&task->cap_info->lock);
278 return ENOMEM;
279 }
280 uintptr_t hbase;
281 if (!ra_alloc(task->cap_info->handles, 1, 1, &hbase)) {
282 slab_free(cap_slab, cap);
283 mutex_unlock(&task->cap_info->lock);
284 return ENOMEM;
285 }
286 cap_initialize(cap, task, (cap_handle_t) hbase);
287 hash_table_insert(&task->cap_info->caps, &cap->caps_link);
288 }
289
290 cap->state = CAP_STATE_ALLOCATED;
291 handle = cap->handle;
292 mutex_unlock(&task->cap_info->lock);
293
294 return handle;
295}
296
297/** Publish allocated capability
298 *
299 * The kernel object is moved into the capability. In other words, its reference
300 * is handed over to the capability. Once published, userspace can access and
301 * manipulate the capability.
302 *
303 * @param task Task in which to publish the capability.
304 * @param handle Capability handle.
305 * @param kobj Kernel object.
306 */
307void
308cap_publish(task_t *task, cap_handle_t handle, kobject_t *kobj)
309{
310 mutex_lock(&task->cap_info->lock);
311 cap_t *cap = cap_get(task, handle, CAP_STATE_ALLOCATED);
312 assert(cap);
313 cap->state = CAP_STATE_PUBLISHED;
314 /* Hand over kobj's reference to cap */
315 cap->kobject = kobj;
316 list_append(&cap->type_link, &task->cap_info->type_list[kobj->type]);
317 mutex_unlock(&task->cap_info->lock);
318}
319
320/** Unpublish published capability
321 *
322 * The kernel object is moved out of the capability. In other words, the
323 * capability's reference to the objects is handed over to the kernel object
324 * pointer returned by this function. Once unpublished, the capability does not
325 * refer to any kernel object anymore.
326 *
327 * @param task Task in which to unpublish the capability.
328 * @param handle Capability handle.
329 * @param type Kernel object type of the object associated with the
330 * capability.
331 */
332kobject_t *cap_unpublish(task_t *task, cap_handle_t handle, kobject_type_t type)
333{
334 kobject_t *kobj = NULL;
335
336 mutex_lock(&task->cap_info->lock);
337 cap_t *cap = cap_get(task, handle, CAP_STATE_PUBLISHED);
338 if (cap) {
339 if (cap->kobject->type == type) {
340 /* Hand over cap's reference to kobj */
341 kobj = cap->kobject;
342 cap->kobject = NULL;
343 list_remove(&cap->type_link);
344 cap->state = CAP_STATE_ALLOCATED;
345 }
346 }
347 mutex_unlock(&task->cap_info->lock);
348
349 return kobj;
350}
351
352/** Free allocated capability
353 *
354 * @param task Task in which to free the capability.
355 * @param handle Capability handle.
356 */
357void cap_free(task_t *task, cap_handle_t handle)
358{
359 assert(handle >= 0);
360 assert(handle < MAX_CAPS);
361
362 mutex_lock(&task->cap_info->lock);
363 cap_t *cap = cap_get(task, handle, CAP_STATE_ALLOCATED);
364
365 assert(cap);
366
367 hash_table_remove_item(&task->cap_info->caps, &cap->caps_link);
368 ra_free(task->cap_info->handles, handle, 1);
369 slab_free(cap_slab, cap);
370 mutex_unlock(&task->cap_info->lock);
371}
372
373/** Initialize kernel object
374 *
375 * @param kobj Kernel object to initialize.
376 * @param type Type of the kernel object.
377 * @param raw Raw pointer to the encapsulated object.
378 * @param ops Pointer to kernel object operations for the respective type.
379 */
380void kobject_initialize(kobject_t *kobj, kobject_type_t type, void *raw,
381 kobject_ops_t *ops)
382{
383 atomic_set(&kobj->refcnt, 1);
384 kobj->type = type;
385 kobj->raw = raw;
386 kobj->ops = ops;
387}
388
389/** Get new reference to kernel object from capability
390 *
391 * @param task Task from which to get the reference.
392 * @param handle Capability handle.
393 * @param type Kernel object type of the object associated with the
394 * capability referenced by handle.
395 *
396 * @return Kernel object with incremented reference count on success.
397 * @return NULL if there is no matching capability or kernel object.
398 */
399kobject_t *
400kobject_get(struct task *task, cap_handle_t handle, kobject_type_t type)
401{
402 kobject_t *kobj = NULL;
403
404 mutex_lock(&task->cap_info->lock);
405 cap_t *cap = cap_get(task, handle, CAP_STATE_PUBLISHED);
406 if (cap) {
407 if (cap->kobject->type == type) {
408 kobj = cap->kobject;
409 atomic_inc(&kobj->refcnt);
410 }
411 }
412 mutex_unlock(&task->cap_info->lock);
413
414 return kobj;
415}
416
/** Record new reference
 *
 * Increments the kernel object's reference count; the new reference must
 * eventually be dropped with kobject_put().
 *
 * @param kobj Kernel object from which the new reference is created.
 */
void kobject_add_ref(kobject_t *kobj)
{
	atomic_inc(&kobj->refcnt);
}
425
426/** Drop reference to kernel object
427 *
428 * The encapsulated object and the kobject_t wrapper are both destroyed when the
429 * last reference is dropped.
430 *
431 * @param kobj Kernel object whose reference to drop.
432 */
433void kobject_put(kobject_t *kobj)
434{
435 if (atomic_postdec(&kobj->refcnt) == 1) {
436 kobj->ops->destroy(kobj->raw);
437 free(kobj);
438 }
439}
440
441/** @}
442 */
Note: See TracBrowser for help on using the repository browser.