source: mainline/kernel/generic/src/proc/task.c@bed67f2

Last change on this file since bed67f2 was b294126, checked in by GitHub <noreply@…>, 7 years ago

Merge pull request #52 from jermar/asrefcnt

Fix as_t reference counting

File size: 16.6 KB
1/*
2 * Copyright (c) 2010 Jakub Jermar
3 * Copyright (c) 2018 Jiri Svoboda
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup kernel_generic_proc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief Task management.
37 */
38
39#include <assert.h>
40#include <proc/thread.h>
41#include <proc/task.h>
42#include <mm/as.h>
43#include <mm/slab.h>
44#include <atomic.h>
45#include <synch/futex.h>
46#include <synch/spinlock.h>
47#include <synch/waitq.h>
48#include <arch.h>
49#include <barrier.h>
50#include <adt/list.h>
51#include <adt/odict.h>
52#include <cap/cap.h>
53#include <ipc/ipc.h>
54#include <ipc/ipcrsc.h>
55#include <ipc/event.h>
56#include <stdio.h>
57#include <errno.h>
58#include <halt.h>
59#include <str.h>
60#include <syscall/copy.h>
61#include <macros.h>
62
63/** Spinlock protecting the @c tasks ordered dictionary. */
64IRQ_SPINLOCK_INITIALIZE(tasks_lock);
65
66/** Ordered dictionary of active tasks by task ID.
67 *
68 * Members are task_t structures.
69 *
70 * The task is guaranteed to exist after it was found in the @c tasks
71 * dictionary as long as:
72 *
73 * @li the tasks_lock is held,
74 * @li the task's lock is held, provided it was acquired before
75 * releasing tasks_lock, or
76 * @li the task's refcount is greater than 0.
77 *
78 */
79odict_t tasks;
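/*
 * Illustrative sketch, not part of the upstream file: one way a caller
 * could rely on the refcount guarantee documented above to keep a task
 * alive after dropping tasks_lock. The helper name
 * example_task_find_and_hold() is hypothetical.
 */
#if 0
static task_t *example_task_find_and_hold(task_id_t id)
{
	/* Lock tasks_lock with interrupts disabled, as task_find_by_id() requires. */
	irq_spinlock_lock(&tasks_lock, true);

	task_t *task = task_find_by_id(id);
	if (task != NULL) {
		/* Take a reference while still under tasks_lock. */
		task_hold(task);
	}

	irq_spinlock_unlock(&tasks_lock, true);

	/* The caller must eventually drop the reference via task_release(). */
	return task;
}
#endif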
80
81static task_id_t task_counter = 0;
82
83static slab_cache_t *task_cache;
84
85/* Forward declarations. */
86static void task_kill_internal(task_t *);
87static errno_t tsk_constructor(void *, unsigned int);
88static size_t tsk_destructor(void *);
89
90static void *tasks_getkey(odlink_t *);
91static int tasks_cmp(void *, void *);
92
93/** Initialize kernel tasks support.
94 *
95 */
96void task_init(void)
97{
98 TASK = NULL;
99 odict_initialize(&tasks, tasks_getkey, tasks_cmp);
100 task_cache = slab_cache_create("task_t", sizeof(task_t), 0,
101 tsk_constructor, tsk_destructor, 0);
102}
103
104/** Kill all tasks except the current task.
105 *
106 */
107void task_done(void)
108{
109 size_t tasks_left;
110 task_t *task;
111
112 if (ipc_box_0) {
113 task_t *task_0 = ipc_box_0->task;
114 ipc_box_0 = NULL;
115 /*
116 * The first task is held by kinit(); we need to release it,
117 * or it will never finish cleanup.
118 */
119 task_release(task_0);
120 }
121
122 /* Repeat until no tasks other than TASK remain. */
123 do {
124#ifdef CONFIG_DEBUG
125 printf("Killing tasks... ");
126#endif
127 irq_spinlock_lock(&tasks_lock, true);
128 tasks_left = 0;
129
130 task = task_first();
131 while (task != NULL) {
132 if (task != TASK) {
133 tasks_left++;
134#ifdef CONFIG_DEBUG
135 printf("[%" PRIu64 "] ", task->taskid);
136#endif
137 task_kill_internal(task);
138 }
139
140 task = task_next(task);
141 }
142
143 irq_spinlock_unlock(&tasks_lock, true);
144
145 thread_sleep(1);
146
147#ifdef CONFIG_DEBUG
148 printf("\n");
149#endif
150 } while (tasks_left > 0);
151}
152
153errno_t tsk_constructor(void *obj, unsigned int kmflags)
154{
155 task_t *task = (task_t *) obj;
156
157 errno_t rc = caps_task_alloc(task);
158 if (rc != EOK)
159 return rc;
160
161 atomic_store(&task->refcount, 0);
162 atomic_store(&task->lifecount, 0);
163
164 irq_spinlock_initialize(&task->lock, "task_t_lock");
165
166 list_initialize(&task->threads);
167
168 ipc_answerbox_init(&task->answerbox, task);
169
170 spinlock_initialize(&task->active_calls_lock, "active_calls_lock");
171 list_initialize(&task->active_calls);
172
173#ifdef CONFIG_UDEBUG
174 /* Init kbox stuff */
175 task->kb.thread = NULL;
176 ipc_answerbox_init(&task->kb.box, task);
177 mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE);
178#endif
179
180 return EOK;
181}
182
183size_t tsk_destructor(void *obj)
184{
185 task_t *task = (task_t *) obj;
186
187 caps_task_free(task);
188 return 0;
189}
190
191/** Create new task with no threads.
192 *
193 * @param as Task's address space.
194 * @param name Symbolic name (a copy is made).
195 *
196 * @return New task's structure.
197 *
198 */
199task_t *task_create(as_t *as, const char *name)
200{
201 task_t *task = (task_t *) slab_alloc(task_cache, 0);
202 if (task == NULL) {
203 return NULL;
204 }
205
206 task_create_arch(task);
207
208 task->as = as;
209 str_cpy(task->name, TASK_NAME_BUFLEN, name);
210
211 task->container = CONTAINER;
212 task->perms = 0;
213 task->ucycles = 0;
214 task->kcycles = 0;
215
216 caps_task_init(task);
217
218 task->ipc_info.call_sent = 0;
219 task->ipc_info.call_received = 0;
220 task->ipc_info.answer_sent = 0;
221 task->ipc_info.answer_received = 0;
222 task->ipc_info.irq_notif_received = 0;
223 task->ipc_info.forwarded = 0;
224
225 event_task_init(task);
226
227 task->answerbox.active = true;
228
229#ifdef CONFIG_UDEBUG
230 /* Init debugging stuff */
231 udebug_task_init(&task->udebug);
232
233 /* Init kbox stuff */
234 task->kb.box.active = true;
235 task->kb.finished = false;
236#endif
237
238 if ((ipc_box_0) &&
239 (container_check(ipc_box_0->task->container, task->container))) {
240 cap_phone_handle_t phone_handle;
241 errno_t rc = phone_alloc(task, true, &phone_handle, NULL);
242 if (rc != EOK) {
243 task->as = NULL;
244 task_destroy_arch(task);
245 slab_free(task_cache, task);
246 return NULL;
247 }
248
249 kobject_t *phone_obj = kobject_get(task, phone_handle,
250 KOBJECT_TYPE_PHONE);
251 (void) ipc_phone_connect(phone_obj->phone, ipc_box_0);
252 }
253
254 futex_task_init(task);
255
256 irq_spinlock_lock(&tasks_lock, true);
257
258 task->taskid = ++task_counter;
259 odlink_initialize(&task->ltasks);
260 odict_insert(&task->ltasks, &tasks, NULL);
261
262 irq_spinlock_unlock(&tasks_lock, true);
263
264 return task;
265}
266
267/** Destroy task.
268 *
269 * @param task Task to be destroyed.
270 *
271 */
272void task_destroy(task_t *task)
273{
274 /*
275 * Remove the task from the @c tasks ordered dictionary.
276 */
277 irq_spinlock_lock(&tasks_lock, true);
278 odict_remove(&task->ltasks);
279 irq_spinlock_unlock(&tasks_lock, true);
280
281 /*
282 * Perform architecture specific task destruction.
283 */
284 task_destroy_arch(task);
285
286 /*
287 * Free up dynamically allocated state.
288 */
289 futex_task_deinit(task);
290
291 /*
292 * Drop our reference to the address space.
293 */
294 as_release(task->as);
295
296 slab_free(task_cache, task);
297}
298
299/** Hold a reference to a task.
300 *
301 * Holding a reference to a task prevents destruction of that task.
302 *
303 * @param task Task to be held.
304 *
305 */
306void task_hold(task_t *task)
307{
308 atomic_inc(&task->refcount);
309}
310
311/** Release a reference to a task.
312 *
313 * The last one to release a reference to a task destroys the task.
314 *
315 * @param task Task to be released.
316 *
317 */
318void task_release(task_t *task)
319{
320 if ((atomic_predec(&task->refcount)) == 0)
321 task_destroy(task);
322}
323
324#ifdef __32_BITS__
325
326/** Syscall for obtaining the ID of the current task (32-bit variant).
327 *
328 * @param uspace_taskid Pointer to user-space buffer
329 * where to store current task ID.
330 *
331 * @return Zero on success or an error code from @ref errno.h.
332 *
333 */
334sys_errno_t sys_task_get_id(sysarg64_t *uspace_taskid)
335{
336 /*
337 * No need to acquire lock on TASK because taskid remains constant for
338 * the lifespan of the task.
339 */
340 return (sys_errno_t) copy_to_uspace(uspace_taskid, &TASK->taskid,
341 sizeof(TASK->taskid));
342}
343
344#endif /* __32_BITS__ */
345
346#ifdef __64_BITS__
347
348/** Syscall for obtaining the ID of the current task (64-bit variant).
349 *
350 * @return Current task ID.
351 *
352 */
353sysarg_t sys_task_get_id(void)
354{
355 /*
356 * No need to acquire lock on TASK because taskid remains constant for
357 * the lifespan of the task.
358 */
359 return TASK->taskid;
360}
361
362#endif /* __64_BITS__ */
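/*
 * Illustrative user-space-side sketch, not part of this file: how a libc
 * wrapper might bridge the two kernel variants above. On 32-bit targets the
 * 64-bit task ID does not fit into the syscall return value, so the kernel
 * copies it out through a user-space pointer; on 64-bit targets it is
 * returned directly. The names __SYSCALL0/__SYSCALL1, SYS_TASK_GET_ID and
 * example_task_get_id() are assumptions standing in for the actual
 * user-space syscall interface.
 */
#if 0
task_id_t example_task_get_id(void)
{
#ifdef __32_BITS__
	sysarg64_t task_id;

	(void) __SYSCALL1(SYS_TASK_GET_ID, (sysarg_t) &task_id);
	return (task_id_t) task_id;
#else
	return (task_id_t) __SYSCALL0(SYS_TASK_GET_ID);
#endif
}
#endif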
363
364/** Syscall for setting the task name.
365 *
366 * The name simplifies identifying the task in the task list.
367 *
368 * @param uspace_name The new name for the task (typically the same
369 * as the command used to execute it).
370 *
371 * @return 0 on success or an error code from @ref errno.h.
372 *
373 */
374sys_errno_t sys_task_set_name(const char *uspace_name, size_t name_len)
375{
376 char namebuf[TASK_NAME_BUFLEN];
377
378 /* Cap length of name and copy it from userspace. */
379 if (name_len > TASK_NAME_BUFLEN - 1)
380 name_len = TASK_NAME_BUFLEN - 1;
381
382 errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
383 if (rc != EOK)
384 return (sys_errno_t) rc;
385
386 namebuf[name_len] = '\0';
387
388 /*
389 * Since the task name is also referenced from the
390 * threads, hold the threads' lock for the duration
391 * of the update.
392 */
393
394 irq_spinlock_lock(&tasks_lock, true);
395 irq_spinlock_lock(&TASK->lock, false);
396 irq_spinlock_lock(&threads_lock, false);
397
398 /* Set task name */
399 str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
400
401 irq_spinlock_unlock(&threads_lock, false);
402 irq_spinlock_unlock(&TASK->lock, false);
403 irq_spinlock_unlock(&tasks_lock, true);
404
405 return EOK;
406}
407
408/** Syscall to forcefully terminate a task
409 *
410 * @param uspace_taskid Pointer to task ID in user space.
411 *
412 * @return 0 on success or an error code from @ref errno.h.
413 *
414 */
415sys_errno_t sys_task_kill(task_id_t *uspace_taskid)
416{
417 task_id_t taskid;
418 errno_t rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
419 if (rc != EOK)
420 return (sys_errno_t) rc;
421
422 return (sys_errno_t) task_kill(taskid);
423}
424
425/** Find task structure corresponding to task ID.
426 *
427 * The tasks_lock must be already held by the caller of this function and
428 * interrupts must be disabled.
429 *
430 * @param id Task ID.
431 *
432 * @return Task structure address or NULL if there is no such task ID.
433 *
434 */
435task_t *task_find_by_id(task_id_t id)
436{
437 assert(interrupts_disabled());
438 assert(irq_spinlock_locked(&tasks_lock));
439
440 odlink_t *odlink = odict_find_eq(&tasks, &id, NULL);
441 if (odlink != NULL)
442 return odict_get_instance(odlink, task_t, ltasks);
443
444 return NULL;
445}
446
447/** Get count of tasks.
448 *
449 * @return Number of tasks in the system
450 */
451size_t task_count(void)
452{
453 assert(interrupts_disabled());
454 assert(irq_spinlock_locked(&tasks_lock));
455
456 return odict_count(&tasks);
457}
458
459/** Get first task (task with lowest ID).
460 *
461 * @return Pointer to first task or @c NULL if there are none.
462 */
463task_t *task_first(void)
464{
465 odlink_t *odlink;
466
467 assert(interrupts_disabled());
468 assert(irq_spinlock_locked(&tasks_lock));
469
470 odlink = odict_first(&tasks);
471 if (odlink == NULL)
472 return NULL;
473
474 return odict_get_instance(odlink, task_t, ltasks);
475}
476
477/** Get next task (with higher task ID).
478 *
479 * @param cur Current task
480 * @return Pointer to next task or @c NULL if there are no more tasks.
481 */
482task_t *task_next(task_t *cur)
483{
484 odlink_t *odlink;
485
486 assert(interrupts_disabled());
487 assert(irq_spinlock_locked(&tasks_lock));
488
489 odlink = odict_next(&cur->ltasks, &tasks);
490 if (odlink == NULL)
491 return NULL;
492
493 return odict_get_instance(odlink, task_t, ltasks);
494}
495
496/** Get accounting data of given task.
497 *
498 * Note that the lock of @a task must already be held and interrupts must
499 * already be disabled.
500 *
501 * @param task Pointer to the task.
502 * @param ucycles Out pointer to sum of all user cycles.
503 * @param kcycles Out pointer to sum of all kernel cycles.
504 *
505 */
506void task_get_accounting(task_t *task, uint64_t *ucycles, uint64_t *kcycles)
507{
508 assert(interrupts_disabled());
509 assert(irq_spinlock_locked(&task->lock));
510
511 /* Accumulated values of task */
512 uint64_t uret = task->ucycles;
513 uint64_t kret = task->kcycles;
514
515 /* Current values of threads */
516 list_foreach(task->threads, th_link, thread_t, thread) {
517 irq_spinlock_lock(&thread->lock, false);
518
519 /* Process only counted threads */
520 if (!thread->uncounted) {
521 if (thread == THREAD) {
522 /* Update accounting of current thread */
523 thread_update_accounting(false);
524 }
525
526 uret += thread->ucycles;
527 kret += thread->kcycles;
528 }
529
530 irq_spinlock_unlock(&thread->lock, false);
531 }
532
533 *ucycles = uret;
534 *kcycles = kret;
535}
536
537static void task_kill_internal(task_t *task)
538{
539 irq_spinlock_lock(&task->lock, false);
540 irq_spinlock_lock(&threads_lock, false);
541
542 /*
543 * Interrupt all threads.
544 */
545
546 list_foreach(task->threads, th_link, thread_t, thread) {
547 bool sleeping = false;
548
549 irq_spinlock_lock(&thread->lock, false);
550
551 thread->interrupted = true;
552 if (thread->state == Sleeping)
553 sleeping = true;
554
555 irq_spinlock_unlock(&thread->lock, false);
556
557 if (sleeping)
558 waitq_interrupt_sleep(thread);
559 }
560
561 irq_spinlock_unlock(&threads_lock, false);
562 irq_spinlock_unlock(&task->lock, false);
563}
564
565/** Kill task.
566 *
567 * This function is idempotent.
568 * It signals all of the task's threads to bail out.
569 *
570 * @param id ID of the task to be killed.
571 *
572 * @return Zero on success or an error code from errno.h.
573 *
574 */
575errno_t task_kill(task_id_t id)
576{
577 if (id == 1)
578 return EPERM;
579
580 irq_spinlock_lock(&tasks_lock, true);
581
582 task_t *task = task_find_by_id(id);
583 if (!task) {
584 irq_spinlock_unlock(&tasks_lock, true);
585 return ENOENT;
586 }
587
588 task_kill_internal(task);
589 irq_spinlock_unlock(&tasks_lock, true);
590
591 return EOK;
592}
593
594/** Kill the currently running task.
595 *
596 * @param notify Send out fault notifications.
597 *
598 * This function never returns.
599 *
600 */
601void task_kill_self(bool notify)
602{
603 /*
604 * User space can subscribe for FAULT events to take action
605 * whenever a task faults (to take a dump, run a debugger, etc.).
606 * The notification is always available, but unless udebug is enabled,
607 * that's all you get.
608 */
609 if (notify) {
610 /* Notify the subscriber that a fault occurred. */
611 if (event_notify_3(EVENT_FAULT, false, LOWER32(TASK->taskid),
612 UPPER32(TASK->taskid), (sysarg_t) THREAD) == EOK) {
613#ifdef CONFIG_UDEBUG
614 /* Wait for a debugging session. */
615 udebug_thread_fault();
616#endif
617 }
618 }
619
620 irq_spinlock_lock(&tasks_lock, true);
621 task_kill_internal(TASK);
622 irq_spinlock_unlock(&tasks_lock, true);
623
624 thread_exit();
625}
626
627/** Process syscall to terminate the current task.
628 *
629 * @param notify Send out fault notifications.
630 *
631 */
632sys_errno_t sys_task_exit(sysarg_t notify)
633{
634 task_kill_self(notify);
635
636 /* Unreachable */
637 return EOK;
638}
639
640static void task_print(task_t *task, bool additional)
641{
642 irq_spinlock_lock(&task->lock, false);
643
644 uint64_t ucycles;
645 uint64_t kcycles;
646 char usuffix, ksuffix;
647 task_get_accounting(task, &ucycles, &kcycles);
648 order_suffix(ucycles, &ucycles, &usuffix);
649 order_suffix(kcycles, &kcycles, &ksuffix);
650
651#ifdef __32_BITS__
652 if (additional)
653 printf("%-8" PRIu64 " %9zu", task->taskid,
654 atomic_load(&task->refcount));
655 else
656 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
657 " %9" PRIu64 "%c %9" PRIu64 "%c\n", task->taskid,
658 task->name, task->container, task, task->as,
659 ucycles, usuffix, kcycles, ksuffix);
660#endif
661
662#ifdef __64_BITS__
663 if (additional)
664 printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
665 "%9zu\n", task->taskid, ucycles, usuffix, kcycles,
666 ksuffix, atomic_load(&task->refcount));
667 else
668 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
669 task->taskid, task->name, task->container, task, task->as);
670#endif
671
672 irq_spinlock_unlock(&task->lock, false);
673}
674
675/** Print task list
676 *
677 * @param additional Print additional information.
678 *
679 */
680void task_print_list(bool additional)
681{
682 /* Messing with task structures, avoid deadlock */
683 irq_spinlock_lock(&tasks_lock, true);
684
685#ifdef __32_BITS__
686 if (additional)
687 printf("[id ] [threads] [calls] [callee\n");
688 else
689 printf("[id ] [name ] [ctn] [address ] [as ]"
690 " [ucycles ] [kcycles ]\n");
691#endif
692
693#ifdef __64_BITS__
694 if (additional)
695 printf("[id ] [ucycles ] [kcycles ] [threads] [calls]"
696 " [callee\n");
697 else
698 printf("[id ] [name ] [ctn] [address ]"
699 " [as ]\n");
700#endif
701
702 task_t *task;
703
704 task = task_first();
705 while (task != NULL) {
706 task_print(task, additional);
707 task = task_next(task);
708 }
709
710 irq_spinlock_unlock(&tasks_lock, true);
711}
712
713/** Get key function for the @c tasks ordered dictionary.
714 *
715 * @param odlink Link
716 * @return Pointer to task ID cast as 'void *'
717 */
718static void *tasks_getkey(odlink_t *odlink)
719{
720 task_t *task = odict_get_instance(odlink, task_t, ltasks);
721 return (void *) &task->taskid;
722}
723
724/** Key comparison function for the @c tasks ordered dictionary.
725 *
726 * @param a Pointer to task A ID
727 * @param b Pointer to task B ID
728 * @return -1, 0, 1 if ID A is less than, equal to, greater than ID B
729 */
730static int tasks_cmp(void *a, void *b)
731{
732 task_id_t ida = *(task_id_t *)a;
733 task_id_t idb = *(task_id_t *)b;
734
735 if (ida < idb)
736 return -1;
737 else if (ida == idb)
738 return 0;
739 else
740 return +1;
741}
742
743/** @}
744 */