source: mainline/kernel/generic/src/smp/smp_call.c@ d70fc74

Last change on this file was d70fc74, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

smp_call: Minor fixes and comments.

#include <smp/smp_call.h>
#include <arch.h>
#include <config.h>
#include <preemption.h>
#include <compiler/barrier.h>
#include <arch/barrier.h>
#include <arch/asm.h> /* interrupt_disable */


static void call_start(smp_call_t *call_info, smp_call_func_t func, void *arg);
static void call_done(smp_call_t *call_info);
static void call_wait(smp_call_t *call_info);


void smp_call_init(void)
{
	ASSERT(CPU);
	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());

	spinlock_initialize(&CPU->smp_calls_lock, "cpu[].smp_calls_lock");
	list_initialize(&CPU->smp_pending_calls);
}

/** Invokes a function on a specific CPU and waits for it to complete.
 *
 * Calls @a func on the CPU denoted by its logical id @a cpu_id.
 * The function will execute with interrupts disabled. It should
 * be a quick and simple function and must never block.
 *
 * If @a cpu_id is the local CPU, the function will be invoked
 * directly.
 *
 * @param cpu_id Destination CPU's logical id (e.g. CPU->id).
 * @param func Function to call.
 * @param arg Argument to pass to the user supplied function @a func.
 */
void smp_call(unsigned int cpu_id, smp_call_func_t func, void *arg)
{
	smp_call_t call_info;
	smp_call_async(cpu_id, func, arg, &call_info);
	smp_call_wait(&call_info);
}
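
/*
 * Example usage (an illustrative sketch mirroring the asynchronous example
 * in smp_call_wait() below; hello() is a hypothetical callee, not part of
 * this file). The synchronous variant returns only after the callee has
 * finished on the destination CPU:
 *
 * @code
 * void hello(void *p) {
 *     puts((char*)p);
 * }
 *
 * // Blocks until hello() has finished executing on cpu2.
 * smp_call(cpus[2].id, hello, "hi!\n");
 * @endcode
 */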

/** Invokes a function on a specific CPU asynchronously.
 *
 * Calls @a func on the CPU denoted by its logical id @a cpu_id.
 * The function will execute with interrupts disabled. It should
 * be a quick and simple function and must never block.
 *
 * Pass @a call_info to smp_call_wait() in order to wait for
 * @a func to complete.
 *
 * @a call_info must be valid until @a func returns.
 *
 * If @a cpu_id is the local CPU, the function will be invoked
 * directly. If the destination cpu id @a cpu_id is invalid
 * or denotes an inactive cpu, the call is discarded immediately.
 *
 * Interrupts must be enabled. Otherwise you run the risk
 * of a deadlock.
 *
 * @param cpu_id Destination CPU's logical id (e.g. CPU->id).
 * @param func Function to call.
 * @param arg Argument to pass to the user supplied function @a func.
 * @param call_info Use it to wait for the function to complete. Must
 *                  be valid until the function completes.
 */
void smp_call_async(unsigned int cpu_id, smp_call_func_t func, void *arg,
    smp_call_t *call_info)
{
	/* todo: doc deadlock */
	ASSERT(!interrupts_disabled());
	ASSERT(call_info != NULL);

	/* Discard invalid calls. */
	if (config.cpu_count <= cpu_id || !cpus[cpu_id].active) {
		call_start(call_info, func, arg);
		call_done(call_info);
		return;
	}

	/* Protect cpu->id against migration. */
	preemption_disable();

	call_start(call_info, func, arg);

	if (cpu_id != CPU->id) {
#ifdef CONFIG_SMP
		spinlock_lock(&cpus[cpu_id].smp_calls_lock);
		list_append(&call_info->calls_link, &cpus[cpu_id].smp_pending_calls);
		spinlock_unlock(&cpus[cpu_id].smp_calls_lock);

		/*
		 * If a platform supports SMP it must implement arch_smp_call_ipi().
		 * It should issue an IPI to cpu_id and invoke smp_call_ipi_recv()
		 * on cpu_id in turn.
		 *
		 * Do not implement it as just an empty dummy function. Instead
		 * consider providing a full implementation or at least a version
		 * that panics if invoked. Note that smp_call_async() never
		 * calls arch_smp_call_ipi() on uniprocessors even if CONFIG_SMP.
		 */
		arch_smp_call_ipi(cpu_id);
#endif
	} else {
		/* Invoke local smp calls in place. */
		ipl_t ipl = interrupts_disable();
		func(arg);
		interrupts_restore(ipl);

		call_done(call_info);
	}

	preemption_enable();
}
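
/*
 * The note above asks platforms that lack a real IPI path not to stub
 * arch_smp_call_ipi() out silently. A minimal placeholder along those lines
 * might look like the following sketch (illustrative only; the real
 * per-architecture implementations live elsewhere in the tree):
 *
 * @code
 * void arch_smp_call_ipi(unsigned int cpu_id)
 * {
 *     panic("arch_smp_call_ipi() is not implemented on this platform.");
 * }
 * @endcode
 */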

/** Waits for a function invoked on another CPU asynchronously to complete.
 *
 * Does not sleep but rather spins.
 *
 * Example usage:
 * @code
 * void hello(void *p) {
 *     puts((char*)p);
 * }
 *
 * smp_call_t call_info;
 * smp_call_async(cpus[2].id, hello, "hi!\n", &call_info);
 * // Do some work. In the meantime, hello() is executed on cpu2.
 * smp_call_wait(&call_info);
 * @endcode
 *
 * @param call_info Initialized by smp_call_async().
 */
void smp_call_wait(smp_call_t *call_info)
{
	call_wait(call_info);
}

#ifdef CONFIG_SMP

/** Architecture independent smp call IPI handler.
 *
 * Interrupts must be disabled. Tolerates spurious calls.
 */
void smp_call_ipi_recv(void)
{
	ASSERT(interrupts_disabled());
	ASSERT(CPU);

	list_t calls_list;
	list_initialize(&calls_list);

	spinlock_lock(&CPU->smp_calls_lock);
	list_splice(&CPU->smp_pending_calls, &calls_list.head);
	spinlock_unlock(&CPU->smp_calls_lock);

	/* Walk the list manually, so that we can safely remove list items. */
	for (link_t *cur = calls_list.head.next, *next = cur->next;
	    !list_empty(&calls_list); cur = next, next = cur->next) {

		smp_call_t *call_info = list_get_instance(cur, smp_call_t, calls_link);
		list_remove(cur);

		call_info->func(call_info->arg);
		call_done(call_info);
	}
}

#endif /* CONFIG_SMP */
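
/*
 * How smp_call_ipi_recv() gets invoked is platform specific. As a rough,
 * hypothetical sketch (the handler name and its registration are assumptions,
 * not part of this file), the platform's handler for the smp-call IPI vector
 * would simply dispatch here with interrupts still disabled:
 *
 * @code
 * static void smp_call_ipi_handler(void)
 * {
 *     // Spurious invocations are harmless: the pending list may be empty.
 *     smp_call_ipi_recv();
 * }
 * @endcode
 */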

static void call_start(smp_call_t *call_info, smp_call_func_t func, void *arg)
{
	link_initialize(&call_info->calls_link);
	call_info->func = func;
	call_info->arg = arg;

	/*
	 * We can't use standard spinlocks here because we want to lock
	 * the structure on one cpu and unlock it on another (without
	 * messing up the preemption count).
	 */
	atomic_set(&call_info->pending, 1);

	/* Let initialization complete before continuing. */
	memory_barrier();
}

static void call_done(smp_call_t *call_info)
{
	/*
	 * Separate memory accesses of the called function from the
	 * announcement of its completion.
	 */
	memory_barrier();
	atomic_set(&call_info->pending, 0);
}

static void call_wait(smp_call_t *call_info)
{
	do {
		/*
		 * Ensure memory accesses following call_wait() are ordered
		 * after completion of the called function on another cpu.
		 * Also, speed up loading of call_info->pending.
		 */
		memory_barrier();
	} while (atomic_get(&call_info->pending));
}