source: mainline/kernel/generic/src/smp/smp_call.c@ 22b5924

Branches containing this file: serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 22b5924 was e0c80f66, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

smp_call: Added license.

  • Property mode set to 100644
File size: 7.3 KB
Line 
1/*
2 * Copyright (c) 2012 Adam Hraska
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup generic
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Facility to invoke functions on other cpus via IPIs.
36 */
37
38#include <smp/smp_call.h>
39#include <arch.h>
40#include <config.h>
41#include <preemption.h>
42#include <arch/barrier.h>
43#include <arch/asm.h> /* interrupt_disable */
44
45
46static void call_start(smp_call_t *call_info, smp_call_func_t func, void *arg);
47static void call_done(smp_call_t *call_info);
48static void call_wait(smp_call_t *call_info);
49
50
/** Init smp_call() on the local cpu.
 *
 * Initializes the per-cpu state (pending-call list and its lock) used by
 * smp_call()/smp_call_async(). Must run on the cpu being initialized,
 * with preemption disabled or interrupts off, so CPU cannot change
 * under us mid-initialization.
 */
void smp_call_init(void)
{
	ASSERT(CPU);
	ASSERT(PREEMPTION_DISABLED || interrupts_disabled());

	spinlock_initialize(&CPU->smp_calls_lock, "cpu[].smp_calls_lock");
	list_initialize(&CPU->smp_pending_calls);
}
60
61/** Invokes a function on a specific cpu and waits for it to complete.
62 *
63 * Calls @a func on the CPU denoted by its logical id @cpu_id .
64 * The function will execute with interrupts disabled. It should
65 * be a quick and simple function and must never block.
66 *
67 * If @a cpu_id is the local CPU, the function will be invoked
68 * directly.
69 *
70 * @param cpu_id Destination CPU's logical id (eg CPU->id)
71 * @param func Function to call.
72 * @param arg Argument to pass to the user supplied function @a func.
73 */
74void smp_call(unsigned int cpu_id, smp_call_func_t func, void *arg)
75{
76 smp_call_t call_info;
77 smp_call_async(cpu_id, func, arg, &call_info);
78 smp_call_wait(&call_info);
79}
80
/** Invokes a function on a specific cpu asynchronously.
 *
 * Calls @a func on the CPU denoted by its logical id @a cpu_id.
 * The function will execute with interrupts disabled. It should
 * be a quick and simple function and must never block.
 *
 * Pass @a call_info to smp_call_wait() in order to wait for
 * @a func to complete.
 *
 * @a call_info must be valid until @a func returns.
 *
 * If @a cpu_id is the local CPU, the function will be invoked
 * directly. If the destination cpu id @a cpu_id is invalid
 * or denotes an inactive cpu, the call is discarded immediately.
 *
 * Interrupts must be enabled. Otherwise you run the risk
 * of a deadlock.
 *
 * @param cpu_id Destination CPU's logical id (eg CPU->id).
 * @param func Function to call.
 * @param arg Argument to pass to the user supplied function @a func.
 * @param call_info Use it to wait for the function to complete. Must
 * be valid until the function completes.
 */
void smp_call_async(unsigned int cpu_id, smp_call_func_t func, void *arg,
	smp_call_t *call_info)
{
	/*
	 * NOTE(review): deadlock rationale — presumably, if interrupts were
	 * disabled here while the destination cpu simultaneously sent us an
	 * smp_call IPI, neither side could service the other's request and
	 * both would spin forever in call_wait(). Confirm against
	 * smp_call_ipi_recv(), which requires interrupts to be delivered.
	 */
	ASSERT(!interrupts_disabled());
	ASSERT(call_info != NULL);

	/* Discard invalid calls. */
	if (config.cpu_count <= cpu_id || !cpus[cpu_id].active) {
		/* Mark the call completed immediately so smp_call_wait() returns. */
		call_start(call_info, func, arg);
		call_done(call_info);
		return;
	}

	/* Protect cpu->id against migration. */
	preemption_disable();

	call_start(call_info, func, arg);

	if (cpu_id != CPU->id) {
#ifdef CONFIG_SMP
		spinlock_lock(&cpus[cpu_id].smp_calls_lock);
		list_append(&call_info->calls_link, &cpus[cpu_id].smp_pending_calls);
		spinlock_unlock(&cpus[cpu_id].smp_calls_lock);

		/*
		 * If a platform supports SMP it must implement arch_smp_call_ipi().
		 * It should issue an IPI to cpu_id and invoke smp_call_ipi_recv()
		 * on cpu_id in turn.
		 *
		 * Do not implement as just an empty dummy function. Instead
		 * consider providing a full implementation or at least a version
		 * that panics if invoked. Note that smp_call_async() never
		 * calls arch_smp_call_ipi() on uniprocessors even if CONFIG_SMP.
		 */
		arch_smp_call_ipi(cpu_id);
#endif
	} else {
		/* Invoke local smp calls in place. */
		ipl_t ipl = interrupts_disable();
		func(arg);
		interrupts_restore(ipl);

		call_done(call_info);
	}

	preemption_enable();
}
153
/** Waits for a function invoked on another CPU asynchronously to complete.
 *
 * Does not sleep but rather spins.
 *
 * Example usage:
 * @code
 * void hello(void *p) {
 *     puts((char*)p);
 * }
 *
 * smp_call_t call_info;
 * smp_call_async(cpus[2].id, hello, "hi!\n", &call_info);
 * // Do some work. In the meantime, hello() is executed on cpu2.
 * smp_call_wait(&call_info);
 * @endcode
 *
 * @param call_info Initialized by smp_call_async().
 */
void smp_call_wait(smp_call_t *call_info)
{
	/* Busy-waits until call_info->pending is cleared by call_done(). */
	call_wait(call_info);
}
176
177#ifdef CONFIG_SMP
178
/** Architecture independent smp call IPI handler.
 *
 * Drains this cpu's pending smp call queue and invokes each queued
 * function in turn. Invoked by the architecture's IPI handler in
 * response to arch_smp_call_ipi().
 *
 * Interrupts must be disabled. Tolerates spurious calls.
 */
void smp_call_ipi_recv(void)
{
	ASSERT(interrupts_disabled());
	ASSERT(CPU);

	list_t calls_list;
	list_initialize(&calls_list);

	/*
	 * Grab the entire batch of pending calls in one short critical
	 * section, so the lock is not held while the user functions run.
	 */
	spinlock_lock(&CPU->smp_calls_lock);
	list_splice(&CPU->smp_pending_calls, &calls_list.head);
	spinlock_unlock(&CPU->smp_calls_lock);

	/* Walk the list manually, so that we can safely remove list items. */
	for (link_t *cur = calls_list.head.next, *next = cur->next;
	    !list_empty(&calls_list); cur = next, next = cur->next) {

		smp_call_t *call_info = list_get_instance(cur, smp_call_t, calls_link);
		list_remove(cur);

		/*
		 * Once call_done() signals completion, the waiter may reuse or
		 * discard call_info (it is typically on the caller's stack, see
		 * smp_call()), so call_info must not be touched afterwards.
		 */
		call_info->func(call_info->arg);
		call_done(call_info);
	}
}
206
207#endif /* CONFIG_SMP */
208
/** Marks a call as pending and publishes its function/argument.
 *
 * Must complete before the call is made visible to another cpu
 * (e.g. by appending it to that cpu's pending list).
 *
 * @param call_info Call descriptor to initialize.
 * @param func      Function the destination cpu should invoke.
 * @param arg       Argument to pass to @a func.
 */
static void call_start(smp_call_t *call_info, smp_call_func_t func, void *arg)
{
	link_initialize(&call_info->calls_link);
	call_info->func = func;
	call_info->arg = arg;

	/*
	 * We can't use standard spinlocks here because we want to lock
	 * the structure on one cpu and unlock it on another (without
	 * messing up the preemption count).
	 */
	atomic_set(&call_info->pending, 1);

	/* Let initialization complete before continuing. */
	memory_barrier();
}
225
/** Signals completion of a call; releases any spinning call_wait().
 *
 * After the pending flag is cleared the waiter may immediately reuse
 * @a call_info, so the callee must not touch it afterwards.
 *
 * @param call_info Call descriptor previously set up by call_start().
 */
static void call_done(smp_call_t *call_info)
{
	/*
	 * Separate memory accesses of the called function from the
	 * announcement of its completion.
	 */
	memory_barrier();
	atomic_set(&call_info->pending, 0);
}
235
/** Spins until @a call_info is marked done by call_done().
 *
 * The barrier executes at least once even if the call has already
 * completed (do/while), ordering subsequent accesses after the
 * called function's effects on the other cpu.
 *
 * @param call_info Call descriptor to wait on.
 */
static void call_wait(smp_call_t *call_info)
{
	do {
		/*
		 * Ensure memory accesses following call_wait() are ordered
		 * after completion of the called function on another cpu.
		 * Also, speed up loading of call_info->pending.
		 */
		memory_barrier();
	} while (atomic_get(&call_info->pending));
}
247
248
249/** @}
250 */
Note: See TracBrowser for help on using the repository browser.