source: mainline/kernel/generic/src/smp/smp_call.c@ 7cfe5c0

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 7cfe5c0 was c14762e, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

adt: Removed duplicate implementations of list_concat().

  • Property mode set to 100644
File size: 7.3 KB
Line 
1/*
2 * Copyright (c) 2012 Adam Hraska
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup generic
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Facility to invoke functions on other cpus via IPIs.
36 */
37
38#include <smp/smp_call.h>
39#include <arch/barrier.h>
40#include <arch/asm.h> /* interrupt_disable */
41#include <arch.h>
42#include <config.h>
43#include <preemption.h>
44#include <debug.h>
45#include <cpu.h>
46
47static void call_start(smp_call_t *call_info, smp_call_func_t func, void *arg);
48static void call_done(smp_call_t *call_info);
49static void call_wait(smp_call_t *call_info);
50
51
52/** Init smp_call() on the local cpu. */
53void smp_call_init(void)
54{
55 ASSERT(CPU);
56 ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
57
58 spinlock_initialize(&CPU->smp_calls_lock, "cpu[].smp_calls_lock");
59 list_initialize(&CPU->smp_pending_calls);
60}
61
62/** Invokes a function on a specific cpu and waits for it to complete.
63 *
64 * Calls @a func on the CPU denoted by its logical id @cpu_id .
65 * The function will execute with interrupts disabled. It should
66 * be a quick and simple function and must never block.
67 *
68 * If @a cpu_id is the local CPU, the function will be invoked
69 * directly.
70 *
71 * @param cpu_id Destination CPU's logical id (eg CPU->id)
72 * @param func Function to call.
73 * @param arg Argument to pass to the user supplied function @a func.
74 */
75void smp_call(unsigned int cpu_id, smp_call_func_t func, void *arg)
76{
77 smp_call_t call_info;
78 smp_call_async(cpu_id, func, arg, &call_info);
79 smp_call_wait(&call_info);
80}
81
/** Invokes a function on a specific cpu asynchronously.
 *
 * Calls @a func on the CPU denoted by its logical id @a cpu_id .
 * The function will execute with interrupts disabled. It should
 * be a quick and simple function and must never block.
 *
 * Pass @a call_info to smp_call_wait() in order to wait for
 * @a func to complete.
 *
 * @a call_info must be valid until @a func returns.
 *
 * If @a cpu_id is the local CPU, the function will be invoked
 * directly. If the destination cpu id @a cpu_id is invalid
 * or denotes an inactive cpu, the call is discarded immediately.
 *
 * Interrupts must be enabled. Otherwise you run the risk
 * of a deadlock.
 *
 * @param cpu_id Destination CPU's logical id (eg CPU->id).
 * @param func Function to call.
 * @param arg Argument to pass to the user supplied function @a func.
 * @param call_info Use it to wait for the function to complete. Must
 *          be valid until the function completes.
 */
void smp_call_async(unsigned int cpu_id, smp_call_func_t func, void *arg,
	smp_call_t *call_info)
{
	/*
	 * NOTE(review): interrupts must be enabled here -- presumably so
	 * that pending smp call IPIs from other cpus can still be serviced
	 * while this cpu later spins in smp_call_wait(), which would
	 * otherwise risk a cross-cpu deadlock. TODO confirm exact scenario.
	 */
	ASSERT(!interrupts_disabled());
	ASSERT(call_info != NULL);

	/*
	 * Discard invalid calls. Still run the start/done protocol so that
	 * a subsequent smp_call_wait() on call_info returns immediately.
	 */
	if (config.cpu_count <= cpu_id || !cpus[cpu_id].active) {
		call_start(call_info, func, arg);
		call_done(call_info);
		return;
	}

	/* Protect cpu->id against migration. */
	preemption_disable();

	/* Mark call_info pending and publish func/arg before queueing it. */
	call_start(call_info, func, arg);

	if (cpu_id != CPU->id) {
#ifdef CONFIG_SMP
		spinlock_lock(&cpus[cpu_id].smp_calls_lock);
		list_append(&call_info->calls_link, &cpus[cpu_id].smp_pending_calls);
		spinlock_unlock(&cpus[cpu_id].smp_calls_lock);

		/*
		 * If a platform supports SMP it must implement arch_smp_call_ipi().
		 * It should issue an IPI to cpu_id and invoke smp_call_ipi_recv()
		 * on cpu_id in turn.
		 *
		 * Do not implement as just an empty dummy function. Instead
		 * consider providing a full implementation or at least a version
		 * that panics if invoked. Note that smp_call_async() never
		 * calls arch_smp_call_ipi() on uniprocessors even if CONFIG_SMP.
		 */
		arch_smp_call_ipi(cpu_id);
#endif
	} else {
		/* Invoke local smp calls in place. */
		ipl_t ipl = interrupts_disable();
		func(arg);
		interrupts_restore(ipl);

		call_done(call_info);
	}

	preemption_enable();
}
154
/** Waits for a function invoked on another CPU asynchronously to complete.
 *
 * Does not sleep but rather spins.
 *
 * Example usage:
 * @code
 * void hello(void *p) {
 *      puts((char*)p);
 * }
 *
 * smp_call_t call_info;
 * smp_call_async(cpus[2].id, hello, "hi!\n", &call_info);
 * // Do some work. In the meantime, hello() is executed on cpu2.
 * smp_call_wait(&call_info);
 * @endcode
 *
 * @param call_info Initialized by smp_call_async().
 */
void smp_call_wait(smp_call_t *call_info)
{
	/* Spin until call_done() on the target cpu clears the pending flag. */
	call_wait(call_info);
}
177
178#ifdef CONFIG_SMP
179
180/** Architecture independent smp call IPI handler.
181 *
182 * Interrupts must be disabled. Tolerates spurious calls.
183 */
184void smp_call_ipi_recv(void)
185{
186 ASSERT(interrupts_disabled());
187 ASSERT(CPU);
188
189 list_t calls_list;
190 list_initialize(&calls_list);
191
192 spinlock_lock(&CPU->smp_calls_lock);
193 list_concat(&calls_list, &CPU->smp_pending_calls);
194 spinlock_unlock(&CPU->smp_calls_lock);
195
196 /* Walk the list manually, so that we can safely remove list items. */
197 for (link_t *cur = calls_list.head.next, *next = cur->next;
198 !list_empty(&calls_list); cur = next, next = cur->next) {
199
200 smp_call_t *call_info = list_get_instance(cur, smp_call_t, calls_link);
201 list_remove(cur);
202
203 call_info->func(call_info->arg);
204 call_done(call_info);
205 }
206}
207
208#endif /* CONFIG_SMP */
209
210static void call_start(smp_call_t *call_info, smp_call_func_t func, void *arg)
211{
212 link_initialize(&call_info->calls_link);
213 call_info->func = func;
214 call_info->arg = arg;
215
216 /*
217 * We can't use standard spinlocks here because we want to lock
218 * the structure on one cpu and unlock it on another (without
219 * messing up the preemption count).
220 */
221 atomic_set(&call_info->pending, 1);
222
223 /* Let initialization complete before continuing. */
224 memory_barrier();
225}
226
/** Announces completion of @a call_info's function; pairs with call_wait(). */
static void call_done(smp_call_t *call_info)
{
	/*
	 * Separate memory accesses of the called function from the
	 * announcement of its completion.
	 */
	memory_barrier();
	atomic_set(&call_info->pending, 0);
}
236
/** Spins until call_done() clears @a call_info's pending flag. */
static void call_wait(smp_call_t *call_info)
{
	do {
		/*
		 * Ensure memory accesses following call_wait() are ordered
		 * after completion of the called function on another cpu.
		 * Also, speed up loading of call_info->pending.
		 */
		memory_barrier();
	} while (atomic_get(&call_info->pending));
}
248
249
250/** @}
251 */
Note: See TracBrowser for help on using the repository browser.