source: mainline/kernel/arch/ia64/include/asm.h@ 8b4cfb9d

Last change on this file since 8b4cfb9d was 8b4cfb9d, checked in by Jakub Jermar <jakub@…>, 16 years ago

Serialize after the RSM instruction in pk_disable().

/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia64
 * @{
 */
/** @file
 */

#ifndef KERN_ia64_ASM_H_
#define KERN_ia64_ASM_H_

#include <config.h>
#include <typedefs.h>
#include <arch/types.h>
#include <arch/register.h>

#define IA64_IOSPACE_ADDRESS  0xE001000000000000ULL

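/*
 * Legacy I/O ports are accessed through a dedicated region of the
 * uncacheable address space starting at IA64_IOSPACE_ADDRESS. The
 * translation used by the pio_* accessors below appears to follow the
 * sparse ia64 I/O port space encoding: the port number divided by four
 * (prt >> 2) selects a 4 KiB page, while the low 12 bits of the port
 * number are kept as the offset within that page. For example, port
 * 0x3f8 maps to IA64_IOSPACE_ADDRESS + 0xfe3f8. The mf (memory fence)
 * issued after each write and before each read keeps the I/O access
 * ordered with respect to other memory operations.
 */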
static inline void pio_write_8(ioport8_t *port, uint8_t v)
{
	uintptr_t prt = (uintptr_t) port;

	*((ioport8_t *) (IA64_IOSPACE_ADDRESS +
	    ((prt & 0xfff) | ((prt >> 2) << 12)))) = v;

	asm volatile (
		"mf\n"
		::: "memory"
	);
}

static inline void pio_write_16(ioport16_t *port, uint16_t v)
{
	uintptr_t prt = (uintptr_t) port;

	*((ioport16_t *) (IA64_IOSPACE_ADDRESS +
	    ((prt & 0xfff) | ((prt >> 2) << 12)))) = v;

	asm volatile (
		"mf\n"
		::: "memory"
	);
}

static inline void pio_write_32(ioport32_t *port, uint32_t v)
{
	uintptr_t prt = (uintptr_t) port;

	*((ioport32_t *) (IA64_IOSPACE_ADDRESS +
	    ((prt & 0xfff) | ((prt >> 2) << 12)))) = v;

	asm volatile (
		"mf\n"
		::: "memory"
	);
}

static inline uint8_t pio_read_8(ioport8_t *port)
{
	uintptr_t prt = (uintptr_t) port;

	asm volatile (
		"mf\n"
		::: "memory"
	);

	return *((ioport8_t *) (IA64_IOSPACE_ADDRESS +
	    ((prt & 0xfff) | ((prt >> 2) << 12))));
}

static inline uint16_t pio_read_16(ioport16_t *port)
{
	uintptr_t prt = (uintptr_t) port;

	asm volatile (
		"mf\n"
		::: "memory"
	);

	return *((ioport16_t *) (IA64_IOSPACE_ADDRESS +
	    ((prt & 0xfff) | ((prt >> 2) << 12))));
}

static inline uint32_t pio_read_32(ioport32_t *port)
{
	uintptr_t prt = (uintptr_t) port;

	asm volatile (
		"mf\n"
		::: "memory"
	);

	return *((ioport32_t *) (IA64_IOSPACE_ADDRESS +
	    ((prt & 0xfff) | ((prt >> 2) << 12))));
}

/** Return base address of current stack
 *
 * Return the base address of the current stack.
 * The stack is assumed to be STACK_SIZE bytes long.
 * The stack must start on a page boundary.
 */
static inline uintptr_t get_stack_base(void)
{
	uint64_t v;

	/*
	 * For an unknown reason, the following code inlines badly in the
	 * scheduler: THE ends up shifted by about 16 bytes, which causes
	 * a kernel panic.
	 *
	 * asm volatile (
	 *     "and %[value] = %[mask], r12"
	 *     : [value] "=r" (v)
	 *     : [mask] "r" (~(STACK_SIZE - 1))
	 * );
	 * return v;
	 *
	 * The code below has the same meaning, but inlines well.
	 */

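	/* r12 is the stack pointer register in the ia64 calling convention. */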
	asm volatile (
		"mov %[value] = r12"
		: [value] "=r" (v)
	);

	return (v & (~(STACK_SIZE - 1)));
}

/** Return Processor State Register.
 *
 * @return PSR.
 */
static inline uint64_t psr_read(void)
{
	uint64_t v;

	asm volatile (
		"mov %[value] = psr\n"
		: [value] "=r" (v)
	);

	return v;
}

/** Read IVA (Interruption Vector Address).
 *
 * @return Location of the interruption vector table.
 */
static inline uint64_t iva_read(void)
{
	uint64_t v;

	asm volatile (
		"mov %[value] = cr.iva\n"
		: [value] "=r" (v)
	);

	return v;
}

/** Write IVA (Interruption Vector Address) register.
 *
 * @param v New location of the interruption vector table.
 */
static inline void iva_write(uint64_t v)
{
	asm volatile (
		"mov cr.iva = %[value]\n"
		:: [value] "r" (v)
	);
}

/** Read IVR (External Interrupt Vector Register).
 *
 * @return Highest priority, pending, unmasked external interrupt vector.
 */
static inline uint64_t ivr_read(void)
{
	uint64_t v;

	asm volatile (
		"mov %[value] = cr.ivr\n"
		: [value] "=r" (v)
	);

	return v;
}

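/** Read CR64.
 *
 * CR64 is presumably the LID (Local Interrupt Identification) register,
 * which identifies this processor for external interrupt delivery.
 */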
static inline uint64_t cr64_read(void)
{
	uint64_t v;

	asm volatile (
		"mov %[value] = cr64\n"
		: [value] "=r" (v)
	);

	return v;
}

/** Write ITC (Interval Timer Counter) register.
 *
 * @param v New counter value.
 */
static inline void itc_write(uint64_t v)
{
	asm volatile (
		"mov ar.itc = %[value]\n"
		:: [value] "r" (v)
	);
}

/** Read ITC (Interval Timer Counter) register.
 *
 * @return Current counter value.
 */
static inline uint64_t itc_read(void)
{
	uint64_t v;

	asm volatile (
		"mov %[value] = ar.itc\n"
		: [value] "=r" (v)
	);

	return v;
}

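/*
 * The interval timer raises its interrupt when ITC reaches the value
 * programmed into ITM. A periodic clock tick would therefore typically
 * be armed along the lines of itm_write(itc_read() + delta), where delta
 * (a hypothetical tick length in ITC cycles) is not defined in this file.
 */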
/** Write ITM (Interval Timer Match) register.
 *
 * @param v New match value.
 */
static inline void itm_write(uint64_t v)
{
	asm volatile (
		"mov cr.itm = %[value]\n"
		:: [value] "r" (v)
	);
}

/** Read ITM (Interval Timer Match) register.
 *
 * @return Match value.
 */
static inline uint64_t itm_read(void)
{
	uint64_t v;

	asm volatile (
		"mov %[value] = cr.itm\n"
		: [value] "=r" (v)
	);

	return v;
}

/** Read ITV (Interval Timer Vector) register.
 *
 * @return Current vector and mask bit.
 */
static inline uint64_t itv_read(void)
{
	uint64_t v;

	asm volatile (
		"mov %[value] = cr.itv\n"
		: [value] "=r" (v)
	);

	return v;
}

/** Write ITV (Interval Timer Vector) register.
 *
 * @param v New vector and mask bit.
 */
static inline void itv_write(uint64_t v)
{
	asm volatile (
		"mov cr.itv = %[value]\n"
		:: [value] "r" (v)
	);
}

/** Write EOI (End Of Interrupt) register.
 *
 * @param v This value is ignored.
 */
static inline void eoi_write(uint64_t v)
{
	asm volatile (
		"mov cr.eoi = %[value]\n"
		:: [value] "r" (v)
	);
}

/** Read TPR (Task Priority Register).
 *
 * @return Current value of TPR.
 */
static inline uint64_t tpr_read(void)
{
	uint64_t v;

	asm volatile (
		"mov %[value] = cr.tpr\n"
		: [value] "=r" (v)
	);

	return v;
}

/** Write TPR (Task Priority Register).
 *
 * @param v New value of TPR.
 */
static inline void tpr_write(uint64_t v)
{
	asm volatile (
		"mov cr.tpr = %[value]\n"
		:: [value] "r" (v)
	);
}

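/*
 * The usual pattern for a short critical section elsewhere in the kernel
 * is presumably:
 *
 *	ipl_t ipl = interrupts_disable();
 *	... critical section ...
 *	interrupts_restore(ipl);
 *
 * so that the previous interrupt state, whatever it was, is reinstated.
 */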
/** Disable interrupts.
 *
 * Disable interrupts and return the previous
 * value of PSR.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_disable(void)
{
	uint64_t v;

	asm volatile (
		"mov %[value] = psr\n"
		"rsm %[mask]\n"
		: [value] "=r" (v)
		: [mask] "i" (PSR_I_MASK)
	);

	return (ipl_t) v;
}

/** Enable interrupts.
 *
 * Enable interrupts and return the previous
 * value of PSR.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_enable(void)
{
	uint64_t v;

	asm volatile (
		"mov %[value] = psr\n"
		"ssm %[mask]\n"
		";;\n"
		"srlz.d\n"
		: [value] "=r" (v)
		: [mask] "i" (PSR_I_MASK)
	);

	return (ipl_t) v;
}

/** Restore interrupt priority level.
 *
 * Restore PSR.
 *
 * @param ipl Saved interrupt priority level.
 */
static inline void interrupts_restore(ipl_t ipl)
{
	if (ipl & PSR_I_MASK)
		(void) interrupts_enable();
	else
		(void) interrupts_disable();
}

/** Return interrupt priority level.
 *
 * @return PSR.
 */
static inline ipl_t interrupts_read(void)
{
	return (ipl_t) psr_read();
}

/** Disable protection key checking. */
static inline void pk_disable(void)
{
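	/*
	 * The stop (;;) and srlz.d after rsm serialize the change so that
	 * the cleared PSR.pk bit takes effect for subsequent data
	 * references; this is the serialization referred to in the commit
	 * message above.
	 */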
	asm volatile (
		"rsm %[mask]\n"
		";;\n"
		"srlz.d\n"
		:: [mask] "i" (PSR_PK_MASK)
	);
}

extern void cpu_halt(void);
extern void cpu_sleep(void);
extern void asm_delay_loop(uint32_t t);

extern void switch_to_userspace(uintptr_t, uintptr_t, uintptr_t, uintptr_t,
    uint64_t, uint64_t);

#endif

/** @}
 */