source: mainline/kernel/arch/ia32/include/atomic.h@ ee06f2a

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since ee06f2a was 82b72e40, checked in by Jakub Jermar <jakub@…>, 17 years ago

Prevent 'tmp' in ia32's atomic_lock_arch() from being allocated
to the same register as 'val->count'.

  • Property mode set to 100644
File size: 3.2 KB
RevLine 
[f761f1eb]1/*
[df4ed85]2 * Copyright (c) 2001-2004 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[06e1e95]29/** @addtogroup ia32
[b45c443]30 * @{
31 */
32/** @file
33 */
34
[06e1e95]35#ifndef KERN_ia32_ATOMIC_H_
36#define KERN_ia32_ATOMIC_H_
[f761f1eb]37
38#include <arch/types.h>
[53f9821]39#include <arch/barrier.h>
40#include <preemption.h>
[59e07c91]41
/** Atomically increment val->count.
 *
 * On SMP the 'lock' prefix makes the read-modify-write atomic across
 * CPUs; on UP a plain incl suffices (a single instruction is atomic
 * with respect to interrupts on ia32).
 */
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
	asm volatile ("lock incl %0\n" : "+m" (val->count));
#else
	asm volatile ("incl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
49
[59e07c91]50static inline void atomic_dec(atomic_t *val) {
[5f85c91]51#ifdef CONFIG_SMP
[9f491d7]52 asm volatile ("lock decl %0\n" : "+m" (val->count));
[18e0a6c]53#else
[9f491d7]54 asm volatile ("decl %0\n" : "+m" (val->count));
[5f85c91]55#endif /* CONFIG_SMP */
[18e0a6c]56}
57
[23684b7]58static inline long atomic_postinc(atomic_t *val)
[73a4bab]59{
[e5dc7b8]60 long r = 1;
[10c071e]61
[e7b7be3f]62 asm volatile (
[e5dc7b8]63 "lock xaddl %1, %0\n"
[9f491d7]64 : "+m" (val->count), "+r" (r)
[73a4bab]65 );
[10c071e]66
[73a4bab]67 return r;
68}
69
[23684b7]70static inline long atomic_postdec(atomic_t *val)
[73a4bab]71{
[e5dc7b8]72 long r = -1;
[10c071e]73
[e7b7be3f]74 asm volatile (
[e5dc7b8]75 "lock xaddl %1, %0\n"
[9f491d7]76 : "+m" (val->count), "+r"(r)
[73a4bab]77 );
[10c071e]78
[73a4bab]79 return r;
80}
81
[9f491d7]82#define atomic_preinc(val) (atomic_postinc(val) + 1)
83#define atomic_predec(val) (atomic_postdec(val) - 1)
[73a4bab]84
[7f1c620]85static inline uint32_t test_and_set(atomic_t *val) {
86 uint32_t v;
[18e0a6c]87
[e7b7be3f]88 asm volatile (
[18e0a6c]89 "movl $1, %0\n"
[345ce2f]90 "xchgl %0, %1\n"
[9f491d7]91 : "=r" (v),"+m" (val->count)
[18e0a6c]92 );
93
94 return v;
95}
96
[23684b7]97/** ia32 specific fast spinlock */
[53f9821]98static inline void atomic_lock_arch(atomic_t *val)
99{
[7f1c620]100 uint32_t tmp;
[f761f1eb]101
[53f9821]102 preemption_disable();
[e7b7be3f]103 asm volatile (
[9f491d7]104 "0:\n"
[53f9821]105#ifdef CONFIG_HT
[9f491d7]106 "pause\n" /* Pentium 4's HT love this instruction */
[53f9821]107#endif
[9f491d7]108 "mov %0, %1\n"
109 "testl %1, %1\n"
110 "jnz 0b\n" /* lightweight looping on locked spinlock */
[53f9821]111
[9f491d7]112 "incl %1\n" /* now use the atomic operation */
113 "xchgl %0, %1\n"
114 "testl %1, %1\n"
115 "jnz 0b\n"
[82b72e40]116 : "+m" (val->count), "=&r"(tmp)
[9f491d7]117 );
[53f9821]118 /*
119 * Prevent critical section code from bleeding out this way up.
120 */
121 CS_ENTER_BARRIER();
122}
[f761f1eb]123
124#endif
[b45c443]125
[06e1e95]126/** @}
[b45c443]127 */
Note: See TracBrowser for help on using the repository browser.