source: mainline/kernel/arch/amd64/include/atomic.h@86018c1

Last change on this file since 86018c1 was c00589d, checked in by Martin Decky <martin@…>, 16 years ago

remove the confusing "Improved support for hyperthreading" configuration option

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64
 * @{
 */
/** @file
 */

#ifndef KERN_amd64_ATOMIC_H_
#define KERN_amd64_ATOMIC_H_

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>

static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock incq %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"incq %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}

static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock decq %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"decq %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}
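
/*
 * Illustrative sketch (not part of the original file): on a non-SMP build
 * the lock prefix is omitted above, because with a single CPU the
 * read-modify-write "incq"/"decq" can only be interleaved with interrupt
 * handlers, and a single instruction is already atomic with respect to
 * interrupts. A hypothetical reference counter using these primitives:
 *
 *	static atomic_t refcount;	// hypothetical counter
 *
 *	void ref_get(void)
 *	{
 *		atomic_inc(&refcount);	// safe on SMP and UP alike
 *	}
 *
 *	void ref_put(void)
 *	{
 *		atomic_dec(&refcount);
 *	}
 */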

static inline long atomic_postinc(atomic_t *val)
{
	long r = 1;

	asm volatile (
		"lock xaddq %[r], %[count]\n"
		: [count] "+m" (val->count), [r] "+r" (r)
	);

	return r;
}

static inline long atomic_postdec(atomic_t *val)
{
	long r = -1;

	asm volatile (
		"lock xaddq %[r], %[count]\n"
		: [count] "+m" (val->count), [r] "+r" (r)
	);

	return r;
}

#define atomic_preinc(val)  (atomic_postinc(val) + 1)
#define atomic_predec(val)  (atomic_postdec(val) - 1)
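
/*
 * Illustrative sketch (not part of the original file): "lock xaddq"
 * atomically adds the register to memory and leaves the previous memory
 * value in the register, so atomic_postinc() returns the value *before*
 * the increment, and the preinc/predec macros derive the new value with
 * plain arithmetic, avoiding a second atomic operation. A hypothetical
 * ticket dispenser built on top:
 *
 *	static atomic_t next_ticket;	// hypothetical counter
 *
 *	long take_ticket(void)
 *	{
 *		// each caller gets a unique, monotonically increasing number
 *		return atomic_postinc(&next_ticket);
 *	}
 */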

static inline uint64_t test_and_set(atomic_t *val)
{
	uint64_t v;

	asm volatile (
		"movq $1, %[v]\n"
		"xchgq %[v], %[count]\n"
		: [v] "=r" (v), [count] "+m" (val->count)
	);

	return v;
}
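
/*
 * Illustrative sketch (not part of the original file): "xchgq" with a
 * memory operand is implicitly locked, hence no lock prefix is needed
 * above. test_and_set() stores 1 and returns the previous value, so a
 * zero return means the caller won the race, which is enough to build a
 * hypothetical non-blocking trylock:
 *
 *	static inline bool my_trylock(atomic_t *lock)	// hypothetical helper
 *	{
 *		// zero was stored there before => we now own the lock
 *		return test_and_set(lock) == 0;
 *	}
 */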

/** amd64 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
	uint64_t tmp;

	preemption_disable();
	asm volatile (
		"0:\n"
		"pause\n"
		"mov %[count], %[tmp]\n"
		"testq %[tmp], %[tmp]\n"
		"jnz 0b\n"              /* lightweight looping on locked spinlock */

		"incq %[tmp]\n"         /* now use the atomic operation */
		"xchgq %[count], %[tmp]\n"
		"testq %[tmp], %[tmp]\n"
		"jnz 0b\n"
		: [count] "+m" (val->count), [tmp] "=&r" (tmp)
	);
	/*
	 * Prevent the critical section code from bleeding out above the
	 * lock acquisition.
	 */
	CS_ENTER_BARRIER();
}
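
/*
 * Illustrative sketch (not part of the original file): the loop above
 * spins on a plain read ("mov"/"testq") and only issues the bus-locking
 * "xchgq" once the lock appears free, which keeps the cache line shared
 * while waiting. A hypothetical matching unlock would order the critical
 * section before the release and re-enable preemption (assuming
 * CS_LEAVE_BARRIER() and preemption_enable() as the counterparts of the
 * macros used above):
 *
 *	static inline void my_unlock_arch(atomic_t *val)
 *	{
 *		CS_LEAVE_BARRIER();	// keep the critical section before the store
 *		val->count = 0;		// release the lock
 *		preemption_enable();
 *	}
 */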

#endif

/** @}
 */