source: mainline/kernel/arch/arm32/src/atomic.c

Last change on this file was 133461c, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 21 months ago

Align arm32 atomic op prototypes with compiler's expectations

Based on a patch by @vhotspur, but without explicit casts
between pointer types. Those are evil footguns.

/*
 * Copyright (c) 2012 Adam Hraska
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_arm32
 * @{
 */
/** @file
 * @brief Atomic operations emulation.
 */

#include <synch/spinlock.h>
#include <arch/barrier.h>
#include <arch/asm.h>

unsigned __atomic_fetch_add_4(volatile void *mem0, unsigned val, int model)
{
        volatile unsigned *mem = mem0;

        /*
         * This implementation is for UP pre-ARMv6 systems where we do not
         * have the LDREX and STREX instructions; see the sketch after this
         * function for the exclusive-monitor approach used on later cores.
         */
        ipl_t ipl = interrupts_disable();
        unsigned ret = *mem;
        *mem += val;
        interrupts_restore(ipl);
        return ret;
}
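
/*
 * For comparison, a minimal sketch of the same operation using the
 * exclusive monitor (LDREX/STREX) available from ARMv6 on, instead of
 * disabling interrupts. Illustrative only and not compiled here; the
 * helper name is hypothetical, and the barriers a given memory model
 * would require are omitted.
 */
#if 0
static unsigned atomic_fetch_add_ldrex(volatile unsigned *mem, unsigned val)
{
        unsigned old, tmp, fail;

        asm volatile (
            "1:     ldrex %0, [%3]\n"      /* old = *mem, mark exclusive */
            "       add %1, %0, %4\n"      /* tmp = old + val */
            "       strex %2, %1, [%3]\n"  /* store tmp if still exclusive */
            "       cmp %2, #0\n"
            "       bne 1b\n"              /* retry if the exclusive store failed */
            : "=&r" (old), "=&r" (tmp), "=&r" (fail)
            : "r" (mem), "r" (val)
            : "cc", "memory");

        return old;
}
#endif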

unsigned __atomic_fetch_sub_4(volatile void *mem0, unsigned val, int model)
{
        volatile unsigned *mem = mem0;

        ipl_t ipl = interrupts_disable();
        unsigned ret = *mem;
        *mem -= val;
        interrupts_restore(ipl);
        return ret;
}

IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(cas_lock, "arm-cas-lock");

/** Implements GCC's missing compare-and-swap intrinsic for ARM.
 *
 * Sets \a *ptr to \a new_val if it is equal to \a expected. In any case,
 * returns the previous value of \a *ptr.
 */
unsigned __sync_val_compare_and_swap_4(volatile void *ptr0, unsigned expected,
    unsigned new_val)
{
        volatile unsigned *ptr = ptr0;

        /*
         * Using an interrupt disabling spinlock might still lead to deadlock
         * if CAS() is used in an exception handler. E.g., if a CAS() results
         * in a page fault exception and the exception handler again tries
         * to invoke CAS() (even for a different memory location), the
         * spinlock would deadlock.
         */
        irq_spinlock_lock(&cas_lock, true);

        unsigned cur_val = *ptr;

        if (cur_val == expected) {
                *ptr = new_val;
        }

        irq_spinlock_unlock(&cas_lock, true);

        return cur_val;
}
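
/*
 * A typical use of the CAS intrinsic above is a read-modify-write retry
 * loop. A minimal sketch; the helper is hypothetical and not part of
 * this file:
 */
#if 0
static void atomic_or_4(volatile unsigned *ptr, unsigned bits)
{
        unsigned old;

        do {
                old = *ptr;
                /* Succeeds only if nobody changed *ptr in the meantime. */
        } while (__sync_val_compare_and_swap_4(ptr, old, old | bits) != old);
}
#endif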

void __sync_synchronize(void)
{
        dsb();
}

/* Naive implementations of the newer intrinsics. */

_Bool __atomic_compare_exchange_4(volatile void *mem, void *expected0,
    unsigned desired, _Bool weak, int success, int failure)
{
        unsigned *expected = expected0;

        (void) weak;
        (void) success;
        (void) failure;

        unsigned old = *expected;
        unsigned new = __sync_val_compare_and_swap_4(mem, old, desired);
        if (old == new) {
                return 1;
        } else {
                *expected = new;
                return 0;
        }
}
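
/*
 * The compiler emits calls to __atomic_compare_exchange_4 when it cannot
 * inline a 4-byte compare-exchange, e.g. for C11 <stdatomic.h> code such
 * as the sketch below (hypothetical example; the exact lowering depends
 * on the target CPU and compiler flags):
 */
#if 0
#include <stdatomic.h>

static _Bool try_claim(atomic_uint *flag)
{
        unsigned expected = 0;

        /* Lowered to a call of __atomic_compare_exchange_4(). */
        return atomic_compare_exchange_strong(flag, &expected, 1);
}
#endif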

unsigned __atomic_exchange_4(volatile void *mem0, unsigned val, int model)
{
        volatile unsigned *mem = mem0;

        (void) model;

        irq_spinlock_lock(&cas_lock, true);
        unsigned old = *mem;
        *mem = val;
        irq_spinlock_unlock(&cas_lock, true);

        return old;
}
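
/*
 * Atomic exchange is the classic building block of a test-and-set
 * spinlock. A minimal sketch, hypothetical and not used by this kernel:
 */
#if 0
static void tas_lock(volatile unsigned *lock)
{
        /* Spin until the previous value was 0, i.e. we acquired the lock. */
        while (__atomic_exchange_4(lock, 1, __ATOMIC_ACQUIRE) != 0)
                ;
}
#endif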

/** @}
 */