source: mainline/uspace/lib/c/arch/arm32/src/atomic.c@ 133461c

Last change on this file was 133461c, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 2 years ago

Align arm32 atomic op prototypes with compiler's expectations

Based on a patch by @vhotspur, but without explicit casts
between pointer types. Those are evil footguns.

/*
 * Copyright (c) 2007 Michal Kebrt
 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Older ARMs don't have atomic instructions, so we need to define a bunch
 * of symbols for GCC to use.
 */
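
/*
 * For example, when a program on such a target uses a builtin like
 *
 *     unsigned counter = 0;    // counter is a hypothetical variable
 *     __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
 *
 * GCC emits a call to the out-of-line __atomic_fetch_add_4() defined
 * below instead of an inline instruction sequence.
 */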

#include <stdbool.h>
#include "ras_page.h"

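/*
 * ras_page[0] and ras_page[1] hold the start and end address of the
 * restartable atomic sequence currently in progress (presumably observed
 * by the kernel when it decides whether a restart is needed); the values
 * 0 and 0xffffffff mark "no sequence active". See ras_page.h.
 */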
volatile unsigned *ras_page;

bool __atomic_compare_exchange_4(volatile void *mem0, void *expected0,
    unsigned desired, bool weak, int success, int failure)
{
    volatile unsigned *mem = mem0;
    unsigned *expected = expected0;

    (void) success;
    (void) failure;
    (void) weak;

    unsigned ov = *expected;
    unsigned ret;

    /*
     * The following instructions between labels 1 and 2 constitute a
     * Restartable Atomic Sequence. Should the sequence be non-atomic,
     * the kernel will restart it.
     */
    asm volatile (
        /* Publish the start (label 1) and end (label 2) addresses of
         * the sequence so the kernel can recognize it. */
        "1:\n"
        "    adr %[ret], 1b\n"
        "    str %[ret], %[rp0]\n"
        "    adr %[ret], 2f\n"
        "    str %[ret], %[rp1]\n"

        /* Load the current value and store the desired value only if
         * the current value equals the expected one. */
        "    ldr %[ret], %[addr]\n"
        "    cmp %[ret], %[ov]\n"
        "    streq %[nv], %[addr]\n"
        "2:\n"
        : [ret] "=&r" (ret),
          [rp0] "=m" (ras_page[0]),
          [rp1] "=m" (ras_page[1]),
          [addr] "+m" (*mem)
        : [ov] "r" (ov),
          [nv] "r" (desired)
        : "memory"
    );

    /* Mark the sequence as no longer active. */
    ras_page[0] = 0;
    ras_page[1] = 0xffffffff;

    if (ret == ov)
        return true;

    *expected = ret;
    return false;
}
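
/*
 * A sketch of a caller (var, ov and nv being hypothetical): a builtin use
 * such as
 *
 *     unsigned expected = ov;
 *     __atomic_compare_exchange_n(&var, &expected, nv, false,
 *         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 *
 * compiles down to a call of __atomic_compare_exchange_4() above, with
 * expected updated to the observed value on failure.
 */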

unsigned short __atomic_fetch_add_2(volatile void *mem0, unsigned short val,
    int model)
{
    volatile unsigned short *mem = mem0;

    (void) model;

    unsigned short ret;

    /*
     * The following instructions between labels 1 and 2 constitute a
     * Restartable Atomic Sequence. Should the sequence be non-atomic,
     * the kernel will restart it.
     */
    asm volatile (
        "1:\n"
        "    adr %[ret], 1b\n"
        "    str %[ret], %[rp0]\n"
        "    adr %[ret], 2f\n"
        "    str %[ret], %[rp1]\n"
        "    ldrh %[ret], %[addr]\n"
        "    add %[ret], %[ret], %[imm]\n"
        "    strh %[ret], %[addr]\n"
        "2:\n"
        : [ret] "=&r" (ret),
          [rp0] "=m" (ras_page[0]),
          [rp1] "=m" (ras_page[1]),
          [addr] "+m" (*mem)
        : [imm] "r" (val)
    );

    ras_page[0] = 0;
    ras_page[1] = 0xffffffff;

    /* ret holds the new value; return the old one. */
    return ret - val;
}

unsigned __atomic_fetch_add_4(volatile void *mem0, unsigned val, int model)
{
    volatile unsigned *mem = mem0;

    (void) model;

    unsigned ret;

    /*
     * The following instructions between labels 1 and 2 constitute a
     * Restartable Atomic Sequence. Should the sequence be non-atomic,
     * the kernel will restart it.
     */
    asm volatile (
        "1:\n"
        "    adr %[ret], 1b\n"
        "    str %[ret], %[rp0]\n"
        "    adr %[ret], 2f\n"
        "    str %[ret], %[rp1]\n"
        "    ldr %[ret], %[addr]\n"
        "    add %[ret], %[ret], %[imm]\n"
        "    str %[ret], %[addr]\n"
        "2:\n"
        : [ret] "=&r" (ret),
          [rp0] "=m" (ras_page[0]),
          [rp1] "=m" (ras_page[1]),
          [addr] "+m" (*mem)
        : [imm] "r" (val)
    );

    ras_page[0] = 0;
    ras_page[1] = 0xffffffff;

    /* ret holds the new value; return the old one. */
    return ret - val;
}

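/*
 * Unsigned addition is modular, so adding -val (the two's complement of
 * val) is exactly subtraction; the subtraction variant can therefore
 * reuse the addition sequence.
 */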
unsigned __atomic_fetch_sub_4(volatile void *mem, unsigned val, int model)
{
    return __atomic_fetch_add_4(mem, -val, model);
}

void __sync_synchronize(void)
{
    // FIXME: Full memory barrier. We might need a syscall for this.
}

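/*
 * The remaining entry points implement the older __sync_* family of GCC
 * builtins in terms of the __atomic_* routines above.
 */
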
unsigned __sync_add_and_fetch_4(volatile void *vptr, unsigned val)
{
    return __atomic_fetch_add_4(vptr, val, __ATOMIC_SEQ_CST) + val;
}

unsigned __sync_sub_and_fetch_4(volatile void *vptr, unsigned val)
{
    return __atomic_fetch_sub_4(vptr, val, __ATOMIC_SEQ_CST) - val;
}

bool __sync_bool_compare_and_swap_4(volatile void *ptr, unsigned old_val,
    unsigned new_val)
{
    return __atomic_compare_exchange_4(ptr, &old_val, new_val, false,
        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

unsigned __sync_val_compare_and_swap_4(volatile void *ptr, unsigned old_val,
    unsigned new_val)
{
    __atomic_compare_exchange_4(ptr, &old_val, new_val, false,
        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return old_val;
}