- Timestamp: 2009-12-02T23:33:48Z (16 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 4924675
- Parents: 089d746 (diff), 8d04f709 (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: uspace/lib/libc
- Files: 1 added, 12 edited
uspace/lib/libc/arch/amd64/include/atomic.h
r089d746 → r84b14e2

 #ifndef LIBC_amd64_ATOMIC_H_
 #define LIBC_amd64_ATOMIC_H_
+
+#define LIBC_ARCH_ATOMIC_H_
+
+#include <atomicdflt.h>
 
 static inline void atomic_inc(atomic_t *val) {
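This changeset also adds a new <atomicdflt.h> header (the "1 added" file above), whose contents are not shown in this merge diff. Judging from the definitions removed from include/atomic.h further down and from the LIBC_ARCH_ATOMIC_H_ guard each port now defines before including it, the default header plausibly looks like the sketch below; the LIBC_ATOMICDFLT_H_ guard name and the #error check are assumptions, not the actual file.

/* Hypothetical sketch of <atomicdflt.h>; the real file added by this
 * changeset is not displayed in the diff above. */
#ifndef LIBC_ATOMICDFLT_H_
#define LIBC_ATOMICDFLT_H_

#ifndef LIBC_ARCH_ATOMIC_H_
#error This file cannot be included directly, include <atomic.h> instead.
#endif

/* Default atomic counter type, formerly defined in <atomic.h>. */
typedef struct atomic {
	volatile long count;
} atomic_t;

static inline void atomic_set(atomic_t *val, long i)
{
	val->count = i;
}

static inline long atomic_get(atomic_t *val)
{
	return val->count;
}

#endif

The arm32 port (next file) does not pick up these defaults at all; it inlines its own definitions, including a cas() that is explicitly flagged as not atomic.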
uspace/lib/libc/arch/arm32/include/atomic.h
r089d746 → r84b14e2

 #define LIBC_arm32_ATOMIC_H_
 
+#include <bool.h>
+
+typedef struct atomic {
+	volatile long count;
+} atomic_t;
+
+static inline void atomic_set(atomic_t *val, long i)
+{
+	val->count = i;
+}
+
+static inline long atomic_get(atomic_t *val)
+{
+	return val->count;
+}
+
+static inline bool cas(atomic_t *val, long ov, long nv)
+{
+	/* FIXME: is not atomic */
+	if (val->count == ov) {
+		val->count = nv;
+		return true;
+	}
+	return false;
+}
+
 /** Atomic addition.
  *
…
 	volatile long * mem = &(val->count);
 
+	/* FIXME: is not atomic, is broken */
 	asm volatile (
 		"1:\n"
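The arm32 header inlines its own atomic_t and a cas() that is explicitly marked /* FIXME: is not atomic */. Purely as an illustration of what closing that FIXME could look like (not part of this changeset, valid only on ARMv6 or later where the exclusive load/store instructions exist; cas_ldrex is a hypothetical name), an atomic compare-and-swap could be sketched roughly like this:

/* Illustrative sketch only: atomic CAS using LDREX/STREX (ARMv6+). */
static inline bool cas_ldrex(atomic_t *val, long ov, long nv)
{
	long tmp;
	long ret;

	asm volatile (
		"1:\n"
		"	ldrex %0, [%2]\n"	/* exclusively load the current value */
		"	mov %1, #0\n"		/* assume failure */
		"	teq %0, %3\n"		/* does it match the expected value? */
		"	bne 2f\n"
		"	strex %0, %4, [%2]\n"	/* try to store the new value */
		"	teq %0, #0\n"		/* 0 means the exclusive store succeeded */
		"	bne 1b\n"		/* reservation lost, retry */
		"	mov %1, #1\n"
		"2:\n"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (&val->count), "r" (ov), "r" (nv)
		: "cc", "memory"
	);

	return ret != 0;
}

Older ARM cores lack ldrex/strex, which is presumably why the header ships the non-atomic placeholder with a FIXME instead.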
uspace/lib/libc/arch/ia32/Makefile.inc
r089d746 → r84b14e2

 	arch/$(UARCH)/src/setjmp.S
 
+GCC_CFLAGS += -march=pentium
 LFLAGS += -N
 
uspace/lib/libc/arch/ia32/include/atomic.h
r089d746 → r84b14e2

 #ifndef LIBC_ia32_ATOMIC_H_
 #define LIBC_ia32_ATOMIC_H_
+
+#define LIBC_ARCH_ATOMIC_H_
+
+#include <atomicdflt.h>
 
 static inline void atomic_inc(atomic_t *val) {
uspace/lib/libc/arch/ia64/include/atomic.h
r089d746 → r84b14e2

 #ifndef LIBC_ia64_ATOMIC_H_
 #define LIBC_ia64_ATOMIC_H_
+
+#define LIBC_ARCH_ATOMIC_H_
+
+#include <atomicdflt.h>
 
 static inline void atomic_inc(atomic_t *val)
uspace/lib/libc/arch/mips32/include/atomic.h
r089d746 → r84b14e2

 #ifndef LIBC_mips32_ATOMIC_H_
 #define LIBC_mips32_ATOMIC_H_
+
+#define LIBC_ARCH_ATOMIC_H_
+
+#include <atomicdflt.h>
 
 #define atomic_inc(x) ((void) atomic_add(x, 1))
uspace/lib/libc/arch/ppc32/include/atomic.h
r089d746 → r84b14e2

 #ifndef LIBC_ppc32_ATOMIC_H_
 #define LIBC_ppc32_ATOMIC_H_
+
+#define LIBC_ARCH_ATOMIC_H_
+
+#include <atomicdflt.h>
 
 static inline void atomic_inc(atomic_t *val)
uspace/lib/libc/arch/sparc64/include/atomic.h
r089d746 → r84b14e2

 #define LIBC_sparc64_ATOMIC_H_
 
+#define LIBC_ARCH_ATOMIC_H_
+
+#include <atomicdflt.h>
 #include <sys/types.h>
 
uspace/lib/libc/generic/futex.c
r089d746 → r84b14e2

 #include <atomic.h>
 #include <libc.h>
-#include <stdio.h>
 #include <sys/types.h>
-#include <kernel/synch/synch.h>
-
-/*
- * Note about race conditions.
- * Because of non-atomic nature of operations performed sequentially on the
- * futex counter and the futex wait queue, there is a race condition:
- *
- * (wq->missed_wakeups == 1) && (futex->count = 1)
- *
- * Scenario 1 (wait queue timeout vs. futex_up()):
- * 1. assume wq->missed_wakeups == 0 && futex->count == -1
- *    (ie. thread A sleeping, thread B in the critical section)
- * 2. A receives timeout and gets removed from the wait queue
- * 3. B wants to leave the critical section and calls futex_up()
- * 4. B thus changes futex->count from -1 to 0
- * 5. B has to call SYS_FUTEX_WAKEUP syscall to wake up the sleeping thread
- * 6. B finds the wait queue empty and changes wq->missed_wakeups from 0 to 1
- * 7. A fixes futex->count (i.e. the number of waiting threads) by changing it
- *    from 0 to 1
- *
- * Scenario 2 (conditional down operation vs. futex_up)
- * 1. assume wq->missed_wakeups == 0 && futex->count == 0
- *    (i.e. thread A is in the critical section)
- * 2. thread B performs futex_trydown() operation and changes futex->count from
- *    0 to -1
- *    B is now obliged to call SYS_FUTEX_SLEEP syscall
- * 3. A wants to leave the critical section and does futex_up()
- * 4. A thus changes futex->count from -1 to 0 and must call SYS_FUTEX_WAKEUP
- *    syscall
- * 5. B finds the wait queue empty and immediatelly aborts the conditional sleep
- * 6. No thread is queueing in the wait queue so wq->missed_wakeups changes from
- *    0 to 1
- * 6. B fixes futex->count (i.e. the number of waiting threads) by changing it
- *    from 0 to 1
- *
- * Both scenarios allow two threads to be in the critical section
- * simultaneously. One without kernel intervention and the other through
- * wq->missed_wakeups being 1.
- *
- * To mitigate this problem, futex_down_timeout() detects that the syscall
- * didn't sleep in the wait queue, fixes the futex counter and RETRIES the
- * whole operation again.
- */
 
 /** Initialize futex counter.
…
 }
 
-int futex_down(futex_t *futex)
-{
-	return futex_down_timeout(futex, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
-}
-
-int futex_trydown(futex_t *futex)
-{
-	return futex_down_timeout(futex, SYNCH_NO_TIMEOUT,
-	    SYNCH_FLAGS_NON_BLOCKING);
-}
-
 /** Try to down the futex.
  *
  * @param futex Futex.
- * @param usec Microseconds to wait. Zero value means sleep without
- *		timeout.
- * @param flags Select mode of operation. See comment for
- *		waitq_sleep_timeout().
+ * @return Non-zero if the futex was acquired.
+ * @return Zero if the futex was not acquired.
+ */
+int futex_trydown(futex_t *futex)
+{
+	return cas(futex, 1, 0);
+}
+
+/** Down the futex.
  *
- * @return ENOENT if there is no such virtual address. One of
- *	   ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED on success or
- *	   ESYNCH_TIMEOUT if the lock was not acquired because of
- *	   a timeout or ESYNCH_WOULD_BLOCK if the operation could
- *	   not be carried out atomically (if requested so).
+ * @param futex Futex.
+ * @return ENOENT if there is no such virtual address.
+ * @return Zero in the uncontended case.
+ * @return Otherwise one of ESYNCH_OK_ATOMIC or ESYNCH_OK_BLOCKED.
  */
-int futex_down_timeout(futex_t *futex, uint32_t usec, int flags)
+int futex_down(futex_t *futex)
 {
-	int rc;
-
-	while (atomic_predec(futex) < 0) {
-		rc = __SYSCALL3(SYS_FUTEX_SLEEP, (sysarg_t) &futex->count,
-		    (sysarg_t) usec, (sysarg_t) flags);
-
-		switch (rc) {
-		case ESYNCH_OK_ATOMIC:
-			/*
-			 * Because of a race condition between timeout and
-			 * futex_up() and between conditional
-			 * futex_down_timeout() and futex_up(), we have to give
-			 * up and try again in this special case.
-			 */
-			atomic_inc(futex);
-			break;
+	if (atomic_predec(futex) < 0)
+		return __SYSCALL1(SYS_FUTEX_SLEEP, (sysarg_t) &futex->count);
 
-		case ESYNCH_TIMEOUT:
-			atomic_inc(futex);
-			return ESYNCH_TIMEOUT;
-			break;
-
-		case ESYNCH_WOULD_BLOCK:
-			/*
-			 * The conditional down operation should be implemented
-			 * this way. The userspace-only variant tends to
-			 * accumulate missed wakeups in the kernel futex wait
-			 * queue.
-			 */
-			atomic_inc(futex);
-			return ESYNCH_WOULD_BLOCK;
-			break;
-
-		case ESYNCH_OK_BLOCKED:
-			/*
-			 * Enter the critical section.
-			 * The futex counter has already been incremented for
-			 * us.
-			 */
-			return ESYNCH_OK_BLOCKED;
-			break;
-		default:
-			return rc;
-		}
-	}
-
-	/*
-	 * Enter the critical section.
-	 */
-	return ESYNCH_OK_ATOMIC;
+	return 0;
 }
 
…
  *
  * @param futex Futex.
- *
- * @return ENOENT if there is no such virtual address. Otherwise
- *	   zero.
+ * @return ENOENT if there is no such virtual address.
+ * @return Zero in the uncontended case.
  */
 int futex_up(futex_t *futex)
 {
-	long val;
-
-	val = atomic_postinc(futex);
-	if (val < 0)
+	if (atomic_postinc(futex) < 0)
 		return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &futex->count);
 
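After this change the libc futex is a plain counting primitive: futex_trydown() is a single cas() of the counter from 1 to 0, futex_down() enters the kernel via SYS_FUTEX_SLEEP only when atomic_predec() drops the counter below zero, and futex_up() issues SYS_FUTEX_WAKEUP only when atomic_postinc() shows there are waiters. A minimal mutex-style usage sketch follows; counter_init, bump, bump_if_uncontended and shared_counter are hypothetical names, and the futex is seeded with one free permit via futex_initialize(), as seen elsewhere in this changeset.

#include <futex.h>

/* A single-permit futex used as a simple mutex protecting shared_counter. */
static futex_t lock;
static int shared_counter;

void counter_init(void)
{
	/* One free permit: the first futex_down() succeeds without sleeping. */
	futex_initialize(&lock, 1);
}

void bump(void)
{
	futex_down(&lock);	/* fast path: one atomic_predec(), no syscall */
	shared_counter++;	/* critical section */
	futex_up(&lock);	/* calls SYS_FUTEX_WAKEUP only if someone blocked */
}

int bump_if_uncontended(void)
{
	/* Non-blocking attempt: succeeds only while the counter is still 1. */
	if (!futex_trydown(&lock))
		return 0;

	shared_counter++;
	futex_up(&lock);
	return 1;
}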
uspace/lib/libc/generic/time.c
r089d746 → r84b14e2

 #include <unistd.h>
 #include <atomic.h>
-#include <futex.h>
 #include <sysinfo.h>
 #include <ipc/services.h>
+#include <libc.h>
 
 #include <sysinfo.h>
…
 int usleep(unsigned long usec)
 {
-	atomic_t futex = FUTEX_INITIALIZER;
-
-	futex_initialize(&futex, 0);
-	futex_down_timeout(&futex, usec, 0);
+	(void) __SYSCALL1(SYS_THREAD_USLEEP, usec);
 	return 0;
 }
…
 unsigned int sleep(unsigned int seconds)
 {
-	atomic_t futex = FUTEX_INITIALIZER;
-
-	futex_initialize(&futex, 0);
-
 	/* Sleep in 1000 second steps to support
 	   full argument range */
…
 		unsigned int period = (seconds > 1000) ? 1000 : seconds;
 
-		futex_down_timeout(&futex, period * 1000000, 0);
+		usleep(period * 1000000);
 		seconds -= period;
 	}
uspace/lib/libc/include/atomic.h
r089d746 → r84b14e2

 /*
- * Copyright (c) 2006 Jakub Jermar
+ * Copyright (c) 2009 Jakub Jermar
  * All rights reserved.
  *
…
 #define LIBC_ATOMIC_H_
 
-typedef struct atomic {
-	volatile long count;
-} atomic_t;
-
 #include <libarch/atomic.h>
-
-static inline void atomic_set(atomic_t *val, long i)
-{
-	val->count = i;
-}
-
-static inline long atomic_get(atomic_t *val)
-{
-	return val->count;
-}
 
 #endif
uspace/lib/libc/include/futex.h
r089d746 → r84b14e2

 extern int futex_down(futex_t *futex);
 extern int futex_trydown(futex_t *futex);
-extern int futex_down_timeout(futex_t *futex, uint32_t usec, int flags);
 extern int futex_up(futex_t *futex);
 