Changes in kernel/generic/src/synch/mutex.c [e88eb48:9f2f5ee] in mainline
Files: 1 edited
Legend: unprefixed lines are unmodified; lines prefixed with + were added; lines prefixed with - were removed.
kernel/generic/src/synch/mutex.c
--- re88eb48
+++ r9f2f5ee

 /*
  * Copyright (c) 2001-2004 Jakub Jermar
+ * Copyright (c) 2025 Jiří Zárevúcky
  * All rights reserved.
  *
…
 #include <assert.h>
 #include <errno.h>
+#include <proc/thread.h>
+#include <stdatomic.h>
 #include <synch/mutex.h>
 #include <synch/semaphore.h>
-#include <arch.h>
-#include <stacktrace.h>
-#include <cpu.h>
-#include <proc/thread.h>

 /** Initialize mutex.
…
 void mutex_initialize(mutex_t *mtx, mutex_type_t type)
 {
-    mtx->type = type;
-    mtx->owner = NULL;
-    mtx->nesting = 0;
-    semaphore_initialize(&mtx->sem, 1);
+    *mtx = MUTEX_INITIALIZER(*mtx, type);
+}
+
+/** A race in mtx->owner access is unavoidable, so we have to make
+ * access to it formally atomic. These are convenience functions to
+ * read/write the variable without memory barriers, since we don't need
+ * them and C11 atomics default to the strongest possible memory ordering
+ * by default, which is utterly ridiculous.
+ */
+static inline thread_t *_get_owner(mutex_t *mtx)
+{
+    return atomic_load_explicit(&mtx->owner, memory_order_relaxed);
+}
+
+/** Counterpart to _get_owner(). */
+static inline void _set_owner(mutex_t *mtx, thread_t *owner)
+{
+    atomic_store_explicit(&mtx->owner, owner, memory_order_relaxed);
 }

…
 bool mutex_locked(mutex_t *mtx)
 {
-    return semaphore_count_get(&mtx->sem) <= 0;
+    if (!THREAD)
+        return mtx->nesting > 0;
+
+    return _get_owner(mtx) == THREAD;
 }
-
-#define MUTEX_DEADLOCK_THRESHOLD 100000000

 /** Acquire mutex.
  *
- * Timeout mode and non-blocking mode can be requested.
+ * This operation is uninterruptible and cannot fail.
+ */
+void mutex_lock(mutex_t *mtx)
+{
+    if (!THREAD) {
+        assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
+        mtx->nesting++;
+        return;
+    }
+
+    if (_get_owner(mtx) == THREAD) {
+        /* This will also detect nested locks on a non-recursive mutex. */
+        assert(mtx->type == MUTEX_RECURSIVE);
+        assert(mtx->nesting > 0);
+        mtx->nesting++;
+        return;
+    }
+
+    semaphore_down(&mtx->sem);
+
+    _set_owner(mtx, THREAD);
+    assert(mtx->nesting == 0);
+    mtx->nesting = 1;
+}
+
+/** Acquire mutex with timeout.
  *
  * @param mtx Mutex.
  * @param usec Timeout in microseconds.
- * @param flags Specify mode of operation.
  *
- * For exact description of possible combinations of usec and flags, see
- * comment for waitq_sleep_timeout().
- *
- * @return See comment for waitq_sleep_timeout().
- *
+ * @return EOK if lock was successfully acquired, something else otherwise.
  */
-errno_t _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, unsigned int flags)
+errno_t mutex_lock_timeout(mutex_t *mtx, uint32_t usec)
 {
-    errno_t rc;
-
-    if (mtx->type == MUTEX_PASSIVE && THREAD) {
-        rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
-    } else if (mtx->type == MUTEX_RECURSIVE) {
-        assert(THREAD);
-
-        if (mtx->owner == THREAD) {
-            mtx->nesting++;
-            return EOK;
-        } else {
-            rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
-            if (rc == EOK) {
-                mtx->owner = THREAD;
-                mtx->nesting = 1;
-            }
-        }
-    } else {
-        assert((mtx->type == MUTEX_ACTIVE) || !THREAD);
-        assert(usec == SYNCH_NO_TIMEOUT);
-        assert(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
-
-        unsigned int cnt = 0;
-        bool deadlock_reported = false;
-        do {
-            if (cnt++ > MUTEX_DEADLOCK_THRESHOLD) {
-                printf("cpu%u: looping on active mutex %p\n",
-                    CPU->id, mtx);
-                stack_trace();
-                cnt = 0;
-                deadlock_reported = true;
-            }
-            rc = semaphore_trydown(&mtx->sem);
-        } while (rc != EOK && !(flags & SYNCH_FLAGS_NON_BLOCKING));
-        if (deadlock_reported)
-            printf("cpu%u: not deadlocked\n", CPU->id);
+    if (!THREAD) {
+        assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
+        mtx->nesting++;
+        return EOK;
     }

-    return rc;
+    if (_get_owner(mtx) == THREAD) {
+        assert(mtx->type == MUTEX_RECURSIVE);
+        assert(mtx->nesting > 0);
+        mtx->nesting++;
+        return EOK;
+    }
+
+    errno_t rc = semaphore_down_timeout(&mtx->sem, usec);
+    if (rc != EOK)
+        return rc;
+
+    _set_owner(mtx, THREAD);
+    assert(mtx->nesting == 0);
+    mtx->nesting = 1;
+    return EOK;
+}
+
+/** Attempt to acquire mutex without blocking.
+ *
+ * @return EOK if lock was successfully acquired, something else otherwise.
+ */
+errno_t mutex_trylock(mutex_t *mtx)
+{
+    return mutex_lock_timeout(mtx, 0);
 }

…
 void mutex_unlock(mutex_t *mtx)
 {
-    if (mtx->type == MUTEX_RECURSIVE) {
-        assert(mtx->owner == THREAD);
-        if (--mtx->nesting > 0)
-            return;
-        mtx->owner = NULL;
+    if (--mtx->nesting > 0) {
+        assert(mtx->type == MUTEX_RECURSIVE);
+        return;
     }
+
+    assert(mtx->nesting == 0);
+
+    if (!THREAD)
+        return;
+
+    assert(_get_owner(mtx) == THREAD);
+    _set_owner(mtx, NULL);
+
     semaphore_up(&mtx->sem);
 }
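For orientation, a minimal usage sketch of the mutex API as it stands after this changeset. It only uses names that appear in the diff above (mutex_initialize, MUTEX_PASSIVE, mutex_lock, mutex_trylock, mutex_unlock, EOK); the example_lock variable and example() function are hypothetical, and the sketch assumes it runs in kernel code where the THREAD pointer is valid.

    #include <errno.h>
    #include <synch/mutex.h>

    /* Hypothetical lock used only for illustration. */
    static mutex_t example_lock;

    static void example(void)
    {
        /* One-step setup; replaces the old field-by-field initialization. */
        mutex_initialize(&example_lock, MUTEX_PASSIVE);

        /* Uninterruptible acquire; per the new doc comment it cannot fail. */
        mutex_lock(&example_lock);
        /* ... critical section ... */
        mutex_unlock(&example_lock);

        /* Non-blocking attempt, now implemented as a zero-timeout lock. */
        if (mutex_trylock(&example_lock) == EOK) {
            /* ... critical section ... */
            mutex_unlock(&example_lock);
        }
    }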