source: mainline/kernel/generic/src/synch/rwlock.c@384c488

Last change on this file since 384c488 was 08a19ba, checked in by Jakub Jermar <jakub@…>, 17 years ago

Support for active mutexes. Active mutexes implement busy waiting, pretty much
in the same way as spinlocks, but can be passed to condition variables, which is
the motivation for this enhancement.

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */

/**
 * @file
 * @brief Reader/Writer locks.
 *
 * A reader/writer lock can be held by multiple readers at a time,
 * or exclusively by a single writer.
 *
 * These locks are not recursive.
 * Because a technique called direct hand-off is used and because
 * waiting takes place in a single wait queue, neither readers
 * nor writers will suffer starvation.
 *
 * If a writer followed by a reader is waiting for the rwlock
 * and the writer times out, all leading readers are automatically woken up
 * and allowed in.
 */
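
/*
 * Illustrative usage sketch, assuming the rwlock_read_lock(),
 * rwlock_write_lock(), rwlock_read_unlock() and rwlock_write_unlock()
 * convenience wrappers declared in <synch/rwlock.h>, and a hypothetical
 * piece of shared data:
 *
 *      rwlock_t guard;
 *      int shared_counter = 0;
 *
 *      rwlock_initialize(&guard);
 *
 *      rwlock_read_lock(&guard);       // any number of readers may enter
 *      int snapshot = shared_counter;  // read the protected data
 *      rwlock_read_unlock(&guard);
 *
 *      rwlock_write_lock(&guard);      // a writer holds the lock alone
 *      shared_counter++;               // modify the protected data
 *      rwlock_write_unlock(&guard);
 */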

/*
 * NOTE ON rwlock_holder_type
 * This field is set on an attempt to acquire the exclusive mutex
 * to the respective value depending on whether the caller is a reader
 * or a writer. The field is examined only if the thread had been
 * previously blocked on the exclusive mutex. Thus it is safe
 * to store the rwlock type in the thread structure, because
 * each thread can block on only one rwlock at a time.
 */

#include <synch/rwlock.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <synch/waitq.h>
#include <synch/synch.h>
#include <adt/list.h>
#include <arch/asm.h>
#include <arch.h>
#include <proc/thread.h>
#include <panic.h>

#define ALLOW_ALL		0
#define ALLOW_READERS_ONLY	1

static void let_others_in(rwlock_t *rwl, int readers_only);
static void release_spinlock(void *arg);

/** Initialize reader/writer lock
 *
 * Initialize reader/writer lock.
 *
 * @param rwl Reader/Writer lock.
 */
void rwlock_initialize(rwlock_t *rwl)
{
        spinlock_initialize(&rwl->lock, "rwlock_t");
        mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
        rwl->readers_in = 0;
}

/** Acquire reader/writer lock for writing
 *
 * Acquire reader/writer lock for writing.
 * Timeout and willingness to block may be specified.
 *
 * @param rwl Reader/Writer lock.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of operation.
 *
 * For exact description of possible combinations of
 * usec and flags, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
{
        ipl_t ipl;
        int rc;

        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        THREAD->rwlock_holder_type = RWLOCK_WRITER;
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);

        /*
         * Writers take the easy part.
         * They just need to acquire the exclusive mutex.
         */
        rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
        if (SYNCH_FAILED(rc)) {

                /*
                 * Lock operation timed out or was interrupted.
                 * The state of rwl is UNKNOWN at this point.
                 * No claims about its holder can be made.
                 */

                ipl = interrupts_disable();
                spinlock_lock(&rwl->lock);
                /*
                 * Now that rwl is locked, we can inspect it again.
                 * If it is held by some readers already, we can let
                 * readers from the head of the wait queue in.
                 */
                if (rwl->readers_in)
                        let_others_in(rwl, ALLOW_READERS_ONLY);
                spinlock_unlock(&rwl->lock);
                interrupts_restore(ipl);
        }

        return rc;
}
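
/*
 * Illustrative sketch of a timed write acquisition, assuming the 'guard'
 * lock from a caller's context and SYNCH_FLAGS_NONE from <synch/synch.h>
 * (the one-second timeout is arbitrary):
 *
 *      rc = _rwlock_write_lock_timeout(&guard, 1000000, SYNCH_FLAGS_NONE);
 *      if (!SYNCH_FAILED(rc)) {
 *              // ...modify the protected data...
 *              rwlock_write_unlock(&guard);
 *      } else {
 *              // the lock was not acquired within one second
 *      }
 */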

/** Acquire reader/writer lock for reading
 *
 * Acquire reader/writer lock for reading.
 * Timeout and willingness to block may be specified.
 *
 * @param rwl Reader/Writer lock.
 * @param usec Timeout in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of possible combinations of
 * usec and flags, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
{
        int rc;
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        THREAD->rwlock_holder_type = RWLOCK_READER;
        spinlock_unlock(&THREAD->lock);

        spinlock_lock(&rwl->lock);

        /*
         * Find out whether we can get what we want without blocking.
         */
        rc = mutex_trylock(&rwl->exclusive);
        if (SYNCH_FAILED(rc)) {

                /*
                 * The 'exclusive' mutex is being held by someone else.
                 * If the holder is a reader and there is no one
                 * else waiting for it, we can enter the critical
                 * section.
                 */

                if (rwl->readers_in) {
                        spinlock_lock(&rwl->exclusive.sem.wq.lock);
                        if (list_empty(&rwl->exclusive.sem.wq.head)) {
                                /*
                                 * We can enter.
                                 */
                                spinlock_unlock(&rwl->exclusive.sem.wq.lock);
                                goto shortcut;
                        }
                        spinlock_unlock(&rwl->exclusive.sem.wq.lock);
                }

                /*
                 * In order to prevent a race condition in which a reader
                 * could block another reader at the head of the waitq,
                 * we register a function to unlock rwl->lock
                 * after this thread is put to sleep.
                 */
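                /*
                 * On UP, the spinlock operations ignore their lock argument,
                 * so passing NULL below is sufficient.
                 */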
#ifdef CONFIG_SMP
                thread_register_call_me(release_spinlock, &rwl->lock);
#else
                thread_register_call_me(release_spinlock, NULL);
#endif

                rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
                switch (rc) {
                case ESYNCH_WOULD_BLOCK:
                        /*
                         * release_spinlock() wasn't called.
                         */
                        thread_register_call_me(NULL, NULL);
                        spinlock_unlock(&rwl->lock);
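                        /* Fall through. */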
                case ESYNCH_TIMEOUT:
                case ESYNCH_INTERRUPTED:
                        /*
                         * The sleep timed out or was interrupted.
                         * We just restore the interrupt priority level.
                         */
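                        /* Fall through. */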
                case ESYNCH_OK_BLOCKED:
                        /*
                         * We were woken up with rwl->readers_in already
                         * incremented.
                         *
                         * Note that this arrangement avoids a race condition
                         * between two concurrent readers. (The race is avoided
                         * if 'exclusive' is locked at the same time as
                         * 'readers_in' is incremented. Same time means both
                         * events happen atomically when rwl->lock is held.)
                         */
                        interrupts_restore(ipl);
                        break;
                case ESYNCH_OK_ATOMIC:
                        panic("_mutex_lock_timeout() == ESYNCH_OK_ATOMIC\n");
                        break;
                default:
                        panic("invalid ESYNCH\n");
                        break;
                }
                return rc;
        }

shortcut:

        /*
         * We can increment readers_in only if we didn't go to sleep.
         * For sleepers, let_others_in() will do the job.
         */
        rwl->readers_in++;

        spinlock_unlock(&rwl->lock);
        interrupts_restore(ipl);

        return ESYNCH_OK_ATOMIC;
}
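
/*
 * Illustrative sketch of a conditional (non-blocking) read acquisition,
 * assuming SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NON_BLOCKING from
 * <synch/synch.h>; ESYNCH_WOULD_BLOCK is returned when the lock cannot
 * be taken for reading right away:
 *
 *      rc = _rwlock_read_lock_timeout(&guard, SYNCH_NO_TIMEOUT,
 *          SYNCH_FLAGS_NON_BLOCKING);
 *      if (!SYNCH_FAILED(rc)) {
 *              // ...read the protected data...
 *              rwlock_read_unlock(&guard);
 *      }
 */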

/** Release reader/writer lock held by writer
 *
 * Release reader/writer lock held by writer.
 * Hand off reader/writer lock ownership directly
 * to waiting readers or a writer.
 *
 * @param rwl Reader/Writer lock.
 */
void rwlock_write_unlock(rwlock_t *rwl)
{
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&rwl->lock);
        let_others_in(rwl, ALLOW_ALL);
        spinlock_unlock(&rwl->lock);
        interrupts_restore(ipl);
}

/** Release reader/writer lock held by reader
 *
 * Release reader/writer lock held by reader.
 * Hand off reader/writer lock ownership directly
 * to a waiting writer, or do nothing if other
 * readers still hold the lock.
 *
 * @param rwl Reader/Writer lock.
 */
void rwlock_read_unlock(rwlock_t *rwl)
{
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&rwl->lock);
        if (!--rwl->readers_in)
                let_others_in(rwl, ALLOW_ALL);
        spinlock_unlock(&rwl->lock);
        interrupts_restore(ipl);
}

/** Direct handoff of reader/writer lock ownership.
 *
 * Direct handoff of reader/writer lock ownership
 * to waiting readers or a writer.
 *
 * Must be called with rwl->lock locked.
 * Must be called with interrupts disabled.
 *
 * @param rwl Reader/Writer lock.
 * @param readers_only See the description below.
 *
 * If readers_only is false (the unlock scenario):
 * let the first sleeper on the 'exclusive' mutex in, no matter
 * whether it is a reader or a writer. If there are more leading
 * readers in line, let each of them in.
 *
 * Otherwise (the timeout scenario):
 * let all leading readers in.
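 *
 * Example (illustrative): with a wait queue of [reader, reader, writer,
 * reader] and readers_only false, the first two readers are handed the
 * lock and the hand-off stops at the writer; the trailing reader keeps
 * waiting behind the writer, which preserves FIFO ordering and prevents
 * starvation.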
 */
static void let_others_in(rwlock_t *rwl, int readers_only)
{
        rwlock_type_t type = RWLOCK_NONE;
        thread_t *t = NULL;
        bool one_more = true;

        spinlock_lock(&rwl->exclusive.sem.wq.lock);

        if (!list_empty(&rwl->exclusive.sem.wq.head))
                t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
                    wq_link);
        do {
                if (t) {
                        spinlock_lock(&t->lock);
                        type = t->rwlock_holder_type;
                        spinlock_unlock(&t->lock);
                }

                /*
                 * If readers_only is true, we wake all leading readers
                 * if and only if rwl is locked by another reader.
                 * Assumption: readers_only ==> rwl->readers_in
                 */
                if (readers_only && (type != RWLOCK_READER))
                        break;

                if (type == RWLOCK_READER) {
                        /*
                         * Waking up a reader.
                         * We are responsible for incrementing rwl->readers_in
                         * for it.
                         */
                        rwl->readers_in++;
                }

                /*
                 * Only the last iteration through this loop can increment
                 * rwl->exclusive.sem.wq.missed_wakeups. All preceding
                 * iterations will wake up a thread.
                 */
                /*
                 * We call the internal version of waitq_wakeup(), which
                 * relies on the fact that the waitq is already locked.
                 */
                _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);

                t = NULL;
                if (!list_empty(&rwl->exclusive.sem.wq.head)) {
                        t = list_get_instance(rwl->exclusive.sem.wq.head.next,
                            thread_t, wq_link);
                        if (t) {
                                spinlock_lock(&t->lock);
                                if (t->rwlock_holder_type != RWLOCK_READER)
                                        one_more = false;
                                spinlock_unlock(&t->lock);
                        }
                }
        } while ((type == RWLOCK_READER) && t && one_more);

        spinlock_unlock(&rwl->exclusive.sem.wq.lock);
}

/** Release spinlock callback
 *
 * This is a callback function invoked from the scheduler.
 * The callback is registered in _rwlock_read_lock_timeout().
 *
 * @param arg Spinlock.
 */
static void release_spinlock(void *arg)
{
        spinlock_unlock((spinlock_t *) arg);
}

/** @}
 */