source: mainline/kernel/generic/src/synch/rwlock.c@ b3b7e14a

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since b3b7e14a was da1bafb, checked in by Martin Decky <martin@…>, 16 years ago

major code revision

  • replace spinlocks taken with interrupts disabled with irq_spinlocks
  • change spacing (not indentation) to be tab-size independent
  • use unsigned integer types where appropriate (especially bit flags)
  • visual separation
  • remove argument names in function prototypes
  • string changes
  • correct some formatting directives
  • replace various cryptic single-character variables (t, a, m, c, b, etc.) with proper identifiers (thread, task, timeout, as, itm, itc, etc.)
  • unify some assembler constructs
  • unused page table levels are now optimized out in compile time
  • replace several ints (with boolean semantics) with bools
  • use specifically sized types instead of generic types where appropriate (size_t, uint32_t, btree_key_t)
  • improve comments
  • split asserts with conjunction into multiple independent asserts
  • Property mode set to 100644
File size: 10.8 KB
Line 
1/*
2 * Copyright (c) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup sync
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Reader/Writer locks.
36 *
37 * A reader/writer lock can be held by multiple readers at a time.
38 * Or it can be exclusively held by a sole writer at a time.
39 *
40 * These locks are not recursive.
41 * Because a technique called direct hand-off is used and because
42 * waiting takes place in a single wait queue, neither readers
43 * nor writers will suffer starvation.
44 *
45 * If there is a writer followed by a reader waiting for the rwlock
46 * and the writer times out, all leading readers are automatically woken up
47 * and allowed in.
48 */
49
50/*
51 * NOTE ON rwlock_holder_type
52 * This field is set on an attempt to acquire the exclusive mutex
53 * to the respective value depending whether the caller is a reader
54 * or a writer. The field is examined only if the thread had been
 * previously blocked on the exclusive mutex. Thus it is safe
56 * to store the rwlock type in the thread structure, because
57 * each thread can block on only one rwlock at a time.
58 */
59
60#include <synch/rwlock.h>
61#include <synch/spinlock.h>
62#include <synch/mutex.h>
63#include <synch/waitq.h>
64#include <synch/synch.h>
65#include <adt/list.h>
66#include <arch/asm.h>
67#include <arch.h>
68#include <proc/thread.h>
69#include <panic.h>
70
71#define ALLOW_ALL 0
72#define ALLOW_READERS_ONLY 1
73
74/** Initialize reader/writer lock
75 *
76 * Initialize reader/writer lock.
77 *
78 * @param rwl Reader/Writer lock.
79 *
80 */
81void rwlock_initialize(rwlock_t *rwl) {
82 irq_spinlock_initialize(&rwl->lock, "rwl.lock");
83 mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
84 rwl->readers_in = 0;
85}
86
87/** Direct handoff of reader/writer lock ownership.
88 *
89 * Direct handoff of reader/writer lock ownership
90 * to waiting readers or a writer.
91 *
92 * Must be called with rwl->lock locked.
93 * Must be called with interrupts_disable()'d.
94 *
95 * @param rwl Reader/Writer lock.
96 * @param readers_only See the description below.
97 *
98 * If readers_only is false: (unlock scenario)
99 * Let the first sleeper on 'exclusive' mutex in, no matter
100 * whether it is a reader or a writer. If there are more leading
101 * readers in line, let each of them in.
102 *
103 * Otherwise: (timeout scenario)
104 * Let all leading readers in.
105 *
106 */
static void let_others_in(rwlock_t *rwl, int readers_only)
{
	rwlock_type_t type = RWLOCK_NONE;
	thread_t *thread = NULL;
	bool one_more = true;
	
	/*
	 * The wait queue of the exclusive mutex is inspected and modified
	 * below; it stays locked for the entire hand-off.
	 */
	irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
	
	/* Peek at the first sleeper (if any) without removing it. */
	if (!list_empty(&rwl->exclusive.sem.wq.head))
		thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
		    thread_t, wq_link);
	
	do {
		if (thread) {
			/*
			 * rwlock_holder_type was recorded by the thread itself
			 * before it blocked on the exclusive mutex (see the
			 * NOTE ON rwlock_holder_type at the top of this file).
			 */
			irq_spinlock_lock(&thread->lock, false);
			type = thread->rwlock_holder_type;
			irq_spinlock_unlock(&thread->lock, false);
		}
		
		/*
		 * If readers_only is true, we wake all leading readers
		 * if and only if rwl is locked by another reader.
		 * Assumption: readers_only ==> rwl->readers_in
		 *
		 */
		if ((readers_only) && (type != RWLOCK_READER))
			break;
		
		if (type == RWLOCK_READER) {
			/*
			 * Waking up a reader.
			 * We are responsible for incrementing rwl->readers_in
			 * for it.
			 *
			 */
			rwl->readers_in++;
		}
		
		/*
		 * Only the last iteration through this loop can increment
		 * rwl->exclusive.sem.wq.missed_wakeup's. All preceding
		 * iterations will wake up a thread.
		 *
		 */
		
		/*
		 * We call the internal version of waitq_wakeup, which
		 * relies on the fact that the waitq is already locked.
		 *
		 */
		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
		
		thread = NULL;
		/*
		 * Peek at the next sleeper so the loop can stop before
		 * waking a writer behind a run of leading readers.
		 */
		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
			thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
			    thread_t, wq_link);
			
			if (thread) {
				irq_spinlock_lock(&thread->lock, false);
				if (thread->rwlock_holder_type != RWLOCK_READER)
					one_more = false;
				irq_spinlock_unlock(&thread->lock, false);
			}
		}
	} while ((type == RWLOCK_READER) && (thread) && (one_more));
	
	irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
}
175
/** Acquire reader/writer lock for writing
 *
 * Acquire reader/writer lock for writing.
 * Timeout and willingness to block may be specified.
 *
 * (NOTE(review): the original header said "for reading" — it was
 * swapped with the header of _rwlock_read_lock_timeout().)
 *
 * @param rwl Reader/Writer lock.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of operation.
 *
 * For exact description of possible combinations of
 * usec and flags, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 *
 */
int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
{
	/*
	 * Record the holder type first so that let_others_in() can
	 * classify this thread if it ends up blocked on the mutex.
	 */
	irq_spinlock_lock(&THREAD->lock, true);
	THREAD->rwlock_holder_type = RWLOCK_WRITER;
	irq_spinlock_unlock(&THREAD->lock, true);
	
	/*
	 * Writers take the easy part.
	 * They just need to acquire the exclusive mutex.
	 *
	 */
	int rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
	if (SYNCH_FAILED(rc)) {
		/*
		 * Lock operation timed out or was interrupted.
		 * The state of rwl is UNKNOWN at this point.
		 * No claims about its holder can be made.
		 *
		 */
		irq_spinlock_lock(&rwl->lock, true);
		
		/*
		 * Now when rwl is locked, we can inspect it again.
		 * If it is held by some readers already, we can let
		 * readers from the head of the wait queue in.
		 *
		 */
		if (rwl->readers_in)
			let_others_in(rwl, ALLOW_READERS_ONLY);
		
		irq_spinlock_unlock(&rwl->lock, true);
	}
	
	return rc;
}
226
227/** Release spinlock callback
228 *
229 * This is a callback function invoked from the scheduler.
230 * The callback is registered in _rwlock_read_lock_timeout().
231 *
232 * @param arg Spinlock.
233 *
234 */
235static void release_spinlock(void *arg)
236{
237 if (arg != NULL)
238 irq_spinlock_unlock((irq_spinlock_t *) arg, false);
239}
240
/** Acquire reader/writer lock for reading
 *
 * Acquire reader/writer lock for reading.
 * Timeout and willingness to block may be specified.
 *
 * (NOTE(review): the original header said "for writing" — it was
 * swapped with the header of _rwlock_write_lock_timeout().)
 *
 * @param rwl Reader/Writer lock.
 * @param usec Timeout in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of possible combinations of
 * usec and flags, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 *
 */
int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
{
	/*
	 * Since the locking scenarios get a little bit too
	 * complicated, we do not rely on internal irq_spinlock_t
	 * interrupt disabling logic here and control interrupts
	 * manually.
	 *
	 */
	ipl_t ipl = interrupts_disable();
	
	/* Record the holder type, then atomically trade THREAD->lock
	   for rwl->lock (see NOTE ON rwlock_holder_type above). */
	irq_spinlock_lock(&THREAD->lock, false);
	THREAD->rwlock_holder_type = RWLOCK_READER;
	irq_spinlock_pass(&THREAD->lock, &rwl->lock);
	
	/*
	 * Find out whether we can get what we want without blocking.
	 *
	 */
	int rc = mutex_trylock(&rwl->exclusive);
	if (SYNCH_FAILED(rc)) {
		/*
		 * 'exclusive' mutex is being held by someone else.
		 * If the holder is a reader and there is no one
		 * else waiting for it, we can enter the critical
		 * section.
		 *
		 */
		
		if (rwl->readers_in) {
			irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
			if (list_empty(&rwl->exclusive.sem.wq.head)) {
				/*
				 * We can enter.
				 */
				irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
				goto shortcut;
			}
			irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
		}
		
		/*
		 * In order to prevent a race condition when a reader
		 * could block another reader at the head of the waitq,
		 * we register a function to unlock rwl->lock
		 * after this thread is put asleep.
		 *
		 */
#ifdef CONFIG_SMP
		thread_register_call_me(release_spinlock, &rwl->lock);
#else
		/* On UP there is no concurrent CPU to race with;
		   the callback degenerates to a no-op. */
		thread_register_call_me(release_spinlock, NULL);
#endif
		
		rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
		switch (rc) {
		case ESYNCH_WOULD_BLOCK:
			/*
			 * release_spinlock() wasn't called
			 *
			 */
			thread_register_call_me(NULL, NULL);
			irq_spinlock_unlock(&rwl->lock, false);
			/* Fallthrough: interrupts still need restoring. */
		case ESYNCH_TIMEOUT:
		case ESYNCH_INTERRUPTED:
			/*
			 * The sleep timed out.
			 * We just restore interrupt priority level.
			 *
			 */
			/* Fallthrough. */
		case ESYNCH_OK_BLOCKED:
			/*
			 * We were woken with rwl->readers_in already
			 * incremented.
			 *
			 * Note that this arrangement avoids race condition
			 * between two concurrent readers. (Race is avoided if
			 * 'exclusive' is locked at the same time as
			 * 'readers_in' is incremented. Same time means both
			 * events happen atomically when rwl->lock is held.)
			 *
			 */
			interrupts_restore(ipl);
			break;
		case ESYNCH_OK_ATOMIC:
			/* mutex_trylock() above already failed, so the mutex
			   cannot have been acquired without blocking here. */
			panic("_mutex_lock_timeout() == ESYNCH_OK_ATOMIC.");
			break;
		default:
			panic("Invalid ESYNCH.");
			break;
		}
		return rc;
	}
	
shortcut:
	/*
	 * We can increment readers_in only if we didn't go to sleep.
	 * For sleepers, rwlock_let_others_in() will do the job.
	 *
	 */
	rwl->readers_in++;
	irq_spinlock_unlock(&rwl->lock, false);
	interrupts_restore(ipl);
	
	return ESYNCH_OK_ATOMIC;
}
362
363/** Release reader/writer lock held by writer
364 *
365 * Release reader/writer lock held by writer.
366 * Handoff reader/writer lock ownership directly
367 * to waiting readers or a writer.
368 *
369 * @param rwl Reader/Writer lock.
370 *
371 */
372void rwlock_write_unlock(rwlock_t *rwl)
373{
374 irq_spinlock_lock(&rwl->lock, true);
375 let_others_in(rwl, ALLOW_ALL);
376 irq_spinlock_unlock(&rwl->lock, true);
377}
378
/** Release reader/writer lock held by reader
 *
 * Release reader/writer lock held by reader.
 * Handoff reader/writer lock ownership directly
 * to a waiting writer or don't do anything if more
 * readers possess the lock.
 *
 * @param rwl Reader/Writer lock.
 *
 */
void rwlock_read_unlock(rwlock_t *rwl)
{
	irq_spinlock_lock(&rwl->lock, true);
	
	/* Hand the lock off only when the last reader leaves. */
	if (!--rwl->readers_in)
		let_others_in(rwl, ALLOW_ALL);
	
	irq_spinlock_unlock(&rwl->lock, true);
}
398
399/** @}
400 */
Note: See TracBrowser for help on using the repository browser.