00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00049
00050
00051
00052
00053
00054
00055
00056
00057
00058
00059 #include <synch/rwlock.h>
00060 #include <synch/spinlock.h>
00061 #include <synch/mutex.h>
00062 #include <synch/waitq.h>
00063 #include <synch/synch.h>
00064 #include <adt/list.h>
00065 #include <typedefs.h>
00066 #include <arch/asm.h>
00067 #include <arch.h>
00068 #include <proc/thread.h>
00069 #include <panic.h>
00070
00071 #define ALLOW_ALL 0
00072 #define ALLOW_READERS_ONLY 1
00073
00074 static void let_others_in(rwlock_t *rwl, int readers_only);
00075 static void release_spinlock(void *arg);
00076
00083 void rwlock_initialize(rwlock_t *rwl) {
00084 spinlock_initialize(&rwl->lock, "rwlock_t");
00085 mutex_initialize(&rwl->exclusive);
00086 rwl->readers_in = 0;
00087 }
00088
/** Acquire reader/writer lock for writing, with timeout.
 *
 * @param rwl   Reader/writer lock.
 * @param usec  Timeout in microseconds.
 * @param flags Mode-of-operation flags passed through to
 *              _mutex_lock_timeout() (see synch.h).
 *
 * @return Result of _mutex_lock_timeout() (ESYNCH_* code).
 */
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
{
	ipl_t ipl;
	int rc;

	/*
	 * Mark the current thread as a prospective writer so that
	 * let_others_in() can classify this thread if it ends up
	 * sleeping on the exclusive mutex's wait queue.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->rwlock_holder_type = RWLOCK_WRITER;
	spinlock_unlock(&THREAD->lock);
	interrupts_restore(ipl);

	/*
	 * Writers simply compete for the exclusive mutex; whoever holds
	 * it owns the lock for writing.
	 */
	rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
	if (SYNCH_FAILED(rc)) {
		/*
		 * The sleep failed (timeout, interruption, or refusal to
		 * block).  If readers currently hold the lock, our aborted
		 * attempt may have left readers queued behind us on the
		 * exclusive mutex; wake the leading readers so they can
		 * join the readers already inside.  Only readers may be
		 * admitted here — a writer must not enter while
		 * readers_in is non-zero.
		 */
		ipl = interrupts_disable();
		spinlock_lock(&rwl->lock);
		if (rwl->readers_in)
			let_others_in(rwl, ALLOW_READERS_ONLY);
		spinlock_unlock(&rwl->lock);
		interrupts_restore(ipl);
	}

	return rc;
}
00142
/** Acquire reader/writer lock for reading, with timeout.
 *
 * @param rwl   Reader/writer lock.
 * @param usec  Timeout in microseconds.
 * @param flags Mode-of-operation flags passed through to
 *              _mutex_lock_timeout() (see synch.h).
 *
 * @return ESYNCH_OK_ATOMIC when the lock was taken without sleeping,
 *         otherwise the ESYNCH_* code from _mutex_lock_timeout().
 */
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
{
	int rc;
	ipl_t ipl;

	/* Mark this thread as a reader for let_others_in(). */
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->rwlock_holder_type = RWLOCK_READER;
	spinlock_unlock(&THREAD->lock);

	/*
	 * Note: interrupts stay disabled here on purpose; rwl->lock is
	 * taken next and must be held with interrupts off.
	 */
	spinlock_lock(&rwl->lock);

	/*
	 * Fast path: try to grab the exclusive mutex without sleeping.
	 * Success means no readers and no writer were inside.
	 */
	rc = mutex_trylock(&rwl->exclusive);
	if (SYNCH_FAILED(rc)) {
		/*
		 * The trylock failed, so someone holds 'exclusive'.  If
		 * readers are inside and nobody is sleeping on the mutex's
		 * wait queue, the holders are all readers — we can join
		 * them directly without queueing.
		 */
		if (rwl->readers_in) {
			spinlock_lock(&rwl->exclusive.sem.wq.lock);
			if (list_empty(&rwl->exclusive.sem.wq.head)) {
				/* No writer is waiting; join the readers. */
				spinlock_unlock(&rwl->exclusive.sem.wq.lock);
				goto shortcut;
			}
			spinlock_unlock(&rwl->exclusive.sem.wq.lock);
		}

		/*
		 * We must sleep on the exclusive mutex while still holding
		 * rwl->lock, to keep the readers_in bookkeeping atomic with
		 * the queueing.  The scheduler releases rwl->lock on our
		 * behalf (via the registered call-me hook) once we are
		 * safely asleep.  On non-SMP, spinlocks are presumably
		 * compiled out, hence the NULL argument — TODO confirm.
		 */
#ifdef CONFIG_SMP
		thread_register_call_me(release_spinlock, &rwl->lock);
#else
		thread_register_call_me(release_spinlock, NULL);
#endif

		rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
		switch (rc) {
		case ESYNCH_WOULD_BLOCK:
			/*
			 * We never went to sleep, so the registered hook
			 * was not invoked: deregister it and release
			 * rwl->lock ourselves.
			 */
			thread_register_call_me(NULL, NULL);
			spinlock_unlock(&rwl->lock);
			/* fallthrough */
		case ESYNCH_TIMEOUT:
		case ESYNCH_INTERRUPTED:
			/*
			 * The sleep was aborted; rwl->lock was already
			 * released by the hook.  Just restore interrupts.
			 */
			/* fallthrough */
		case ESYNCH_OK_BLOCKED:
			/*
			 * We were woken by let_others_in(), which already
			 * incremented rwl->readers_in on our behalf while
			 * holding the wait-queue lock, avoiding a race
			 * between concurrent readers.
			 */
			interrupts_restore(ipl);
			break;
		case ESYNCH_OK_ATOMIC:
			/* Impossible: trylock above already failed. */
			panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n");
			break;
		default:
			panic("invalid ESYNCH\n");
			break;
		}
		return rc;
	}

shortcut:
	/*
	 * We hold rwl->lock, so incrementing readers_in is atomic with
	 * (conceptually) holding 'exclusive' — no sleeping was needed.
	 */
	rwl->readers_in++;

	spinlock_unlock(&rwl->lock);
	interrupts_restore(ipl);

	return ESYNCH_OK_ATOMIC;
}
00254
00263 void rwlock_write_unlock(rwlock_t *rwl)
00264 {
00265 ipl_t ipl;
00266
00267 ipl = interrupts_disable();
00268 spinlock_lock(&rwl->lock);
00269 let_others_in(rwl, ALLOW_ALL);
00270 spinlock_unlock(&rwl->lock);
00271 interrupts_restore(ipl);
00272
00273 }
00274
00284 void rwlock_read_unlock(rwlock_t *rwl)
00285 {
00286 ipl_t ipl;
00287
00288 ipl = interrupts_disable();
00289 spinlock_lock(&rwl->lock);
00290 if (!--rwl->readers_in)
00291 let_others_in(rwl, ALLOW_ALL);
00292 spinlock_unlock(&rwl->lock);
00293 interrupts_restore(ipl);
00294 }
00295
00296
/** Admit the next waiter(s) queued on the exclusive mutex.
 *
 * Must be called with rwl->lock held and interrupts disabled.
 *
 * If the first queued thread is a writer (and readers_only is
 * ALLOW_ALL), exactly one thread is woken.  If the first queued
 * thread is a reader, a consecutive run of readers at the head of
 * the queue is woken, each one's entry pre-accounted in
 * rwl->readers_in before its wakeup so that admission is atomic with
 * the bookkeeping.  With ALLOW_READERS_ONLY, a leading writer is not
 * admitted at all.
 *
 * @param rwl          Reader/writer lock.
 * @param readers_only ALLOW_ALL or ALLOW_READERS_ONLY.
 */
void let_others_in(rwlock_t *rwl, int readers_only)
{
	rwlock_type_t type = RWLOCK_NONE;
	thread_t *t = NULL;
	bool one_more = true;

	spinlock_lock(&rwl->exclusive.sem.wq.lock);

	/* Peek at the head of the wait queue, if any. */
	if (!list_empty(&rwl->exclusive.sem.wq.head))
		t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
	do {
		if (t) {
			spinlock_lock(&t->lock);
			type = t->rwlock_holder_type;
			spinlock_unlock(&t->lock);
		}

		/*
		 * A writer at the head must not be admitted when only
		 * readers are allowed (the caller's failed write attempt
		 * is recovering — see _rwlock_write_lock_timeout()).
		 */
		if (readers_only && (type != RWLOCK_READER))
			break;

		if (type == RWLOCK_READER) {
			/*
			 * Account for the reader before waking it, while
			 * still holding the wait-queue lock; the woken
			 * reader relies on this (see the ESYNCH_OK_BLOCKED
			 * case in _rwlock_read_lock_timeout()).
			 */
			rwl->readers_in++;
		}

		/*
		 * Wake the head of the queue.  Note this is also executed
		 * when the queue was empty (t == NULL, type == RWLOCK_NONE
		 * and readers_only == ALLOW_ALL) — presumably to record a
		 * missed wakeup in the wait queue; TODO confirm against
		 * waitq semantics.
		 */
		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);

		/*
		 * Peek at the new head; continue only if it is another
		 * reader, so a whole run of leading readers is admitted
		 * in one call.
		 */
		t = NULL;
		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
			t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
			if (t) {
				spinlock_lock(&t->lock);
				if (t->rwlock_holder_type != RWLOCK_READER)
					one_more = false;
				spinlock_unlock(&t->lock);
			}
		}
	} while ((type == RWLOCK_READER) && t && one_more);

	spinlock_unlock(&rwl->exclusive.sem.wq.lock);
}
00374
00382 void release_spinlock(void *arg)
00383 {
00384 spinlock_unlock((spinlock_t *) arg);
00385 }
00386