Changeset e16e036a in mainline for generic/src/proc/scheduler.c
- Timestamp: 2005-11-07T20:04:30Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: c4e8ed9d
- Parents: d90ca68
- File: 1 edited
--- generic/src/proc/scheduler.c    (rd90ca68)
+++ generic/src/proc/scheduler.c    (re16e036a)
@@ -117,5 +117,5 @@
  *
  */
-struct thread *find_best_thread(void)
+static struct thread *find_best_thread(void)
 {
     thread_t *t;
@@ -223,5 +223,5 @@
  *
  */
-void relink_rq(int start)
+static void relink_rq(int start)
 {
     link_t head;
@@ -255,4 +255,144 @@
 
 
+/** Scheduler stack switch wrapper
+ *
+ * Second part of the scheduler() function
+ * using new stack. Handling the actual context
+ * switch to a new thread.
+ *
+ */
+static void scheduler_separated_stack(void)
+{
+    int priority;
+
+    ASSERT(CPU != NULL);
+
+    if (THREAD) {
+        switch (THREAD->state) {
+            case Running:
+                THREAD->state = Ready;
+                spinlock_unlock(&THREAD->lock);
+                thread_ready(THREAD);
+                break;
+
+            case Exiting:
+                frame_free((__address) THREAD->kstack);
+                if (THREAD->ustack) {
+                    frame_free((__address) THREAD->ustack);
+                }
+
+                /*
+                 * Detach from the containing task.
+                 */
+                spinlock_lock(&TASK->lock);
+                list_remove(&THREAD->th_link);
+                spinlock_unlock(&TASK->lock);
+
+                spinlock_unlock(&THREAD->lock);
+
+                spinlock_lock(&threads_lock);
+                list_remove(&THREAD->threads_link);
+                spinlock_unlock(&threads_lock);
+
+                spinlock_lock(&CPU->lock);
+                if (CPU->fpu_owner == THREAD) CPU->fpu_owner = NULL;
+                spinlock_unlock(&CPU->lock);
+
+                free(THREAD);
+
+                break;
+
+            case Sleeping:
+                /*
+                 * Prefer the thread after it's woken up.
+                 */
+                THREAD->priority = -1;
+
+                /*
+                 * We need to release wq->lock which we locked in waitq_sleep().
+                 * Address of wq->lock is kept in THREAD->sleep_queue.
+                 */
+                spinlock_unlock(&THREAD->sleep_queue->lock);
+
+                /*
+                 * Check for possible requests for out-of-context invocation.
+                 */
+                if (THREAD->call_me) {
+                    THREAD->call_me(THREAD->call_me_with);
+                    THREAD->call_me = NULL;
+                    THREAD->call_me_with = NULL;
+                }
+
+                spinlock_unlock(&THREAD->lock);
+
+                break;
+
+            default:
+                /*
+                 * Entering state is unexpected.
+                 */
+                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
+                break;
+        }
+        THREAD = NULL;
+    }
+
+
+    THREAD = find_best_thread();
+
+    spinlock_lock(&THREAD->lock);
+    priority = THREAD->priority;
+    spinlock_unlock(&THREAD->lock);
+
+    relink_rq(priority);
+
+    spinlock_lock(&THREAD->lock);
+
+    /*
+     * If both the old and the new task are the same, lots of work is avoided.
+     */
+    if (TASK != THREAD->task) {
+        vm_t *m1 = NULL;
+        vm_t *m2;
+
+        if (TASK) {
+            spinlock_lock(&TASK->lock);
+            m1 = TASK->vm;
+            spinlock_unlock(&TASK->lock);
+        }
+
+        spinlock_lock(&THREAD->task->lock);
+        m2 = THREAD->task->vm;
+        spinlock_unlock(&THREAD->task->lock);
+
+        /*
+         * Note that it is possible for two tasks to share one vm mapping.
+         */
+        if (m1 != m2) {
+            /*
+             * Both tasks and vm mappings are different.
+             * Replace the old one with the new one.
+             */
+            vm_install(m2);
+        }
+        TASK = THREAD->task;
+    }
+
+    THREAD->state = Running;
+
+#ifdef SCHEDULER_VERBOSE
+    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
+#endif
+
+    /*
+     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
+     */
+    the_copy(THE, (the_t *) THREAD->kstack);
+
+    context_restore(&THREAD->saved_context);
+    /* not reached */
+}
+
+
 /** The scheduler
  *
@@ -320,142 +460,4 @@
 
 
-/** Scheduler stack switch wrapper
- *
- * Second part of the scheduler() function
- * using new stack. Handling the actual context
- * switch to a new thread.
- *
- */
-void scheduler_separated_stack(void)
-{
-    int priority;
-
-    ASSERT(CPU != NULL);
-
-    if (THREAD) {
-        switch (THREAD->state) {
-            case Running:
-                THREAD->state = Ready;
-                spinlock_unlock(&THREAD->lock);
-                thread_ready(THREAD);
-                break;
-
-            case Exiting:
-                frame_free((__address) THREAD->kstack);
-                if (THREAD->ustack) {
-                    frame_free((__address) THREAD->ustack);
-                }
-
-                /*
-                 * Detach from the containing task.
-                 */
-                spinlock_lock(&TASK->lock);
-                list_remove(&THREAD->th_link);
-                spinlock_unlock(&TASK->lock);
-
-                spinlock_unlock(&THREAD->lock);
-
-                spinlock_lock(&threads_lock);
-                list_remove(&THREAD->threads_link);
-                spinlock_unlock(&threads_lock);
-
-                spinlock_lock(&CPU->lock);
-                if (CPU->fpu_owner == THREAD) CPU->fpu_owner = NULL;
-                spinlock_unlock(&CPU->lock);
-
-                free(THREAD);
-
-                break;
-
-            case Sleeping:
-                /*
-                 * Prefer the thread after it's woken up.
-                 */
-                THREAD->priority = -1;
-
-                /*
-                 * We need to release wq->lock which we locked in waitq_sleep().
-                 * Address of wq->lock is kept in THREAD->sleep_queue.
-                 */
-                spinlock_unlock(&THREAD->sleep_queue->lock);
-
-                /*
-                 * Check for possible requests for out-of-context invocation.
-                 */
-                if (THREAD->call_me) {
-                    THREAD->call_me(THREAD->call_me_with);
-                    THREAD->call_me = NULL;
-                    THREAD->call_me_with = NULL;
-                }
-
-                spinlock_unlock(&THREAD->lock);
-
-                break;
-
-            default:
-                /*
-                 * Entering state is unexpected.
-                 */
-                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
-                break;
-        }
-        THREAD = NULL;
-    }
-
-
-    THREAD = find_best_thread();
-
-    spinlock_lock(&THREAD->lock);
-    priority = THREAD->priority;
-    spinlock_unlock(&THREAD->lock);
-
-    relink_rq(priority);
-
-    spinlock_lock(&THREAD->lock);
-
-    /*
-     * If both the old and the new task are the same, lots of work is avoided.
-     */
-    if (TASK != THREAD->task) {
-        vm_t *m1 = NULL;
-        vm_t *m2;
-
-        if (TASK) {
-            spinlock_lock(&TASK->lock);
-            m1 = TASK->vm;
-            spinlock_unlock(&TASK->lock);
-        }
-
-        spinlock_lock(&THREAD->task->lock);
-        m2 = THREAD->task->vm;
-        spinlock_unlock(&THREAD->task->lock);
-
-        /*
-         * Note that it is possible for two tasks to share one vm mapping.
-         */
-        if (m1 != m2) {
-            /*
-             * Both tasks and vm mappings are different.
-             * Replace the old one with the new one.
-             */
-            vm_install(m2);
-        }
-        TASK = THREAD->task;
-    }
-
-    THREAD->state = Running;
-
-#ifdef SCHEDULER_VERBOSE
-    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
-#endif
-
-    /*
-     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
-     */
-    the_copy(THE, (the_t *) THREAD->kstack);
-
-    context_restore(&THREAD->saved_context);
-    /* not reached */
-}
 
 
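For context on what the changeset does: all three scheduler helpers (find_best_thread(), relink_rq(), scheduler_separated_stack()) gain internal linkage via static, and the definition of scheduler_separated_stack() moves above its caller scheduler(). In C, a function's definition also serves as its declaration, so placing the static definition before the first call avoids the need for a separate forward declaration (and, in pre-C99 code, avoids an implicit extern declaration that would conflict with the later static definition). A minimal standalone sketch of the pattern, using hypothetical names rather than the kernel's actual code:

#include <stdio.h>

/* Internal helper: defining it before its caller means the
 * definition itself declares it, so no forward declaration is
 * needed, and the symbol stays private to this translation unit. */
static void helper_on_new_stack(void)
{
    printf("second half of the work, on the new stack\n");
}

void do_work(void)
{
    helper_on_new_stack(); /* already declared by its definition above */
}

int main(void)
{
    do_work();
    return 0;
}

Had the definition stayed below its caller, as in the old layout, a forward declaration such as "static void helper_on_new_stack(void);" would have had to precede do_work() for the static qualifier to be usable.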