# HG changeset patch
# Parent d9f32d39d0a6e7e6ec3ca1ee2ac6a429c5451ec8
# Date 1364465533 -3600

diff --git a/include/barrelfish/threads.h b/include/barrelfish/threads.h
--- a/include/barrelfish/threads.h
+++ b/include/barrelfish/threads.h
@@ -22,6 +22,8 @@
 #include
 #include
 
+struct dispatcher_generic;
+
 __BEGIN_DECLS
 
 typedef int (*thread_func_t)(void *);
@@ -69,6 +71,18 @@
 void thread_set_tls(void *);
 void *thread_get_tls(void);
 
+enum scheduler_reason {
+    SCHEDULER_UPCALL = 0,
+    SCHEDULER_YIELD,
+    SCHEDULER_BLOCKED,
+    SCHEDULER_EXIT
+};
+
+typedef struct thread *(*thread_scheduler_func_t)(struct dispatcher_generic *disp_gen,
+                                                  enum scheduler_reason reason);
+
+void thread_set_scheduler_func(thread_scheduler_func_t scheduler_func);
+
 __END_DECLS
 
 #endif
diff --git a/lib/barrelfish/threads.c b/lib/barrelfish/threads.c
--- a/lib/barrelfish/threads.c
+++ b/lib/barrelfish/threads.c
@@ -82,6 +82,9 @@
 /// invalid here).
 __attribute__((unused)) static bool stack_warned=0;
 
+/// Callback to select next thread to run -- NULL indicates default policy.
+static thread_scheduler_func_t application_scheduler_func = NULL;
+
 /// Wrapper function for most threads, runs given function then deletes itself
 static void thread_entry(thread_func_t start_func, void *start_data)
 {
@@ -284,6 +287,66 @@
 }
 
 /**
+ * \brief Set scheduler callback or pass NULL to use default policy.
+ *
+ * This will be invoked whenever a new thread needs to be selected to run,
+ * and can be used to implement application-specific scheduling policies.
+ * The same callback will be used across all the dispatchers.
+ */
+void thread_set_scheduler_func(thread_scheduler_func_t scheduler_func) {
+    application_scheduler_func = scheduler_func;
+}
+
+/**
+ * \brief Selects the next thread to run or returns NULL.
+ *
+ * Note: this is called from the dispatcher (on its stack and while disabled!).
+ */
+static struct thread *thread_schedule_next_disabled(
+    struct dispatcher_generic *disp_gen, enum scheduler_reason reason)
+{
+    // If registered, hand over control to the application-level scheduler.
+    if (application_scheduler_func != NULL) {
+        return application_scheduler_func(disp_gen, reason);
+    }
+
+    // Otherwise use the default policy: round robin with uniform priorities.
+    struct thread *me = disp_gen->current;
+    switch (reason) {
+    case SCHEDULER_YIELD: {
+        // Switch to the first thread which hasn't already yielded this
+        // timeslice, or return NULL to yield the dispatcher.
+        struct thread *next = me;
+        do {
+            assert_disabled(next != NULL);
+            next = next->next;
+            if (next == me) {
+                return NULL; // Everybody yielded this timeslice
+            }
+        } while(next->yield_epoch == disp_gen->timeslice);
+        return next;
+    }
+    case SCHEDULER_UPCALL:
+    case SCHEDULER_BLOCKED:
+    case SCHEDULER_EXIT: {
+        if (me != NULL) {
+            // Run next thread in the run queue; might be the same as the
+            // current thread.
+            assert_disabled(disp_gen->runq != NULL);
+            struct thread *next = me->next;
+            assert_disabled(next != NULL);
+            return next;
+        } else {
+            // Nothing running: take head of the run queue (possibly NULL).
+            return disp_gen->runq;
+        }
+    }
+    default:
+        USER_PANIC("unreachable");
+    }
+}
+
+/**
  * \brief Schedule and run the next active thread, or yield the dispatcher.
  *
  * This may only be called from the dispatcher (on its stack and while
@@ -300,13 +363,16 @@
         dispatcher_get_enabled_save_area(handle);
 
     if (disp_gen->current != NULL) {
-        assert_disabled(disp_gen->runq != NULL);
-
         // check stack bounds
         warn_disabled(&stack_warned,
                       thread_check_stack_bounds(disp_gen->current, enabled_area));
+    }
 
-        struct thread *next = disp_gen->current->next;
+
+    if (disp_gen->current != NULL) {
+        assert_disabled(disp_gen->runq != NULL);
+
+        struct thread *next = thread_schedule_next_disabled(disp_gen, SCHEDULER_UPCALL);
         assert_disabled(next != NULL);
         if (next != disp_gen->current) {
             fpu_context_switch(disp_gen, next);
@@ -321,10 +387,12 @@
             disp_resume(handle, enabled_area);
         }
     } else if (disp_gen->runq != NULL) {
-        fpu_context_switch(disp_gen, disp_gen->runq);
-        disp_gen->current = disp_gen->runq;
+        struct thread *next = thread_schedule_next_disabled(disp_gen, SCHEDULER_UPCALL);
+        assert_disabled(next != NULL);
+        fpu_context_switch(disp_gen, next);
+        disp_gen->current = next;
         disp->haswork = true;
-        disp_resume(handle, &disp_gen->runq->regs);
+        disp_resume(handle, &next->regs);
     } else {
         // kernel gave us the CPU when we have nothing to do. block!
         disp->haswork = havework_disabled(handle);
@@ -597,18 +665,10 @@
         dispatcher_get_enabled_save_area(handle);
 
     struct thread *me = disp_gen->current;
-    struct thread *next = me;
     me->yield_epoch = disp_gen->timeslice;
 
-    do {
-        assert_disabled(next != NULL);
-        next = next->next;
-        if (next == me) {
-            break; // Everybody yielded this timeslice
-        }
-    } while(next->yield_epoch == disp_gen->timeslice);
-
-    if (next != me) {
+    struct thread *next = thread_schedule_next_disabled(disp_gen, SCHEDULER_YIELD);
+    if (next != NULL) {
         fpu_context_switch(disp_gen, next);
         disp_gen->current = next;
         disp_switch(handle, &me->regs, &next->regs);
@@ -662,7 +722,8 @@
     assert(ft == NULL);
 
     // run the next thread, if any
-    struct thread *next = me->next;
+    struct thread *next = thread_schedule_next_disabled(disp_gen, SCHEDULER_EXIT);
+    assert_disabled(next != NULL);
     thread_remove_from_queue(&disp_gen->runq, me);
     if (next != me) {
         disp_gen->current = next;
@@ -708,7 +769,8 @@
 #endif
 
     // run the next thread, if any
-    struct thread *next = me->next;
+    struct thread *next = thread_schedule_next_disabled(disp_gen, SCHEDULER_EXIT);
+    assert_disabled(next != NULL);
     thread_remove_from_queue(&disp_gen->runq, me);
     if (next != me) {
         fpu_context_switch(disp_gen, next);
@@ -780,7 +842,8 @@
 #endif
 
     // run the next thread, if any
-    struct thread *next = me->next;
+    struct thread *next = thread_schedule_next_disabled(disp_gen, SCHEDULER_EXIT);
+    assert_disabled(next != NULL);
     thread_remove_from_queue(&disp_gen->runq, me);
     if (next != me) {
         fpu_context_switch(disp_gen, next);
@@ -820,13 +883,14 @@
     struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
     struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
+
     struct thread *me = disp_gen->current;
-    struct thread *next = me->next;
-    assert_disabled(next != NULL);
-
     assert_disabled(me->state == THREAD_STATE_RUNNABLE);
     me->state = THREAD_STATE_BLOCKED;
 
+    struct thread *next = thread_schedule_next_disabled(disp_gen, SCHEDULER_BLOCKED);
+    assert_disabled(next != NULL);
+
     thread_remove_from_queue(&disp_gen->runq, me);
     if (queue != NULL) {
         thread_enqueue(me, queue);
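
Example usage (an illustrative sketch, not part of the changeset above): an application could register a simple FIFO-style policy through the new hook. This assumes the application can see the library-internal definition of struct dispatcher_generic (the runq/current fields that the default policy above also uses), for example by building against lib/barrelfish's private headers; fifo_scheduler and the surrounding main() are hypothetical names.

    #include <barrelfish/threads.h>

    // Hypothetical policy: always run the head of the run queue, and give up
    // the dispatcher on SCHEDULER_YIELD instead of rotating to the next thread.
    static struct thread *fifo_scheduler(struct dispatcher_generic *disp_gen,
                                         enum scheduler_reason reason)
    {
        if (reason == SCHEDULER_YIELD) {
            return NULL;    // NULL tells the caller to yield the dispatcher
        }
        // For UPCALL/BLOCKED/EXIT the calling thread is still on the run
        // queue when this hook runs, so the head is non-NULL in the paths
        // where the callers assert a non-NULL result.
        return disp_gen->runq;
    }

    int main(void)
    {
        thread_set_scheduler_func(fifo_scheduler);  // pass NULL to restore the default policy
        /* ... create and run threads as usual ... */
        return 0;
    }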