# HG changeset patch
# Parent 19b77e817f8ec79e7e67f7d5afb674440f046989
thread: consolidate scheduling policy decisions into a single callback.

diff --git a/include/barrelfish/dispatcher.h b/include/barrelfish/dispatcher.h
--- a/include/barrelfish/dispatcher.h
+++ b/include/barrelfish/dispatcher.h
@@ -30,10 +30,10 @@
     /// all other dispatcher upcalls run on this stack
     uintptr_t stack[DISPATCHER_STACK_WORDS];
 
-    /// Currently-running (or last-run) thread, if any
+    /// Head of run queue; currently-running (or last-run) thread, if any
    struct thread *current;
 
-    /// Thread run queue (all threads eligible to be run)
+    /// Circular thread run queue (all threads eligible to be run)
    struct thread *runq;
 
    /// Cap to this dispatcher, used for creating new endpoints
diff --git a/lib/barrelfish/domain.c b/lib/barrelfish/domain.c
--- a/lib/barrelfish/domain.c
+++ b/lib/barrelfish/domain.c
@@ -843,7 +843,7 @@
     struct dispatcher_shared_generic *disp =
         get_dispatcher_shared_generic(mydisp);
 
-    struct thread *next = thread->next;
+    struct thread *next = thread_scheduler_func(disp_gen, SCHEDULER_YIELD);
     thread_remove_from_queue(&disp_gen->runq, thread);
 
     errval_t err = domain_wakeup_on_coreid_disabled(core_id, thread, mydisp);
@@ -854,7 +854,7 @@
     }
 
     // run the next thread, if any
-    if (next != thread) {
+    if (next != NULL) {
        disp_gen->current = next;
        disp_resume(mydisp, &next->regs);
    } else {
diff --git a/lib/barrelfish/include/threads_priv.h b/lib/barrelfish/include/threads_priv.h
--- a/lib/barrelfish/include/threads_priv.h
+++ b/lib/barrelfish/include/threads_priv.h
@@ -111,4 +111,19 @@
                                        enum exception_type type, int subtype,
                                        void *addr, arch_registers_state_t *regs);
 
+enum scheduler_reason {
+    SCHEDULER_UPCALL = 0,
+    SCHEDULER_YIELD,
+    SCHEDULER_BLOCKED,
+    SCHEDULER_EXIT
+};
+
+/// Scheduler callback: selects which thread to run next or returns NULL.
+/// Note: this is invoked from the dispatcher (on its stack and while disabled!)
+/// XXX: NULL thread currently not supported for SCHEDULER_UPCALL case.
+typedef struct thread *(*scheduler_func_t)(struct dispatcher_generic *disp_gen,
+                                           enum scheduler_reason reason);
+
+extern scheduler_func_t thread_scheduler_func;
+
 #endif // LIBBARRELFISH_THREADS_PRIV_H
diff --git a/lib/barrelfish/threads.c b/lib/barrelfish/threads.c
--- a/lib/barrelfish/threads.c
+++ b/lib/barrelfish/threads.c
@@ -82,6 +82,11 @@
 /// invalid here).
 __attribute__((unused)) static bool stack_warned=0;
 
+static struct thread *thread_schedule_round_robin_disabled(
+    struct dispatcher_generic *disp_gen, enum scheduler_reason reason);
+
+scheduler_func_t thread_scheduler_func = thread_schedule_round_robin_disabled;
+
 /// Wrapper function for most threads, runs given function then deletes itself
 static void thread_entry(thread_func_t start_func, void *start_data)
 {
@@ -284,6 +289,50 @@
 }
 
 /**
+ * \brief Selects the next thread to run or returns NULL.
+ * This implements the default policy: round robin with uniform priorities.
+ *
+ * Note: this is called from the dispatcher (on its stack and while disabled!).
+ */
+static struct thread *thread_schedule_round_robin_disabled(
+    struct dispatcher_generic *disp_gen, enum scheduler_reason reason)
+{
+    struct thread *me = disp_gen->current;
+    switch (reason) {
+    case SCHEDULER_YIELD: {
+        // Switch to the first thread which hasn't already yielded this
+        // timeslice, or return NULL to yield the dispatcher.
+        struct thread *next = me;
+        do {
+            assert_disabled(next != NULL);
+            next = next->next;
+            if (next == me) {
+                return NULL; // Everybody yielded this timeslice
+            }
+        } while(next->yield_epoch == disp_gen->timeslice);
+        return next;
+    }
+    case SCHEDULER_UPCALL:
+    case SCHEDULER_BLOCKED:
+    case SCHEDULER_EXIT: {
+        if (me != NULL) {
+            // Run next thread in the run queue; might be the same as the
+            // current thread.
+            assert_disabled(disp_gen->runq != NULL);
+            struct thread *next = me->next;
+            assert_disabled(next != NULL);
+            return next;
+        } else {
+            // Nothing running: take head of the run queue (possibly NULL).
+            return disp_gen->runq;
+        }
+    }
+    default:
+        USER_PANIC("unreachable");
+    }
+}
+
+/**
  * \brief Schedule and run the next active thread, or yield the dispatcher.
  *
  * This may only be called from the dispatcher (on its stack and while
@@ -306,7 +355,7 @@
         warn_disabled(&stack_warned,
                       thread_check_stack_bounds(disp_gen->current, enabled_area));
 
-        struct thread *next = disp_gen->current->next;
+        struct thread *next = thread_scheduler_func(disp_gen, SCHEDULER_UPCALL);
         assert_disabled(next != NULL);
         if (next != disp_gen->current) {
             fpu_context_switch(disp_gen, next);
@@ -321,10 +370,12 @@
             disp_resume(handle, enabled_area);
         }
     } else if (disp_gen->runq != NULL) {
-        fpu_context_switch(disp_gen, disp_gen->runq);
-        disp_gen->current = disp_gen->runq;
+        struct thread *next = thread_scheduler_func(disp_gen, SCHEDULER_UPCALL);
+        assert_disabled(next != NULL);
+        fpu_context_switch(disp_gen, next);
+        disp_gen->current = next;
         disp->haswork = true;
-        disp_resume(handle, &disp_gen->runq->regs);
+        disp_resume(handle, &next->regs);
     } else {
         // kernel gave us the CPU when we have nothing to do. block!
         disp->haswork = havework_disabled(handle);
@@ -597,18 +648,10 @@
         dispatcher_get_enabled_save_area(handle);
 
     struct thread *me = disp_gen->current;
-    struct thread *next = me;
     me->yield_epoch = disp_gen->timeslice;
 
-    do {
-        assert_disabled(next != NULL);
-        next = next->next;
-        if (next == me) {
-            break; // Everybody yielded this timeslice
-        }
-    } while(next->yield_epoch == disp_gen->timeslice);
-
-    if (next != me) {
+    struct thread *next = thread_scheduler_func(disp_gen, SCHEDULER_YIELD);
+    if (next != NULL) {
         fpu_context_switch(disp_gen, next);
         disp_gen->current = next;
         disp_switch(handle, &me->regs, &next->regs);
@@ -662,7 +705,7 @@
     assert(ft == NULL);
 
     // run the next thread, if any
-    struct thread *next = me->next;
+    struct thread *next = thread_scheduler_func(disp_gen, SCHEDULER_EXIT);
     thread_remove_from_queue(&disp_gen->runq, me);
     if (next != me) {
         disp_gen->current = next;
@@ -708,7 +751,7 @@
 #endif
 
     // run the next thread, if any
-    struct thread *next = me->next;
+    struct thread *next = thread_scheduler_func(disp_gen, SCHEDULER_EXIT);
     thread_remove_from_queue(&disp_gen->runq, me);
     if (next != me) {
         fpu_context_switch(disp_gen, next);
@@ -780,7 +823,7 @@
 #endif
 
     // run the next thread, if any
-    struct thread *next = me->next;
+    struct thread *next = thread_scheduler_func(disp_gen, SCHEDULER_EXIT);
     thread_remove_from_queue(&disp_gen->runq, me);
     if (next != me) {
         fpu_context_switch(disp_gen, next);
@@ -820,13 +863,13 @@
     struct dispatcher_shared_generic *disp =
         get_dispatcher_shared_generic(handle);
     struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
+
    struct thread *me = disp_gen->current;
-    struct thread *next = me->next;
-    assert_disabled(next != NULL);
-
     assert_disabled(me->state == THREAD_STATE_RUNNABLE);
     me->state = THREAD_STATE_BLOCKED;
 
+    struct thread *next = thread_scheduler_func(disp_gen, SCHEDULER_BLOCKED);
+
     thread_remove_from_queue(&disp_gen->runq, me);
     if (queue != NULL) {
         thread_enqueue(me, queue);
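
Usage note (commentary, not part of the patch): a minimal sketch of how code
inside lib/barrelfish might install a different policy through the new
thread_scheduler_func hook. Only thread_scheduler_func, scheduler_func_t,
enum scheduler_reason and the dispatcher_generic/thread fields touched below
come from this change; the sched_fifo_disabled policy, its behaviour and the
header comments are hypothetical illustrations. Like the default policy, the
callback runs on the dispatcher stack while disabled, so it must not block,
allocate, or re-enable the dispatcher.

    #include <barrelfish/barrelfish.h>
    #include "threads_priv.h"   // scheduler_func_t, enum scheduler_reason

    // Hypothetical alternative policy: always pick the next thread on the
    // circular run queue and ignore yield epochs.
    static struct thread *sched_fifo_disabled(struct dispatcher_generic *disp_gen,
                                              enum scheduler_reason reason)
    {
        struct thread *me = disp_gen->current;
        switch (reason) {
        case SCHEDULER_YIELD:
            // Only worth switching if another thread is runnable; returning
            // NULL makes thread_yield() yield the whole dispatcher instead.
            return (me != NULL && me->next != me) ? me->next : NULL;
        case SCHEDULER_UPCALL:
        case SCHEDULER_BLOCKED:
        case SCHEDULER_EXIT:
            // Same shape as the default round-robin policy: successor of the
            // current thread, or the head of the run queue if nothing runs.
            return me != NULL ? me->next : disp_gen->runq;
        default:
            USER_PANIC("unreachable");
        }
    }

    // Installed once during startup, before any other threads are created:
    //     thread_scheduler_func = sched_fifo_disabled;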