Scheduler — public interface: types, functions, and defines
struct idle_thread_data { uint64_t last_entry_ms; uint64_t last_exit_ms;};struct scheduler { atomic_bool tick_enabled; time_t tick_duration_ms; struct list_head urgent_threads; struct rbt thread_rbt; struct rbt completed_rbt; struct list_head rt_threads; struct list_head bg_threads; struct rbt climb_threads; uint8_t queue_bitmap; struct thread *current; struct thread *drop_last_ref; size_t thread_count[THREAD_PRIO_CLASS_COUNT]; size_t total_thread_count; size_t total_weight; bool period_enabled; uint64_t current_period; time_t period_ms; time_t period_start_ms; uint64_t core_id; atomic_bool being_robbed; atomic_bool stealing_work; struct spinlock lock; struct thread *idle_thread; struct idle_thread_data idle_thread_data; struct scheduler *other_locked; struct rt_scheduler_percpu *rt;};struct scheduler referenced types:
time_tstruct list_headstruct rbtstruct threadstruct spinlockstruct idle_thread_datastruct schedulerstruct rt_scheduler_percpu
/*
 * Global work-stealing tunables and counters.
 * NOTE(review): field meanings inferred from names — confirm.
 */
struct scheduler_data {
    uint32_t max_concurrent_stealers; /* cap on cores stealing at once */
    uint32_t active_stealers;         /* cores currently stealing */
    uint32_t total_threads;
    int64_t steal_min_diff;           /* minimum load delta before a steal
                                       * is worthwhile; default is
                                       * SCHEDULER_DEFAULT_WORK_STEAL_MIN_DIFF */
};

/* One-time initialization of the scheduler subsystem.
 * Fixed: "()" declared no prototype (unspecified arguments); use (void). */
void scheduler_init(void);

/* Enqueue @thread on @sched. @lock_held: caller already owns sched->lock —
 * NOTE(review): inferred; confirm against the implementation. */
void scheduler_add_thread(struct scheduler *sched, struct thread *thread,
                          bool lock_held);
/* Dequeue @t from @sched. @lock_held mirrors scheduler_add_thread():
 * presumably means the caller already holds sched->lock — confirm. */
void scheduler_remove_thread(struct scheduler *sched, struct thread *t,
                             bool lock_held);
/* Core scheduling entry points. */
void schedule(void);                 /* pick and switch to the next thread */
void k_sch_main(void *);             /* scheduler kernel-thread body */
void scheduler_idle_main(void *);    /* idle-thread body */
/* Fixed: "()" declared no prototype (unspecified arguments); use (void). */
void scheduler_yield(void);
/* Begin a new accounting period on @s at @now_ms. */
void scheduler_period_start(struct scheduler *s, uint64_t now_ms);
/* Low-level CPU context save/restore primitives.
 * Parameter renamed from "new" (C++ keyword) so this header can also be
 * included from C++; C callers are unaffected by prototype parameter names. */
void switch_context(struct cpu_context *old, struct cpu_context *new_ctx);
void load_context(struct cpu_context *new_ctx);  /* restore, never returns to caller — confirm */
void save_context(struct cpu_context *new_ctx);
/* Work-stealing / load-balancing API. */

/* Is @sched underloaded enough to go looking for work? */
bool scheduler_can_steal_work(struct scheduler *sched);

/* May @core legally take @target (affinity/pinning checks — confirm)? */
bool scheduler_can_take_thread(size_t core, struct thread *target);

/* Fixed: "()" declared no prototype (unspecified arguments); use (void). */
uint64_t scheduler_compute_steal_threshold(void);

/* Attempt a steal on behalf of @sched; returns the stolen thread or
 * presumably NULL on failure — confirm. */
struct thread *scheduler_try_do_steal(struct scheduler *sched);

/* Choose which other core to steal from. */
struct scheduler *scheduler_pick_victim(struct scheduler *self);

/* Move one thread from @victim to @new_sched (renamed from "new", a C++
 * keyword; prototype parameter names do not affect C callers). */
struct thread *scheduler_steal_work(struct scheduler *new_sched,
                                    struct scheduler *victim);

/* Try to hand work to an idle core; returns that core's id — NOTE(review):
 * inferred from the size_t return; confirm. */
size_t scheduler_try_push_to_idle_core(struct scheduler *sched);
/* Arm/disarm the preemption timer tick for the current core.
 * Fixed: "()" declared no prototype (unspecified arguments); use (void). */
void scheduler_tick_enable(void);
void scheduler_tick_disable(void);

/* Timer interrupt handler driving preemption. */
enum irq_result scheduler_timer_isr(void *ctx, uint8_t vector,
                                    struct irq_context *rsp);
/* Reschedule-flag helpers for the current core / an explicit core.
 * NOTE(review): the mark_* functions take the new flag value and their bool
 * return is presumably the previous value — confirm against implementation.
 * Parameters renamed from "new" (C++ keyword); C callers are unaffected.
 * Fixed: "()" declared no prototype (unspecified arguments); use (void). */
bool scheduler_self_in_resched(void);
bool scheduler_mark_self_in_resched(bool state);
bool scheduler_mark_core_needs_resched(struct core *c, bool state);
bool scheduler_mark_self_needs_resched(bool state);
bool scheduler_self_needs_resched(void);
void scheduler_mark_self_idle(bool state);
void scheduler_resched_if_needed(void);
bool scheduler_core_idle(struct core *c);

/* Force an immediate reschedule on @sched's core. */
void scheduler_force_resched(struct scheduler *sched);
bool scheduler_preemption_disabled(void);

/* Defines */

/* How little work a core needs to be doing to try to steal work from
 * another core, as a percentage of the average load. */
#define WORK_STEAL_THRESHOLD 75ULL

/* Default for scheduler_data.steal_min_diff. */
#define SCHEDULER_DEFAULT_WORK_STEAL_MIN_DIFF 130

/* Timeslice ticks granted at priority @level.
 * Fixed: @level fully parenthesized — the original expanded the bare
 * argument, so TICKS_FOR_PRIO(a + b) would parse as a + (b == ...) and
 * 1ULL << a + b (CERT PRE01-C).
 * NOTE(review): a shift of 64 or more is UB — presumably priority levels
 * are far below that; confirm the range of @level. */
#define TICKS_FOR_PRIO(level) \
    ((level) == THREAD_PRIO_LOW ? 64 : 1ULL << (level))