# Scheduler
Members tracking the idle thread's state:

| Member Type | Member Name |
|---|---|
| enum idle_thread_state | state |
| atomic_bool | woken_from_timer |
| uint64_t | last_entry_ms |
| uint64_t | last_exit_ms |
Members governing work stealing:

| Member Type | Member Name |
|---|---|
| uint32_t | max_concurrent_stealers |
| uint32_t | active_stealers |
| uint32_t | total_threads |
| int64_t | steal_min_diff |
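Taken together, these members suggest per-scheduler work-stealing bookkeeping. A minimal sketch of how they could be grouped, where the struct name `steal_state` and the field comments are assumptions inferred only from the member names:

```c
#include <stdint.h>

/* Hypothetical grouping of the work-stealing members above; the
 * struct name and comments are inferences from the member names. */
struct steal_state {
    uint32_t max_concurrent_stealers; /* cap on cores stealing at once */
    uint32_t active_stealers;         /* cores currently stealing */
    uint32_t total_threads;           /* threads this scheduler manages */
    int64_t  steal_min_diff;          /* minimum load imbalance worth stealing for */
};
```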
Values of `enum idle_thread_state`:

| Name | Value |
|---|---|
| IDLE_THREAD_WORK_STEAL | 0 |
| IDLE_THREAD_SLEEP | 1 |
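Combined with the first table, the enum and the idle-thread members might be declared as below. The enum name, its values, and the member list come from the tables; the struct name `idle_state` and the field comments are assumptions:

```c
#include <stdatomic.h>
#include <stdint.h>

/* What the idle thread does when it runs. */
enum idle_thread_state {
    IDLE_THREAD_WORK_STEAL = 0, /* idle core should try to steal work */
    IDLE_THREAD_SLEEP = 1,      /* nothing to steal; the core sleeps */
};

/* Hypothetical name for the struct holding the first table's members. */
struct idle_state {
    enum idle_thread_state state;
    atomic_bool woken_from_timer; /* whether a timer woke the idle thread */
    uint64_t last_entry_ms;       /* when the idle thread last started running */
    uint64_t last_exit_ms;        /* when it last stopped */
};
```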
## Functions

- `void scheduler_init()`
- `void scheduler_add_thread(struct scheduler *sched, struct thread *thread, bool lock_held)`
- `void scheduler_remove_thread(struct scheduler *sched, struct thread *t, bool lock_held)`
- `void schedule(void)`
- `void k_sch_main(void *)`
- `void scheduler_idle_main(void *)`
- `void scheduler_scheduler_preemption_enable()`
- `void scheduler_scheduler_preemption_disable()`
- `void scheduler_yield()`
- `void scheduler_enqueue(struct thread *t)`
- `void scheduler_enqueue_on_core(struct thread *t, uint64_t core_id)`
- `bool scheduler_wake(struct thread *t, enum thread_wake_reason reason, enum thread_prio_class prio, void *wake_src)`
- `void scheduler_period_start(struct scheduler *s, uint64_t now_ms)`
- `void switch_context(struct cpu_context *old, struct cpu_context *new)`
- `void load_context(struct cpu_context *new)`
- `void save_context(struct cpu_context *new)`
- `bool scheduler_can_steal_work(struct scheduler *sched)`
- `bool scheduler_can_steal_thread(size_t core, struct thread *target)`
- `uint64_t scheduler_compute_steal_threshold()`
- `struct thread *scheduler_try_do_steal(struct scheduler *sched)`
- `struct scheduler *scheduler_pick_victim(struct scheduler *self)`
- `struct thread *scheduler_steal_work(struct scheduler *victim)`
- `size_t scheduler_try_push_to_idle_core(struct scheduler *sched)`
- `bool scheduler_inherit_priority(struct thread *boosted, size_t new_weight, enum thread_prio_class new_class)`
- `void scheduler_uninherit_priority()`
- `void thread_migrate(struct thread *t, size_t dest_core)`
- `static inline struct thread *scheduler_get_current_thread()`
- `static inline enum thread_flags scheduler_pin_current_thread()`
- `static inline void scheduler_unpin_current_thread(enum thread_flags flags)`
- `static inline struct thread *thread_spawn(char *name, void (*entry)(void *), void *arg)`
- `static inline struct thread *thread_spawn_custom_stack(char *name, void (*entry)(void *), void *arg, size_t stack_size)`
- `static inline struct thread *thread_spawn_on_core(char *name, void (*entry)(void *), void *arg, uint64_t core_id)`
- `static inline void scheduler_wake_from_io_block(struct thread *t, void *wake_src)`
- `static inline bool scheduler_self_in_resched()`
- `static inline bool scheduler_mark_self_in_resched(bool new)`
- `static inline bool scheduler_mark_core_needs_resched(struct core *c, bool new)`
- `static inline bool scheduler_mark_self_needs_resched(bool new)`
- `static inline bool scheduler_self_needs_resched(void)`
- `static inline void scheduler_mark_self_idle(bool new)`
- `static inline void scheduler_resched_if_needed(void)`
- `static inline bool scheduler_core_idle(struct core *c)`
- `static inline void scheduler_force_resched(struct scheduler *sched)`
- `static inline uint32_t scheduler_preemption_disable(void)`
- `static inline uint32_t scheduler_preemption_enable(void)`
- `static inline bool scheduler_preemption_disabled(void)`
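A short usage sketch built only from the prototypes above. Whether `thread_spawn()` already enqueues the new thread is not stated here, so the explicit `scheduler_enqueue()` call, the worker body, and the critical-section comment are assumptions:

```c
/* A trivial worker that repeatedly hands the core back. */
static void worker(void *arg)
{
    (void)arg;
    for (;;)
        scheduler_yield();
}

void example(void)
{
    struct thread *t = thread_spawn("worker", worker, NULL);
    if (t)
        scheduler_enqueue(t); /* make the thread runnable (may be redundant) */

    scheduler_preemption_disable(); /* returns the new disable depth */
    /* ... touch per-core state that must not be preempted or migrated ... */
    scheduler_preemption_enable();
}
```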
## Defines

- `75ULL` — how little work a core needs to be doing, as a percentage of the average load across cores, before it tries to steal work from another core.
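One way the 75% figure could feed `scheduler_compute_steal_threshold()`. The macro name and the helpers `total_load()` and `core_count()` are hypothetical; only the percentage and the function name appear above:

```c
#include <stdint.h>

#define STEAL_THRESHOLD_PCT 75ULL /* hypothetical name for the define above */

extern uint64_t total_load(void); /* assumed helper: runnable work across cores */
extern uint64_t core_count(void); /* assumed helper: number of online cores */

/* A core is doing "little enough" work to steal when its load falls
 * below 75% of the per-core average. */
uint64_t scheduler_compute_steal_threshold(void)
{
    uint64_t avg = total_load() / core_count(); /* assumes >= 1 core online */
    return (avg * STEAL_THRESHOLD_PCT) / 100;
}
```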