/* Scheduler */

/* Timing bookkeeping for a scheduler's idle thread.
 * NOTE(review): the _ms suffix indicates milliseconds; the reference epoch
 * (boot time? monotonic clock?) is not visible from here — confirm. */
struct idle_thread_data {
uint64_t last_entry_ms; /* last time the idle thread was entered, ms */
uint64_t last_exit_ms; /* last time the idle thread was exited, ms */
};
/* Per-core scheduler state.
 * NOTE(review): most field semantics below are inferred from names and types
 * only — the implementation is not visible here; confirm against it. */
struct scheduler {
atomic_bool tick_enabled; /* whether the periodic scheduler tick is active */
time_t tick_duration_ms; /* tick interval in milliseconds */
struct list_head urgent_threads; /* threads needing service ahead of the rb-tree queues */
struct rbt thread_rbt; /* runnable threads, kept ordered in a red-black tree */
struct rbt completed_rbt; /* presumably threads done with their slice this period — confirm */
struct list_head rt_threads; /* real-time threads */
struct list_head bg_threads; /* background threads */
struct rbt climb_threads; /* NOTE(review): purpose not derivable from this view — confirm */
uint8_t queue_bitmap; /* presumably one bit per queue marking non-empty queues — confirm */
struct thread *current; /* thread currently running on this core */
struct thread *drop_last_ref; /* thread whose last reference is to be dropped (deferred free?) — confirm */
size_t thread_count[THREAD_PRIO_CLASS_COUNT]; /* thread count per priority class */
size_t total_thread_count; /* sum over all priority classes */
size_t total_weight; /* presumably sum of thread weights for proportional share — confirm */
bool period_enabled; /* whether period-based accounting is in effect */
uint64_t current_period; /* current period number */
time_t period_ms; /* length of one scheduling period, ms */
time_t period_start_ms; /* when the current period began, ms */
uint64_t core_id; /* CPU core this scheduler instance serves */
atomic_bool being_robbed; /* set while another core steals work from this one */
atomic_bool stealing_work; /* set while this core steals work from another */
struct spinlock lock; /* protects this scheduler's state — TODO confirm exact field coverage */
struct thread *idle_thread; /* runs when no other thread is runnable */
struct idle_thread_data idle_thread_data; /* entry/exit timing for idle_thread */
struct scheduler *other_locked; /* other scheduler currently held locked (work stealing?) — confirm */
struct rt_scheduler_percpu *rt; /* per-CPU real-time scheduler state */
};

/* Types referenced by struct scheduler are declared elsewhere. */
/* Global (cross-core) work-stealing accounting and limits.
 * NOTE(review): semantics inferred from field names — confirm. */
struct scheduler_data {
uint32_t max_concurrent_stealers; /* cap on cores allowed to steal at once */
uint32_t active_stealers; /* cores currently stealing work */
uint32_t total_threads; /* total threads across all per-core schedulers */
int64_t steal_min_diff; /* minimum load difference required before stealing — see SCHEDULER_DEFAULT_WORK_STEAL_MIN_DIFF */
};
/* Whether the current CPU is already inside a reschedule.
 * Fixed: empty parens () declare an unspecified parameter list in C; use
 * (void) to declare "takes no arguments" (CERT DCL20-C), matching the other
 * prototypes in this header. */
bool scheduler_self_in_resched(void);
/* Sets the current CPU's "in resched" flag to @new.
 * NOTE(review): presumably returns the previous flag value — confirm. */
bool scheduler_mark_self_in_resched(bool new);
/* Sets the "needs resched" flag on core @c to @new.
 * NOTE(review): presumably returns the previous flag value — confirm. */
bool scheduler_mark_core_needs_resched(struct core *c, bool new);

/* struct core is declared elsewhere. */
/* Sets the current CPU's "needs resched" flag to @new.
 * NOTE(review): presumably returns the previous flag value — confirm. */
bool scheduler_mark_self_needs_resched(bool new);
/* Whether the current CPU has a pending reschedule request. */
bool scheduler_self_needs_resched(void);
/* Marks the current CPU as idle (or clears the mark, per @new). */
void scheduler_mark_self_idle(bool new);
/* Performs a reschedule if the current CPU's "needs resched" flag is set. */
void scheduler_resched_if_needed(void);
/* Whether core @c is currently idle. */
bool scheduler_core_idle(struct core *c);

/* struct core is declared elsewhere. */
void scheduler_force_resched(struct scheduler *sched);

/* struct scheduler is defined above. */
bool scheduler_preemption_disabled(void);
#define WORK_STEAL_THRESHOLD \ 75ULL /* How little work the core needs to be \ * doing to try to steal work from another \ * core. This means "% of the average" \ */
#define SCHEDULER_DEFAULT_WORK_STEAL_MIN_DIFF 130
#define TICKS_FOR_PRIO(level) (level == THREAD_PRIO_LOW ? 64 : 1ULL << level)