92   if (sched_fixed_cmp(childClient->vminEligible, minEligible) < 0)
114  panic(NULL, "Failed to create idle thread");
219  if (totalWeight == 0)
226  sched->vtime += lag / totalWeight;
241  if (load < leastLoad)
275  if (mostLoaded == NULL)
307  while (current != NULL)
338  LOG_WARN("No eligible threads found, falling back to first thread in runqueue\n");
405  bool shouldWake = SELF->id != target->id || !SELF->inInterrupt;
419  bool hasChildren = false;
434  panic(NULL, "vminEligible incorrect for node with vdeadline %lld, expected %lld but got %lld",
444  panic(NULL, "Leaf node vminEligible != veligible, vminEligible=%lld veligible=%lld",
452  bool runThreadFound = false;
457  totalWeight += client->weight;
468  runThreadFound = true;
474  panic(NULL, "Running thread not found in runqueue");
477  if (totalWeight != atomic_load(&sched->totalWeight))
479  panic(NULL, "sched totalWeight incorrect, expected %lld but got %lld", totalWeight,
491  panic(NULL, "runqueue not sorted, node with vdeadline %lld found, but min is %lld",
518  LOG_DEBUG("    process %lld thread %lld lag=%lld veligible=%lld vdeadline=%lld weight=%lld\n",
586  panic(NULL, "Thread in invalid state in sched_do() state=%d", state);
609  if (threadToFree != NULL)
669  panic(NULL, "Return to sched_thread_exit");
688  panic(NULL, "Return to syscall_thread_exit");
#define assert(expression)
#define CONFIG_CACHE_HOT_THRESHOLD
Cache hot threshold configuration.
#define CONFIG_TIME_SLICE
Time slice configuration.
#define CLI_SCOPE()
Macro to increment CLI depth for the duration of the current scope.
static void cli_pop(void)
Decrements the CLI depth, re-enabling interrupts if the depth reaches zero and interrupts were previously enabled.
static void cli_push(void)
Increments the CLI depth, disabling interrupts if depth was zero.
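cli_push() and cli_pop() above form a nesting pair. A minimal sketch of the usual pattern, assuming a per-CPU depth counter and a saved interrupt flag; cliDepth, cliIntsEnabled, and rflags_read() are hypothetical names, not taken from this codebase:

    static void cli_push(void)
    {
        uint64_t rflags = rflags_read();       /* sample the flags before disabling */
        asm volatile("cli");
        if (SELF->cliDepth++ == 0)             /* first push on this CPU */
            SELF->cliIntsEnabled = (rflags & RFLAGS_INTERRUPT_ENABLE) != 0;
    }

    static void cli_pop(void)
    {
        if (--SELF->cliDepth == 0 && SELF->cliIntsEnabled)
            asm volatile("sti");               /* restore interrupts on the last pop */
    }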
#define GDT_SS_RING0
Value to load into the SS register for kernel data.
#define GDT_CS_RING0
Value to load into the CS register for kernel code.
void ipi_invoke(void)
Invoke an IPI interrupt on the current CPU.
void ipi_wake_up(cpu_t *cpu, ipi_flags_t flags)
Wake up one or more CPUs.
@ IPI_SINGLE
Send the IPI to the specified CPU.
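A small usage sketch: after queuing a thread on a remote CPU, the scheduler can prod that CPU with a targeted IPI. The surrounding context is illustrative; sched_is_idle() is documented further down:

    /* thread has just been placed on cpu's runqueue */
    if (sched_is_idle(cpu))
        ipi_wake_up(cpu, IPI_SINGLE);   /* interrupt only that CPU so it reschedules now */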
#define SELF_PTR(ptr)
Macro to get a pointer to a percpu variable on the current CPU.
#define SELF
Macro to access data on the current CPU.
#define CPU_PTR(id, ptr)
Macro to get a pointer to a percpu variable on a specific CPU.
#define PERCPU_DEFINE_CTOR(type, name)
Macro to define a percpu variable with a constructor.
#define SYSCALL_DEFINE(num, returnType,...)
Macro to define a syscall.
#define CPU_FOR_EACH(cpu)
Macro to iterate over all CPUs.
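A usage sketch combining these macros with the _pcpu_sched instance listed further down. The argument conventions (CPU_FOR_EACH binding cpu as a cpu_t*, SELF_PTR taking the variable's address, cpu_t having an id field) are assumptions:

    sched_t *mySched = SELF_PTR(&_pcpu_sched);     /* this CPU's scheduler instance */

    uint64_t idleCpus = 0;
    CPU_FOR_EACH(cpu)                              /* visits every cpu_t* in the system */
    {
        if (sched_is_idle(cpu))                    /* documented further down */
            idleCpus++;
    }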
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
#define LOG_WARN(format,...)
#define LOG_DEBUG(format,...)
void process_kill(process_t *process, const char *status)
Kills a process, pushing it to the reaper.
process_t * process_get_kernel(void)
Gets the kernel process.
clock_t clock_uptime(void)
Retrieve the time in nanoseconds since boot.
void thread_free(thread_t *thread)
Frees a thread structure.
void thread_load(thread_t *thread, interrupt_frame_t *frame)
Load state from a thread.
static thread_t * thread_current(void)
Retrieves the currently running thread.
_NORETURN void thread_jump(thread_t *thread)
Jump to a thread by calling thread_load() and then loading its interrupt frame.
thread_t * thread_new(process_t *process)
Creates a new thread structure.
void thread_save(thread_t *thread, const interrupt_frame_t *frame)
Save state to a thread.
thread_state_t
Thread state enum.
@ THREAD_UNBLOCKING
Has started unblocking; used to prevent the same thread from being unblocked multiple times.
@ THREAD_PRE_BLOCK
Has started the process of blocking but has not yet been given to an owner CPU.
@ THREAD_DYING
The thread is currently dying; the scheduler will free it the next time it is invoked.
@ THREAD_ACTIVE
Is either running or ready to run.
#define WAIT_BLOCK_TIMEOUT(queue, condition, timeout)
Blocks until the condition is true; the condition is tested on every wakeup. Reaching the timeout ends the wait even if the condition is still false.
bool wait_block_finalize(interrupt_frame_t *frame, thread_t *thread, clock_t uptime)
Finalize blocking of a thread.
#define WAIT_QUEUE_CREATE(name)
Create a wait queue initializer.
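Given the sleepQueue listed further down, sched_nanosleep() plausibly reduces to a timed wait on a condition that never becomes true; a sketch under that assumption (the queue-passing and return-value conventions are guesses):

    static wait_queue_t sleepQueue = WAIT_QUEUE_CREATE(sleepQueue);

    uint64_t sched_nanosleep(clock_t timeout)
    {
        /* The condition is constantly false, so the thread sleeps until the
         * timeout fires and the wait gives up. */
        WAIT_BLOCK_TIMEOUT(&sleepQueue, false, timeout);
        return 0;
    }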
#define SCHED_FIXED_TO(x)
Convert a regular integer to fixed-point representation.
#define SCHED_FIXED_FROM(x)
Convert a fixed-point value to a regular integer.
#define SCHED_EPSILON
The minimum difference between two virtual clock or lag values to consider them unequal.
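These likely form a standard binary fixed-point layer over the 128-bit vclock_t listed further down; a sketch assuming a 32-bit fraction (the shift width and the epsilon value are guesses, not taken from the source):

    #define SCHED_FIXED_SHIFT   32   /* hypothetical fraction width */
    #define SCHED_FIXED_TO(x)   ((vclock_t)(x) << SCHED_FIXED_SHIFT)
    #define SCHED_FIXED_FROM(x) ((int64_t)((x) >> SCHED_FIXED_SHIFT))
    #define SCHED_EPSILON       1    /* one raw fixed-point step; actual value is a guess */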
void sched_disable(void)
Disables preemption on the current CPU.
void sched_client_init(sched_client_t *client)
Initialize the scheduler context for a thread.
void sched_enable(void)
Enables preemption on the current CPU.
sched_t PERCPU _pcpu_sched
The per-CPU scheduler.
int128_t vclock_t
Virtual clock type.
void sched_start(thread_t *bootThread)
Starts the scheduler by jumping to the boot thread.
void sched_yield(void)
Yield the current thread's time slice to allow other threads to run.
#define SCHED_WEIGHT_BASE
Base weight added to all threads.
void sched_exits(const char *status)
Terminates the currently executing process and all its threads.
bool sched_is_idle(cpu_t *cpu)
Checks if the CPU is currently idle.
void sched_submit(thread_t *thread)
Submits a thread to the scheduler.
_NORETURN void sched_idle_loop(void)
The idle loop for the scheduler.
uint64_t sched_nanosleep(clock_t timeout)
Sleeps the current thread for a specified duration in nanoseconds.
void sched_do(interrupt_frame_t *frame)
Perform a scheduling operation.
#define SCHED_FIXED_ZERO
Fixed-point zero.
void sched_thread_exit(void)
Terminates the currently executing thread.
static void lock_init(lock_t *lock)
Initializes a lock.
#define LOCK_SCOPE(lock)
Acquires a lock for the remainder of the current scope.
static void lock_release(lock_t *lock)
Releases a lock.
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
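A usage sketch for LOCK_SCOPE(), which presumably releases the lock via a scope-exit (cleanup attribute) mechanism; statsLock and statsValue are illustrative names, and passing the lock by address is an assumption:

    static lock_t statsLock;        /* set up once with lock_init(&statsLock) */
    static uint64_t statsValue;

    void stats_increment(void)
    {
        LOCK_SCOPE(&statsLock);     /* acquired here, released when the scope exits */
        statsValue++;
    }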
void rcu_report_quiescent(void)
Called during a context switch to report a quiescent state.
void timer_set(clock_t now, clock_t deadline)
Schedule a one-shot timer interrupt on the current CPU.
#define RBTREE_FOR_EACH(elem, tree, member)
Iterates over a Red-Black Tree in ascending order.
rbnode_t * rbtree_find_min(rbnode_t *node)
Find the minimum node in a subtree.
rbnode_direction_t
Red-Black Tree Node Directions.
void rbtree_init(rbtree_t *tree, rbnode_compare_t compare, rbnode_update_t update)
Initialize a Red-Black Tree.
void rbtree_remove(rbtree_t *tree, rbnode_t *node)
Remove a node from the Red-Black Tree.
#define RBNODE_CREATE
Create a Red-Black Tree Node initializer.
void rbtree_insert(rbtree_t *tree, rbnode_t *node)
Insert a node into the Red-Black Tree.
void rbtree_fix(rbtree_t *tree, rbnode_t *node)
Move the node to its correct position in the Red-Black Tree.
bool rbtree_is_empty(const rbtree_t *tree)
Check if the Red-Black Tree is empty.
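The scheduler's runqueue below is such a tree: sched_node_compare() presumably orders nodes by vdeadline, and sched_node_update() maintains the vminEligible augmentation checked by the verification fragments above. A hedged initialization-and-walk sketch (the iteration binding is assumed from the macro's parameter names):

    rbtree_init(&sched->runqueue, sched_node_compare, sched_node_update);

    sched_client_t *client;
    RBTREE_FOR_EACH(client, &sched->runqueue, node)   /* ascending vdeadline order */
    {
        /* client->veligible and client->vminEligible are the fields listed below */
    }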
clock_t uptime(void)
System call for retrieving the time since boot.
#define NULL
Pointer error value.
#define CONTAINER_OF(ptr, type, member)
Container of macro.
#define CONTAINER_OF_SAFE(ptr, type, member)
Safe container of macro.
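These are presumably the standard offsetof idiom; a minimal sketch, with the safe variant tolerating a NULL pointer:

    #include <stddef.h>

    #define CONTAINER_OF(ptr, type, member) \
        ((type *)((uintptr_t)(ptr) - offsetof(type, member)))

    #define CONTAINER_OF_SAFE(ptr, type, member) \
        ((ptr) == NULL ? NULL : CONTAINER_OF(ptr, type, member))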
__UINT64_TYPE__ clock_t
A nanosecond time.
#define RFLAGS_INTERRUPT_ENABLE
#define RFLAGS_ALWAYS_SET
static cpu_t * sched_get_least_loaded(void)
static int64_t sched_node_compare(const rbnode_t *aNode, const rbnode_t *bNode)
static bool sched_is_cache_hot(thread_t *thread, clock_t uptime)
static thread_t * sched_steal(void)
void sched_client_update_veligible(sched_client_t *client, vclock_t newVeligible)
static void sched_verify(sched_t *sched)
static wait_queue_t sleepQueue
static void sched_enter(sched_t *sched, thread_t *thread, clock_t uptime)
static void sched_verify_min_eligible(sched_t *sched, rbnode_t *node)
static void sched_vtime_update(sched_t *sched, clock_t uptime)
static void sched_leave(sched_t *sched, thread_t *thread, clock_t uptime)
static thread_t * sched_first_eligible(sched_t *sched)
static void sched_vtime_reset(sched_t *sched, clock_t uptime)
static void sched_node_update(rbnode_t *node)
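The fragments at lines 219 and 226 above suggest sched_vtime_update() advances the per-CPU virtual clock by the elapsed real time divided by the total weight; a sketch under that reading (the fixed-point conversion of the delta is assumed):

    static void sched_vtime_update(sched_t *sched, clock_t uptime)
    {
        int64_t totalWeight = atomic_load(&sched->totalWeight);
        if (totalWeight == 0)                    /* matches the check at line 219 */
            return;
        vclock_t lag = SCHED_FIXED_TO(uptime - sched->lastUpdate);   /* assumed */
        sched->vtime += lag / totalWeight;       /* matches line 226 */
        sched->lastUpdate = uptime;
    }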
#define atomic_store(object, desired)
#define atomic_fetch_sub(object, operand)
#define atomic_load(object)
#define ATOMIC_VAR_INIT(value)
#define atomic_fetch_add(object, operand)
#define atomic_init(obj, value)
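Consistent with the verification fragment at line 477, sched_enter() and sched_leave() presumably keep sched->totalWeight in step with the clients in the runqueue using these atomics; the weight bookkeeping likely reduces to:

    atomic_fetch_add(&sched->totalWeight, client->weight);   /* in sched_enter() */
    atomic_fetch_sub(&sched->totalWeight, client->weight);   /* in sched_leave() */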
__UINTPTR_TYPE__ uintptr_t
rbnode_t * children[RBNODE_AMOUNT]
Per-thread scheduler context.
clock_t stop
The real time when the thread previously stopped executing.
vclock_t veligible
The virtual time at which the thread becomes eligible to run (lag >= 0).
vclock_t vminEligible
The minimum virtual eligible time of the subtree in the runqueue.
rbnode_t node
The node in the scheduler's runqueue.
cpu_t * lastCpu
The last CPU the thread was scheduled on; it stopped running at the time recorded in stop.
int64_t weight
The weight of the thread.
vclock_t vtime
The current virtual time of the CPU.
lock_t lock
The lock protecting the scheduler.
clock_t lastUpdate
The real time when the last vtime update occurred.
thread_t *volatile runThread
The currently running thread on this CPU.
rbtree_t runqueue
Contains all runnable threads, including the currently running thread, sorted by vdeadline.
thread_t *volatile idleThread
The idle thread for this CPU.
uintptr_t top
The top of the stack; this address is not inclusive.
Thread of execution structure.
process_t * process
The parent process that the thread executes within.
stack_pointer_t kernelStack
The kernel stack of the thread.
tid_t id
The thread id, unique within a process_t.
The primitive that threads block on.