89 if (sched_fixed_cmp(childClient->vminEligible, minEligible) < 0)
189 if (totalWeight == 0)
196 sched->vtime += lag / totalWeight;
211 if (load < leastLoad)
249 if (mostLoaded == NULL)
281 while (current != NULL)
337 panic(NULL, "Failed to create idle thread");
413 bool hasChildren = false;
428 panic(NULL, "vminEligible incorrect for node with vdeadline %lld, expected %lld but got %lld",
438 panic(NULL, "Leaf node vminEligible != veligible, vminEligible=%lld veligible=%lld",
446 bool runThreadFound = false;
451 totalWeight += client->weight;
462 runThreadFound = true;
468 panic(NULL, "Running thread not found in runqueue");
471 if (totalWeight != atomic_load(&sched->totalWeight))
473 panic(NULL, "sched totalWeight incorrect, expected %lld but got %lld", totalWeight,
485 panic(NULL, "runqueue not sorted, node with vdeadline %lld found, but min is %lld",
512 LOG_DEBUG(" process %lld thread %lld lag=%lld veligible=%lld vdeadline=%lld weight=%lld\n",
574 panic(NULL, "Thread in invalid state in sched_do() state=%d", state);
595 if (threadToFree != NULL)
654 panic(NULL, "sched_process_exit() returned unexpectedly");
662 panic(NULL, "Failed to send kill note to self in sched_thread_exit()");
666 panic(NULL, "Return to sched_thread_exit");
678 panic(NULL, "Return to syscall_process_exit");
685 panic(NULL, "Return to syscall_thread_exit");
#define assert(expression)
@ GDT_CS_RING0
Value to load into the CS register for kernel code.
@ GDT_SS_RING0
Value to load into the SS register for kernel data.
void ipi_invoke(void)
Invoke an IPI interrupt on the current CPU.
void ipi_wake_up(cpu_t *cpu, ipi_flags_t flags)
Wake up one or more CPUs.
@ IPI_SINGLE
Send the IPI to the specified CPU.
#define SYSCALL_DEFINE(num, returnType,...)
Macro to define a syscall.
static cpu_t * cpu_get_unsafe(void)
Gets the current CPU structure without disabling interrupts.
static cpu_t * cpu_get(void)
Gets the current CPU structure.
static void cpu_put(void)
Releases the current CPU structure.
cpu_t * _cpus[CPU_MAX]
Array of pointers to cpu_t structures for each CPU, indexed by CPU ID.
#define CPU_FOR_EACH(cpu)
Macro to iterate over all CPUs.
uint16_t _cpuAmount
The number of CPUs currently identified.
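As a usage illustration, CPU_FOR_EACH combined with the per-CPU scheduler state can pick out the least loaded CPU, which is presumably what sched_get_least_loaded() (declared further below) does. This is only a sketch: whether CPU_FOR_EACH declares the iteration variable itself, and the use of an assumed cpu->sched.totalWeight field as the load metric, are assumptions.

static cpu_t *least_loaded_sketch(void)
{
    cpu_t *leastLoaded = NULL;
    int64_t leastLoad = INT64_MAX;

    cpu_t *cpu;
    CPU_FOR_EACH(cpu)
    {
        // Assumed: cpu_t embeds a sched_t named "sched" whose totalWeight
        // approximates the CPU's load.
        int64_t load = atomic_load(&cpu->sched.totalWeight);
        if (load < leastLoad)
        {
            leastLoad = load;
            leastLoaded = cpu;
        }
    }
    return leastLoaded;
}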
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
#define LOG_DEBUG(format,...)
process_t * process_get_kernel(void)
Gets the kernel process.
void process_kill(process_t *process, int32_t status)
Kills a process.
void thread_free(thread_t *thread)
Frees a thread structure.
uint64_t thread_send_note(thread_t *thread, const void *buffer, uint64_t count)
Send a note to a thread.
void thread_load(thread_t *thread, interrupt_frame_t *frame)
Load state from a thread.
_NORETURN void thread_jump(thread_t *thread)
Jump to a thread by calling thread_load() and then loading its interrupt frame.
thread_t * thread_new(process_t *process)
Creates a new thread structure.
void thread_save(thread_t *thread, const interrupt_frame_t *frame)
Save state to a thread.
thread_state_t
Thread state enum.
@ THREAD_UNBLOCKING
Has started unblocking, used to prevent the same thread from being unblocked multiple times.
@ THREAD_PRE_BLOCK
Has started the process of blocking but has not yet been given to an owner CPU.
@ THREAD_ACTIVE
Is either running or ready to run.
bool wait_block_finalize(interrupt_frame_t *frame, cpu_t *self, thread_t *thread, clock_t uptime)
Finalize blocking of a thread.
#define WAIT_BLOCK_TIMEOUT(queue, condition, timeout)
Blocks until the condition is true; the condition is tested on every wakeup. Reaching the timeout wi...
#define WAIT_QUEUE_CREATE(name)
Create a wait queue initializer.
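A short usage sketch of the wait queue macros, assuming WAIT_QUEUE_CREATE takes the variable name and WAIT_BLOCK_TIMEOUT takes a pointer to the queue; both details, as well as how a timeout is reported to the caller, are assumptions.

static wait_queue_t exampleQueue = WAIT_QUEUE_CREATE(exampleQueue); // hypothetical queue
static bool exampleFlag = false;

static void wait_for_flag_sketch(clock_t timeout)
{
    // Blocks the calling thread; the condition is re-evaluated on every
    // wakeup, and the block also ends once the timeout is reached.
    WAIT_BLOCK_TIMEOUT(&exampleQueue, exampleFlag, timeout);
}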
void sched_process_exit(int32_t status)
Terminates the currently executing process and all its threads.
#define SCHED_FIXED_TO(x)
Convert a regular integer to fixed-point representation.
#define SCHED_FIXED_FROM(x)
Convert a fixed-point value to a regular integer.
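A minimal sketch of how the fixed-point helpers could look, assuming a binary fixed point on the 128-bit vclock_t listed further down; the shift amount is purely hypothetical, and only the to/from direction of the conversions is taken from the briefs above.

#define SCHED_FIXED_SHIFT_SKETCH 16 // hypothetical scale, not taken from the source

#define SCHED_FIXED_TO_SKETCH(x) (((vclock_t)(x)) << SCHED_FIXED_SHIFT_SKETCH)
#define SCHED_FIXED_FROM_SKETCH(x) ((int64_t)((x) >> SCHED_FIXED_SHIFT_SKETCH))
#define SCHED_FIXED_ZERO_SKETCH ((vclock_t)0)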
void sched_init(sched_t *sched)
Initialize the scheduler for a CPU.
#define SCHED_EPSILON
The minimum difference between two virtual clock or lag values to consider them unequal.
process_t * sched_process(void)
Retrieves the process of the currently running thread.
void sched_client_init(sched_client_t *client)
Initialize the scheduler context for a thread.
int128_t vclock_t
Virtual clock type.
void sched_start(thread_t *bootThread)
Starts the scheduler by jumping to the boot thread.
thread_t * sched_thread(void)
Retrieves the currently running thread.
void sched_yield(void)
Yield the current thread's time slice to allow other threads to run.
thread_t * sched_thread_unsafe(void)
Retrieves the currently running thread without disabling interrupts.
#define SCHED_WEIGHT_BASE
Base weight added to all threads.
bool sched_is_idle(cpu_t *cpu)
Checks if the CPU is currently idle.
void sched_submit(thread_t *thread)
Submits a thread to the scheduler.
_NORETURN void sched_idle_loop(void)
The idle loop for the scheduler.
uint64_t sched_nanosleep(clock_t timeout)
Sleeps the current thread for a specified duration in nanoseconds.
process_t * sched_process_unsafe(void)
Retrieves the process of the currently running thread without disabling interrupts.
#define SCHED_FIXED_ZERO
Fixed-point zero.
void sched_do(interrupt_frame_t *frame, cpu_t *self)
Perform a scheduling operation.
void sched_thread_exit(void)
Terminates the currently executing thread.
static void lock_init(lock_t *lock)
Initializes a lock.
#define LOCK_SCOPE(lock)
Acquires a lock for the remainder of the current scope.
static void lock_release(lock_t *lock)
Releases a lock.
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
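A usage sketch of the lock API; whether LOCK_SCOPE takes a pointer and how it arranges the automatic release (for example via a cleanup attribute) are assumptions.

static lock_t exampleLock; // hypothetical lock used only for illustration

static void locked_section_sketch(void)
{
    lock_init(&exampleLock);

    {
        LOCK_SCOPE(&exampleLock); // assumed: released when this block ends
        // ... critical section ...
    }

    // Equivalent manual pairing with the non-scoped calls.
    lock_acquire(&exampleLock);
    // ... critical section ...
    lock_release(&exampleLock);
}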
clock_t sys_time_uptime(void)
Time since boot.
void timer_set(clock_t uptime, clock_t deadline)
Schedule a one-shot timer interrupt on the current CPU.
#define RBTREE_FOR_EACH(elem, tree, member)
Iterates over a Red-Black Tree in ascending order.
rbnode_t * rbtree_find_min(rbnode_t *node)
Find the minimum node in a subtree.
rbnode_direction_t
Red-Black Tree Node Directions.
void rbtree_init(rbtree_t *tree, rbnode_compare_t compare, rbnode_update_t update)
Initialize a Red-Black Tree.
void rbtree_remove(rbtree_t *tree, rbnode_t *node)
Remove a node from the Red-Black Tree.
#define RBNODE_CREATE
Create a Red-Black Tree Node initializer.
void rbtree_insert(rbtree_t *tree, rbnode_t *node)
Insert a node into the Red-Black Tree.
void rbtree_fix(rbtree_t *tree, rbnode_t *node)
Move the node to its correct position in the Red-Black Tree.
bool rbtree_is_empty(const rbtree_t *tree)
Check if the Red-Black Tree is empty.
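A hedged sketch of walking the runqueue in ascending vdeadline order with RBTREE_FOR_EACH, similar in spirit to the debug dump at line 512 of the listing above. It assumes the macro yields the containing element given the embedded rbnode_t member and that the tree argument is a pointer.

static void dump_runqueue_sketch(sched_t *sched)
{
    sched_client_t *client;
    RBTREE_FOR_EACH(client, &sched->runqueue, node)
    {
        LOG_DEBUG(" weight=%lld veligible=%lld\n", (long long)client->weight,
            (long long)SCHED_FIXED_FROM(client->veligible));
    }
}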
#define CONFIG_CACHE_HOT_THRESHOLD
Cache hot threshold configuration.
#define CONFIG_TIME_SLICE
Time slice configuration.
clock_t uptime(void)
System call for retrieving the time since boot.
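A speculative sketch of how the uptime system call might be wired up with SYSCALL_DEFINE; the syscall number name SYS_TIME_UPTIME and the assumption that the macro expands to a function header taking no arguments are hypothetical, and only the (num, returnType, ...) shape comes from the listing above.

SYSCALL_DEFINE(SYS_TIME_UPTIME, clock_t)
{
    // Simply forwards to the kernel-side uptime helper.
    return sys_time_uptime();
}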
#define NULL
Pointer error value.
#define ERR
Integer error value.
#define CONTAINER_OF(ptr, type, member)
Container of macro.
#define CONTAINER_OF_SAFE(ptr, type, member)
Safe container of macro.
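For reference, the classic container-of pattern that these macros document; the offsetof-based definition and the NULL-tolerant SAFE variant shown here are a sketch, not necessarily the project's exact definitions.

#include <stddef.h>

#define CONTAINER_OF_SKETCH(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

#define CONTAINER_OF_SAFE_SKETCH(ptr, type, member) \
    ((ptr) == NULL ? NULL : CONTAINER_OF_SKETCH(ptr, type, member))

// Example: recovering the scheduler context that embeds a runqueue node.
// sched_client_t *client = CONTAINER_OF_SKETCH(node, sched_client_t, node);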
__UINT64_TYPE__ clock_t
A nanosecond time.
#define RFLAGS_INTERRUPT_ENABLE
#define RFLAGS_ALWAYS_SET
static cpu_t * sched_get_least_loaded(void)
static int64_t sched_node_compare(const rbnode_t *aNode, const rbnode_t *bNode)
static bool sched_is_cache_hot(thread_t *thread, clock_t uptime)
static thread_t * sched_steal(void)
void sched_client_update_veligible(sched_client_t *client, vclock_t newVeligible)
static void sched_verify(sched_t *sched)
static wait_queue_t sleepQueue
static void sched_enter(sched_t *sched, thread_t *thread, clock_t uptime)
static void sched_verify_min_eligible(sched_t *sched, rbnode_t *node)
static void sched_vtime_update(sched_t *sched, clock_t uptime)
static void sched_leave(sched_t *sched, thread_t *thread, clock_t uptime)
static thread_t * sched_first_eligible(sched_t *sched)
static void sched_vtime_reset(sched_t *sched, clock_t uptime)
static void sched_node_update(rbnode_t *node)
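A sketch in the spirit of sched_is_cache_hot() above: a thread counts as cache hot on its last CPU if it stopped running less than CONFIG_CACHE_HOT_THRESHOLD ago. To stay within the fields documented further below, the sketch takes the per-thread scheduler context directly rather than a thread_t.

static bool is_cache_hot_sketch(sched_client_t *client, clock_t uptime)
{
    if (client->lastCpu == NULL)
    {
        // Never ran anywhere, so there is no cache affinity to preserve.
        return false;
    }
    return (uptime - client->stop) < CONFIG_CACHE_HOT_THRESHOLD;
}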
#define atomic_store(object, desired)
#define atomic_fetch_sub(object, operand)
#define atomic_load(object)
#define ATOMIC_VAR_INIT(value)
#define atomic_fetch_add(object, operand)
#define atomic_init(obj, value)
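A minimal sketch of keeping the shared total weight in step with these atomic helpers, consistent with the verification fragment at line 471 that compares a recomputed sum against atomic_load(&sched->totalWeight). That the adjustments happen when a client enters or leaves the runqueue is an assumption.

static void total_weight_add_sketch(sched_t *sched, sched_client_t *client)
{
    atomic_fetch_add(&sched->totalWeight, client->weight);
}

static void total_weight_sub_sketch(sched_t *sched, sched_client_t *client)
{
    atomic_fetch_sub(&sched->totalWeight, client->weight);
}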
__UINTPTR_TYPE__ uintptr_t
interrupt_ctx_t interrupt
rbnode_t * children[RBNODE_AMOUNT]
sched_client_t
Per-thread scheduler context.
clock_t stop
The real time when the thread previously stopped executing.
vclock_t veligible
The virtual time at which the thread becomes eligible to run (lag >= 0).
vclock_t vminEligible
The minimum virtual eligible time of the subtree in the runqueue.
rbnode_t node
The node in the scheduler's runqueue.
cpu_t * lastCpu
The last CPU the thread was scheduled on; it stopped running at the stop time.
int64_t weight
The weight of the thread.
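The fragment at line 89 of the listing suggests that each runqueue node caches the minimum veligible of its subtree in vminEligible, so eligibility searches can skip whole subtrees. Below is a hedged sketch of such an augmented update in the spirit of sched_node_update(); the use of CONTAINER_OF to get from an rbnode_t back to its sched_client_t follows the embedded node field, while sched_fixed_cmp() is taken from the fragment and its exact signature is assumed.

static void node_update_sketch(rbnode_t *node)
{
    sched_client_t *client = CONTAINER_OF(node, sched_client_t, node);
    vclock_t minEligible = client->veligible;

    for (uint64_t i = 0; i < RBNODE_AMOUNT; i++)
    {
        if (node->children[i] == NULL)
        {
            continue;
        }
        sched_client_t *childClient = CONTAINER_OF(node->children[i], sched_client_t, node);
        if (sched_fixed_cmp(childClient->vminEligible, minEligible) < 0)
        {
            minEligible = childClient->vminEligible;
        }
    }

    client->vminEligible = minEligible;
}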
vclock_t vtime
The current virtual time of the CPU.
lock_t lock
The lock protecting the scheduler.
clock_t lastUpdate
The real time when the last vtime update occurred.
thread_t *volatile runThread
The currently running thread on this CPU.
rbtree_t runqueue
Contains all runnable threads, including the currently running thread, sorted by vdeadline.
thread_t *volatile idleThread
The idle thread for this CPU.
uintptr_t top
The top of the stack; this address is not inclusive.
thread_t
Thread of execution structure.
process_t * process
The parent process that the thread executes within.
stack_pointer_t kernelStack
The kernel stack of the thread.
tid_t id
The thread id, unique within a process_t.
wait_queue_t
The primitive that threads block on.