53 if (minPriority > highestPriority)
63 queues->bitmap &= ~(1ULL << highestPriority);
102 panic(NULL, "Failed to create idle thread");
196 panic(NULL, "Return to sched_process_exit");
202 panic(NULL, "Return to syscall_process_exit");
208 panic(NULL, "Return to sched_thread_exit");
214 panic(NULL, "Return to syscall_thread_exit");
226 if (delta < thread->sched.recentBlockTime)
328 panic(NULL, "Invalid thread state for sched_push");
406 panic(NULL, "Invalid thread state for sched_push");
451 bool shouldNotifyNeighbor = false;
452 while (selfLoad != neighborLoad)
464 shouldNotifyNeighbor = true;
474 if (shouldNotifyNeighbor)
488 if (runThread == NULL)
502 threadToFree = runThread;
536 panic(NULL, "Invalid thread state %d (pid=%d tid=%d)", state, runThread->process->id, runThread->id);
542 if (runThread == NULL)
569 if (runThread == NULL)
579 if (runThread != NULL)
605 if (threadToFree != NULL)
#define assert(expression)
@ GDT_CS_RING0
Value to load into the CS register for kernel code.
@ GDT_SS_RING0
Value to load into the SS register for kernel data.
@ INTERRUPT_DIE
Kills and frees the current thread.
static cpu_t * smp_cpu(cpuid_t id)
Returns a pointer to the cpu_t structure of the CPU with the given id.
static cpu_t * smp_self_unsafe(void)
Returns a pointer to the cpu_t structure of the current CPU.
static cpu_t * smp_self(void)
Returns a pointer to the cpu_t structure of the current CPU.
static uint16_t smp_cpu_amount(void)
Returns the number of CPUs currently identified.
static void smp_put(void)
Re-enables interrupts after a call to smp_self().
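The smp_self()/smp_put() pair above implies a small protocol: fetch the current CPU with interrupts disabled so the pointer stays valid, use it, then release it. A minimal, hypothetical usage sketch; the call made in the middle is only an example of per-CPU work:

static void example_use_self(void)
{
    cpu_t *self = smp_self();        /* per the descriptions above, interrupts stay disabled here */
    bool idle = sched_is_idle(self); /* any per-CPU work; this particular call is just an example */
    (void)idle;
    smp_put();                       /* re-enable interrupts once the pointer is no longer needed */
}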
#define SYSCALL_DEFINE(num, returnType,...)
Macro to define a syscall.
#define CPU_ID_BOOTSTRAP
ID of the bootstrap CPU.
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
#define LOG_DEBUG(format,...)
void process_kill(process_t *process, uint64_t status)
Kills a process.
process_t * process_get_kernel(void)
Gets the kernel process.
void thread_free(thread_t *thread)
Frees a thread structure.
void thread_load(thread_t *thread, interrupt_frame_t *frame)
Load state from a thread.
thread_t * thread_new(process_t *process)
Creates a new thread structure.
void thread_save(thread_t *thread, const interrupt_frame_t *frame)
Save state to a thread.
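thread_save() and thread_load() above are the two halves of a context switch: the outgoing thread's register state is captured from the interrupt frame, and the incoming thread's state is written back into it. A stripped-down sketch of that pairing only; the real sched_invoke() also updates queues, states, and deadlines:

static void example_switch(interrupt_frame_t *frame, thread_t *prev, thread_t *next)
{
    thread_save(prev, frame); /* capture the outgoing thread's registers from the frame */
    thread_load(next, frame); /* install the incoming thread's registers into the frame */
}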
thread_state_t
Thread state enum.
@ THREAD_UNBLOCKING
Has started unblocking; used to prevent the same thread from being unblocked multiple times.
@ THREAD_PRE_BLOCK
Has started the process of blocking but has not yet been given to an owner CPU.
@ THREAD_PARKED
Is doing nothing: not in a queue and not blocking; think of it as "other".
@ THREAD_RUNNING
Is currently running on a cpu.
@ THREAD_READY
Is ready to run and waiting to be scheduled.
bool wait_block_finalize(interrupt_frame_t *frame, cpu_t *self, thread_t *thread, clock_t uptime)
Finalize blocking of a thread.
#define WAIT_BLOCK_TIMEOUT(waitQueue, condition, timeout)
Block with timeout.
#define WAIT_QUEUE_CREATE(name)
Create a wait queue initializer.
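WAIT_QUEUE_CREATE() and WAIT_BLOCK_TIMEOUT() suggest the usual condition-plus-timeout blocking pattern. A hypothetical sketch, assuming the initializer is assigned to a static queue (as sleepQueue further down appears to be) and that the macro re-evaluates the condition for the caller; the exact argument forms and return value are not visible in this listing:

static wait_queue_t exampleQueue = WAIT_QUEUE_CREATE(exampleQueue); /* assumed initializer form */
static bool exampleReady = false;

static void example_wait(void)
{
    /* Block until exampleReady becomes true or ~1 s (clock_t is in nanoseconds) elapses. */
    WAIT_BLOCK_TIMEOUT(&exampleQueue, exampleReady, (clock_t)1000000000);
}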
void sched_push_new_thread(thread_t *thread, thread_t *parent)
Pushes a newly created thread onto the scheduling queue.
void sched_push(thread_t *thread, cpu_t *target)
Pushes a thread onto a scheduling queue.
process_t * sched_process(void)
Retrieves the process of the currently running thread.
void sched_done_with_boot_thread(void)
Specify that the boot thread is no longer needed.
thread_t * sched_thread(void)
Retrieves the currently running thread.
void sched_thread_ctx_init(sched_thread_ctx_t *ctx)
Initializes a thread's scheduling context.
void sched_yield(void)
Yields the CPU to another thread.
void sched_cpu_ctx_init(sched_cpu_ctx_t *ctx, cpu_t *self)
Initializes a CPU's scheduling context.
thread_t * sched_thread_unsafe(void)
Retrieves the currently running thread without disabling interrupts.
bool sched_is_idle(cpu_t *cpu)
Checks if the CPU is idle.
void sched_process_exit(uint64_t status)
Exits the current process.
void sched_invoke(interrupt_frame_t *frame, cpu_t *self, schedule_flags_t flags)
The main scheduling function.
uint64_t sched_nanosleep(clock_t timeout)
Puts the current thread to sleep.
process_t * sched_process_unsafe(void)
Retrieves the process of the currently running thread without disabling interrupts.
NORETURN void sched_idle_loop(void)
The idle loop for a CPU.
schedule_flags_t
Scheduling flags.
void sched_thread_exit(void)
Exits the current thread.
@ SCHED_NORMAL
No special flags.
@ SCHED_DIE
Kill and free the currently running thread.
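Taken together, the sched_* calls above cover the voluntary side of scheduling. A small, hypothetical kernel-thread body that sleeps, yields, and finally exits; creating and pushing the thread (thread_new(), sched_push_new_thread()) is project-specific and omitted:

static void example_worker(void)
{
    for (int i = 0; i < 8; i++)
    {
        sched_nanosleep((clock_t)10 * 1000 * 1000); /* ~10 ms; clock_t is in nanoseconds */
        sched_yield();                              /* voluntarily give the CPU to another thread */
    }
    sched_thread_exit(); /* exits the current thread and never returns */
}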
static void lock_init(lock_t *lock)
Initializes a lock.
#define LOCK_SCOPE(lock)
Acquires a lock for the remainder of the current scope.
static void lock_release(lock_t *lock)
Releases a lock.
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
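The lock primitives above support both explicit and scoped use. A hypothetical sketch; whether LOCK_SCOPE() takes the lock by pointer or by name is not visible in this listing, so the pointer form below is an assumption:

static lock_t exampleLock; /* would be set up once with lock_init(&exampleLock) */

static void example_manual(void)
{
    lock_acquire(&exampleLock);
    /* ... critical section ... */
    lock_release(&exampleLock);
}

static void example_scoped(void)
{
    LOCK_SCOPE(&exampleLock); /* assumed pointer form; released at the end of this scope */
    /* ... critical section ... */
}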
void timer_notify_self(void)
Trigger timer interrupt on self.
void timer_one_shot(cpu_t *self, clock_t uptime, clock_t timeout)
Schedule a one-shot timer interrupt.
void timer_subscribe(timer_ctx_t *ctx, timer_callback_t callback)
Subscribe to timer interrupts.
void timer_notify(cpu_t *cpu)
Trigger timer interrupt on cpu.
clock_t timer_uptime(void)
Time since boot.
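The timer interface above supports a self-re-arming one-shot pattern. A hypothetical handler, assuming timer_callback_t has the same (interrupt_frame_t *, cpu_t *) shape as sched_timer_handler() further down and that the uptime argument of timer_one_shot() is the current time; how the timer_ctx_t for timer_subscribe() is obtained is project-specific and not shown:

static void example_timer_handler(interrupt_frame_t *frame, cpu_t *self)
{
    (void)frame;
    clock_t now = timer_uptime();
    timer_one_shot(self, now, (clock_t)1000000); /* re-arm: fire again in ~1 ms */
}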
#define CONFIG_MAX_TIME_SLICE
Maximum time slice configuration.
#define CONFIG_MAX_PRIORITY_BOOST
Maximum priority boost configuration.
#define CONFIG_LOAD_BALANCE_BIAS
Load balance bias configuration.
#define CONFIG_MAX_RECENT_BLOCK_TIME
Maximum recent block time configuration.
#define CONFIG_MIN_TIME_SLICE
Minimum time slice configuration.
#define CONFIG_MAX_PRIORITY_PENALTY
Maximum priority penalty configuration.
static void list_push(list_t *list, list_entry_t *entry)
Pushes an entry to the end of the list.
static bool list_is_empty(list_t *list)
Checks if a list is empty.
static void list_init(list_t *list)
Initializes a list.
static list_entry_t * list_pop(list_t *list)
Pops the first entry from the list.
#define LERP_INT(start, end, t, minT, maxT)
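LERP_INT() has no description here, but its parameter list (start, end, t, minT, maxT) reads like a clamped integer linear interpolation, which is the natural way to map a priority onto a time slice between CONFIG_MIN_TIME_SLICE and CONFIG_MAX_TIME_SLICE. A self-contained stand-in with that assumed behaviour; the real macro may round or clamp differently:

#include <stdint.h>

static int64_t lerp_int(int64_t start, int64_t end, int64_t t, int64_t minT, int64_t maxT)
{
    if (maxT == minT)
        return start; /* degenerate range */
    if (t < minT)
        t = minT;
    if (t > maxT)
        t = maxT;
    /* Map t from [minT, maxT] onto [start, end] using only integer math. */
    return start + ((end - start) * (t - minT)) / (maxT - minT);
}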
clock_t uptime(void)
System call for retrieving the time since boot.
uint8_t priority_t
Priority type.
#define NULL
Pointer error value.
#define CONTAINER_OF(ptr, type, member)
Container of macro.
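The intrusive list helpers and CONTAINER_OF() above combine in the usual way: the link lives inside the owning structure (as thread_t::entry does below) and CONTAINER_OF() recovers the owner from a popped entry. A sketch of that pattern using only the signatures documented here; the containing type is invented for illustration:

typedef struct
{
    list_entry_t entry; /* embedded link, like thread_t::entry below */
    int value;
} example_item_t;

static void example_drain(list_t *list)
{
    while (!list_is_empty(list))
    {
        list_entry_t *entry = list_pop(list);
        example_item_t *item = CONTAINER_OF(entry, example_item_t, entry);
        (void)item->value; /* ... use the recovered item ... */
    }
}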
__UINT64_TYPE__ clock_t
A nanosecond time.
#define RFLAGS_INTERRUPT_ENABLE
#define RFLAGS_ALWAYS_SET
static void sched_compute_time_slice(thread_t *thread, thread_t *parent)
static wait_queue_t sleepQueue
static void sched_timer_handler(interrupt_frame_t *frame, cpu_t *self)
static void sched_compute_actual_priority(thread_t *thread)
static cpu_t * sched_find_least_loaded_cpu(cpu_t *exclude)
static void sched_queues_push(sched_queues_t *queues, thread_t *thread)
static void sched_load_balance(cpu_t *self)
static uint64_t sched_get_load(sched_cpu_ctx_t *ctx)
static bool sched_should_notify(cpu_t *target, priority_t priority)
static void sched_update_recent_idle_time(thread_t *thread, bool wasBlocking, clock_t uptime)
static cpu_t * sched_get_neighbor(cpu_t *self)
static thread_t * sched_queues_pop(sched_queues_t *queues, priority_t minPriority)
static void sched_queues_init(sched_queues_t *queues)
#define atomic_store(object, desired)
#define atomic_exchange(object, desired)
#define atomic_load(object)
__UINTPTR_TYPE__ uintptr_t
sched_cpu_ctx_t
Per-CPU scheduling context.
cpu_t * owner
The cpu that owns this scheduling context.
thread_t * runThread
The currently running thread.
sched_queues_t * expired
Pointer to the currently expired queue.
sched_queues_t * active
Pointer to the currently active queue.
sched_queues_t queues[2]
Array storing both queues.
thread_t * idleThread
The thread that runs when the owner CPU is idling.
lock_t lock
The lock that protects this context, except the zombieThreads list.
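The active/expired pointer pair over queues[2] looks like the classic two-array arrangement: threads whose time slice expires land in the expired queue, and when the active queue drains the two pointers are swapped instead of moving threads. A sketch of that swap only, assuming this is indeed how sched_invoke() uses the pair:

static void example_swap_queues(sched_cpu_ctx_t *ctx)
{
    if (ctx->active->length == 0 && ctx->expired->length != 0)
    {
        sched_queues_t *tmp = ctx->active;
        ctx->active = ctx->expired;
        ctx->expired = tmp;
    }
}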
sched_queues_t
Scheduling queues structure.
uint64_t length
The total number of threads in all lists.
uint64_t bitmap
A bitmap indicating which of the lists have threads in them.
list_t lists[PRIORITY_MAX]
An array of lists that store threads, one for each priority, used in a round-robin fashion.
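The bitmap mirrors the lists: bit i is set while lists[i] is non-empty, which is what the `bitmap &= ~(1ULL << highestPriority)` excerpt near the top clears when a list drains. A self-contained illustration of the O(1) lookup this enables, assuming larger indices mean higher priority:

#include <stdint.h>

static int example_highest_priority(uint64_t bitmap)
{
    if (bitmap == 0)
        return -1;                       /* every list is empty */
    return 63 - __builtin_clzll(bitmap); /* index of the highest set bit (GCC/Clang builtin) */
}

static uint64_t example_clear_priority(uint64_t bitmap, int priority)
{
    return bitmap & ~(1ULL << priority); /* the list at `priority` just became empty */
}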
sched_thread_ctx_t
Per-thread scheduling context.
clock_t recentBlockTime
The amount of time within the last CONFIG_MAX_RECENT_BLOCK_TIME nanoseconds that the thread was blocked.
clock_t prevBlockCheck
The previous time when the recentBlockTime member was updated.
priority_t actualPriority
The actual priority of the thread.
clock_t deadline
The time when the time slice will actually expire; only valid while the thread is running.
clock_t timeSlice
The length of the thread's time slice, used to determine its deadline when it is scheduled.
uintptr_t top
The top of the stack; this address is not inclusive.
thread_t
Thread of execution structure.
process_t * process
The parent process that the thread executes within.
list_entry_t entry
The entry for the scheduler and wait system.
stack_pointer_t kernelStack
The kernel stack of the thread.
tid_t id
The thread id, unique within a process_t.