PatchworkOS
sched.c
Go to the documentation of this file.
2
3#include <kernel/cpu/cpu.h>
4#include <kernel/cpu/gdt.h>
6#include <kernel/cpu/smp.h>
9#include <kernel/log/log.h>
10#include <kernel/log/panic.h>
11#include <kernel/proc/process.h>
12#include <kernel/sched/sched.h>
13#include <kernel/sched/thread.h>
14#include <kernel/sched/timer.h>
15#include <kernel/sched/wait.h>
16#include <kernel/sync/lock.h>
17
18#include <assert.h>
19#include <sys/list.h>
20#include <sys/math.h>
21#include <sys/proc.h>
22
23static wait_queue_t sleepQueue = WAIT_QUEUE_CREATE(sleepQueue);
24
25static inline void sched_queues_init(sched_queues_t* queues)
26{
27 queues->length = 0;
28 queues->bitmap = 0;
29 for (uint64_t i = PRIORITY_MIN; i < PRIORITY_MAX; i++)
30 {
31 list_init(&queues->lists[i]);
32 }
33}
34
35static inline void sched_queues_push(sched_queues_t* queues, thread_t* thread)
36{
38
39 queues->length++;
40 queues->bitmap |= (1ULL << thread->sched.actualPriority);
41 list_push(&queues->lists[thread->sched.actualPriority], &thread->entry);
42}
43
44static inline thread_t* sched_queues_pop(sched_queues_t* queues, priority_t minPriority)
45{
46 if (queues->length == 0 || queues->bitmap == 0)
47 {
48 assert(queues->length == 0 && queues->bitmap == 0);
49 return NULL;
50 }
51
52 priority_t highestPriority = PRIORITY_MAX - 1 - __builtin_clzll(queues->bitmap);
53 if (minPriority > highestPriority)
54 {
55 return NULL;
56 }
57
58 queues->length--;
59 thread_t* thread = CONTAINER_OF(list_pop(&queues->lists[highestPriority]), thread_t, entry);
60
61 if (list_is_empty(&queues->lists[highestPriority]))
62 {
63 queues->bitmap &= ~(1ULL << highestPriority);
64 }
65
66 return thread;
67}
68
69void sched_thread_ctx_init(sched_thread_ctx_t* ctx)
70{
71 ctx->timeSlice = 0;
72 ctx->deadline = 0;
73 ctx->actualPriority = 0;
74 ctx->recentBlockTime = 0;
75 ctx->prevBlockCheck = 0;
76}
77
78static void sched_timer_handler(interrupt_frame_t* frame, cpu_t* self)
79{
80 sched_invoke(frame, self, SCHED_NORMAL);
81}
82
83void sched_cpu_ctx_init(sched_cpu_ctx_t* ctx, cpu_t* self)
84{
85 sched_queues_init(&ctx->queues[0]);
86 sched_queues_init(&ctx->queues[1]);
87 ctx->active = &ctx->queues[0];
88 ctx->expired = &ctx->queues[1];
89
90 // The bootstrap cpu is initialized early, so we can't yet create the idle thread; the boot thread on the
91 // bootstrap cpu will become the idle thread.
92 if (self->id == CPU_ID_BOOTSTRAP)
93 {
94 ctx->idleThread = NULL;
95 ctx->runThread = NULL;
96 }
97 else
98 {
99 ctx->idleThread = thread_new(process_get_kernel());
100 if (ctx->idleThread == NULL)
101 {
102 panic(NULL, "Failed to create idle thread");
103 }
104
105 ctx->idleThread->frame.rip = (uintptr_t)sched_idle_loop;
106 ctx->idleThread->frame.rsp = ctx->idleThread->kernelStack.top;
107 ctx->idleThread->frame.cs = GDT_CS_RING0;
108 ctx->idleThread->frame.ss = GDT_SS_RING0;
109 ctx->idleThread->frame.rflags = RFLAGS_ALWAYS_SET | RFLAGS_INTERRUPT_ENABLE;
110
111 ctx->runThread = ctx->idleThread;
113 }
114
115 lock_init(&ctx->lock);
116 ctx->owner = self;
117
118 timer_subscribe(&self->timer, sched_timer_handler);
119}
120
121void sched_done_with_boot_thread(void)
122{
123 cpu_t* self = smp_self_unsafe();
124 sched_cpu_ctx_t* ctx = &self->sched;
125
126 assert(self->id == CPU_ID_BOOTSTRAP && ctx->runThread->process == process_get_kernel() && ctx->runThread->id == 0);
127
128 // The boot thread becomes the bootstrap cpus idle thread.
129 ctx->runThread->sched.deadline = 0;
130 ctx->idleThread = ctx->runThread;
131
132 asm volatile("sti");
133
134 timer_notify_self();
135 // When we return here, the boot thread will be an idle thread, so we just enter the idle loop.
136 sched_idle_loop();
137}
138
139uint64_t sched_nanosleep(clock_t timeout)
140{
141 return WAIT_BLOCK_TIMEOUT(&sleepQueue, false, timeout);
142}
143
144SYSCALL_DEFINE(SYS_NANOSLEEP, uint64_t, clock_t nanoseconds)
145{
146 return sched_nanosleep(nanoseconds);
147}
148
149bool sched_is_idle(cpu_t* cpu)
150{
151 sched_cpu_ctx_t* ctx = &cpu->sched;
152 LOCK_SCOPE(&ctx->lock);
153 bool isIdle = ctx->runThread == ctx->idleThread;
154 return isIdle;
155}
156
157thread_t* sched_thread(void)
158{
159 thread_t* thread = smp_self()->sched.runThread;
160 smp_put();
161 return thread;
162}
163
164process_t* sched_process(void)
165{
166 thread_t* thread = sched_thread();
167 if (thread == NULL)
168 {
169 return NULL;
170 }
171
172 return thread->process;
173}
174
175thread_t* sched_thread_unsafe(void)
176{
177 return smp_self_unsafe()->sched.runThread;
178}
179
180process_t* sched_process_unsafe(void)
181{
182 thread_t* thread = sched_thread_unsafe();
183 if (thread == NULL)
184 {
185 return NULL;
186 }
187
188 return thread->process;
189}
190
191void sched_process_exit(uint64_t status)
192{
193 thread_t* thread = sched_thread();
194 process_kill(thread->process, status);
195 asm volatile("int %0" : : "i"(INTERRUPT_DIE));
196 panic(NULL, "Return to sched_process_exit");
197}
198
199SYSCALL_DEFINE(SYS_PROCESS_EXIT, uint64_t, uint64_t status)
200{
201 sched_process_exit(status);
202 panic(NULL, "Return to syscall_process_exit");
203}
204
205void sched_thread_exit(void)
206{
207 asm volatile("int %0" : : "i"(INTERRUPT_DIE));
208 panic(NULL, "Return to sched_thread_exit");
209}
210
211SYSCALL_DEFINE(SYS_THREAD_EXIT, uint64_t)
212{
213 sched_thread_exit();
214 panic(NULL, "Return to syscall_thread_exit");
215}
216
217static void sched_update_recent_idle_time(thread_t* thread, bool wasBlocking, clock_t uptime)
218{
219 clock_t delta = uptime - thread->sched.prevBlockCheck;
220 if (wasBlocking)
221 {
222 thread->sched.recentBlockTime = MIN(thread->sched.recentBlockTime + delta, CONFIG_MAX_RECENT_BLOCK_TIME);
223 }
224 else
225 {
226 if (delta < thread->sched.recentBlockTime)
227 {
228 thread->sched.recentBlockTime -= delta;
229 }
230 else
231 {
232 thread->sched.recentBlockTime = 0;
233 }
234 }
235
236 thread->sched.prevBlockCheck = uptime;
237}
238
239static void sched_compute_time_slice(thread_t* thread, thread_t* parent)
240{
241 if (parent != NULL)
242 {
243 clock_t uptime = timer_uptime();
244 clock_t remaining = uptime <= parent->sched.deadline ? parent->sched.deadline - uptime : 0;
245
246 parent->sched.deadline = uptime + remaining / 2;
247
248 thread->sched.timeSlice = remaining / 2;
249 }
250 else
251 {
252 priority_t basePriority = atomic_load(&thread->process->priority);
253 thread->sched.timeSlice =
254 LERP_INT(CONFIG_MIN_TIME_SLICE, CONFIG_MAX_TIME_SLICE, basePriority, PRIORITY_MIN, PRIORITY_MAX - 1);
255 }
256}
257
258static void sched_compute_actual_priority(thread_t* thread)
259{
260 priority_t basePriority = atomic_load(&thread->process->priority);
261
262 if (thread->sched.recentBlockTime > CONFIG_MAX_RECENT_BLOCK_TIME / 2)
263 {
264 priority_t boost = MIN(CONFIG_MAX_PRIORITY_BOOST, PRIORITY_MAX - 1 - basePriority);
265 thread->sched.actualPriority = basePriority +
266 LERP_INT(0, boost, thread->sched.recentBlockTime, CONFIG_MAX_RECENT_BLOCK_TIME / 2,
267 CONFIG_MAX_RECENT_BLOCK_TIME);
268 }
269 else
270 {
271 priority_t penalty = MIN(CONFIG_MAX_PRIORITY_PENALTY, basePriority);
272 thread->sched.actualPriority =
273 basePriority - LERP_INT(0, penalty, thread->sched.recentBlockTime, 0, CONFIG_MAX_RECENT_BLOCK_TIME / 2);
274 }
275}
276
277void sched_yield(void)
278{
279 thread_t* thread = smp_self()->sched.runThread;
280 thread->sched.deadline = 0;
281 smp_put();
282}
283
284SYSCALL_DEFINE(SYS_YIELD, uint64_t)
285{
286 sched_yield();
287 timer_notify_self();
288 return 0;
289}
290
291static bool sched_should_notify(cpu_t* target, priority_t priority)
292{
293 if (target->sched.runThread == target->sched.idleThread)
294 {
295 return true;
296 }
297 if (priority > target->sched.runThread->sched.actualPriority)
298 {
299 return true;
300 }
301
302 return false;
303}
304
305void sched_push(thread_t* thread, cpu_t* target)
306{
307 cpu_t* self = smp_self();
308
309 if (target == NULL)
310 {
311 target = self;
312 }
313
314 LOCK_SCOPE(&target->sched.lock);
315
316 thread_state_t state = atomic_exchange(&thread->state, THREAD_READY);
317 if (state == THREAD_PARKED)
318 {
319 sched_queues_push(target->sched.active, thread);
320 }
321 else if (state == THREAD_UNBLOCKING)
322 {
323 sched_update_recent_idle_time(thread, true, timer_uptime());
324 sched_queues_push(target->sched.active, thread);
325 }
326 else
327 {
328 panic(NULL, "Invalid thread state for sched_push");
329 }
330
331 sched_compute_time_slice(thread, NULL);
332 sched_compute_actual_priority(thread);
333
334 if (sched_should_notify(target, thread->sched.actualPriority))
335 {
336 timer_notify(target);
337 }
338
339 smp_put();
340}
341
342static uint64_t sched_get_load(sched_cpu_ctx_t* ctx)
343{
344 LOCK_SCOPE(&ctx->lock);
345 return ctx->active->length + ctx->expired->length + (ctx->runThread != ctx->idleThread ? 1 : 0);
346}
347
348static cpu_t* sched_find_least_loaded_cpu(cpu_t* exclude)
349{
350 if (smp_cpu_amount() == 1)
351 {
352 return smp_cpu(0);
353 }
354
355 cpu_t* bestCpu = NULL;
356 uint64_t bestLoad = UINT64_MAX;
357
358 // Find the cpu with the best load ;)
359 for (uint64_t i = 0; i < smp_cpu_amount(); i++)
360 {
361 cpu_t* cpu = smp_cpu(i);
362 if (cpu == exclude)
363 {
364 continue;
365 }
366
367 uint64_t load = sched_get_load(&cpu->sched);
368
369 if (load < bestLoad)
370 {
371 bestLoad = load;
372 bestCpu = cpu;
373 }
374 }
375
376 // If given no choice then use the excluded cpu.
377 if (bestCpu == NULL)
378 {
379 bestCpu = exclude;
380 }
381
382 return bestCpu;
383}
384
385void sched_push_new_thread(thread_t* thread, thread_t* parent)
386{
387 cpu_t* self = smp_self();
388
389 cpu_t* target = sched_find_least_loaded_cpu(self);
390 assert(target != NULL);
391
392 LOCK_SCOPE(&target->sched.lock);
393
394 thread_state_t state = atomic_exchange(&thread->state, THREAD_READY);
395 if (state == THREAD_PARKED)
396 {
397 sched_queues_push(target->sched.active, thread);
398 }
399 else if (state == THREAD_UNBLOCKING)
400 {
401 sched_update_recent_idle_time(thread, true, timer_uptime());
402 sched_queues_push(target->sched.active, thread);
403 }
404 else
405 {
406 panic(NULL, "Invalid thread state for sched_push");
407 }
408
409 sched_compute_time_slice(thread, parent);
410 sched_compute_actual_priority(thread);
411
412 if (sched_should_notify(target, thread->sched.actualPriority))
413 {
414 timer_notify(target);
415 }
416
417 smp_put();
418}
419
420static cpu_t* sched_get_neighbor(cpu_t* self)
421{
422 if (smp_cpu_amount() == 1)
423 {
424 return NULL;
425 }
426
427 // Get the next-higher neighbor; the last cpu wraps around and gets the first.
428 return self->id != smp_cpu_amount() - 1 ? smp_cpu(self->id + 1) : smp_cpu(0);
429}
430
431static void sched_load_balance(cpu_t* self)
432{
433 // Technically there are race conditions here, but the worst-case scenario is imperfect load balancing,
434 // and we need to avoid holding the locks of two sched_cpu_ctx_t at the same time to prevent deadlocks.
435
436 if (smp_cpu_amount() == 1)
437 {
438 return;
439 }
440
441 cpu_t* neighbor = sched_get_neighbor(self);
442
443 uint64_t selfLoad = sched_get_load(&self->sched);
444 uint64_t neighborLoad = sched_get_load(&neighbor->sched);
445
446 if (selfLoad <= neighborLoad + CONFIG_LOAD_BALANCE_BIAS)
447 {
448 return;
449 }
450
451 bool shouldNotifyNeighbor = false;
452 while (selfLoad != neighborLoad)
453 {
454 lock_acquire(&self->sched.lock);
455 thread_t* thread = sched_queues_pop(self->sched.expired, PRIORITY_MIN);
456 lock_release(&self->sched.lock);
457 if (thread == NULL)
458 {
459 break;
460 }
461
462 if (sched_should_notify(neighbor, thread->sched.actualPriority))
463 {
464 shouldNotifyNeighbor = true;
465 }
466
467 lock_acquire(&neighbor->sched.lock);
468 sched_queues_push(neighbor->sched.expired, thread);
469 lock_release(&neighbor->sched.lock);
470 selfLoad--;
471 neighborLoad++;
472 }
473
474 if (shouldNotifyNeighbor)
475 {
476 timer_notify(neighbor);
477 }
478}
479
480void sched_invoke(interrupt_frame_t* frame, cpu_t* self, schedule_flags_t flags)
481{
482 sched_cpu_ctx_t* ctx = &self->sched;
483 sched_load_balance(self);
484
485 lock_acquire(&ctx->lock);
486
487 thread_t* volatile runThread = ctx->runThread; // volatile: prevent the compiler from caching this local in a register.
488 if (runThread == NULL)
489 {
490 lock_release(&ctx->lock);
491 panic(NULL, "runThread is NULL");
492 }
493
494 clock_t uptime = timer_uptime();
495 sched_update_recent_idle_time(runThread, false, uptime);
496
497 thread_t* volatile threadToFree = NULL;
498 if (flags & SCHED_DIE)
499 {
500 assert(atomic_load(&runThread->state) == THREAD_RUNNING);
501
502 threadToFree = runThread;
503 runThread = NULL;
504 LOG_DEBUG("dying tid=%d pid=%d\n", threadToFree->id, threadToFree->process->id);
505 }
506 else
507 {
508 thread_state_t state = atomic_load(&runThread->state);
509 switch (state)
510 {
511 case THREAD_PRE_BLOCK:
513 {
514 assert(runThread != ctx->idleThread);
515
516 thread_save(runThread, frame);
517
518 if (wait_block_finalize(frame, self, runThread, uptime)) // Block finalized
519 {
520 thread_save(runThread, frame);
521 runThread = NULL; // Force a new thread to be loaded
522 }
523 else // Early unblock
524 {
525 atomic_store(&runThread->state, THREAD_RUNNING);
526 }
527 }
528 break;
529 case THREAD_RUNNING:
530 {
531 // Do nothing
532 }
533 break;
534 default:
535 {
536 panic(NULL, "Invalid thread state %d (pid=%d tid=%d)", state, runThread->process->id, runThread->id);
537 }
538 }
539 }
540
541 priority_t minPriority;
542 if (runThread == NULL)
543 {
544 minPriority = PRIORITY_MIN;
545 }
546 else if (runThread == ctx->idleThread)
547 {
548 minPriority = PRIORITY_MIN;
549 }
550 else if (runThread->sched.deadline < uptime)
551 {
552 minPriority = PRIORITY_MIN;
553 }
554 else
555 {
556 minPriority = runThread->sched.actualPriority;
557 }
558
559 if (ctx->active->length == 0)
560 {
561 sched_queues_t* temp = ctx->active;
562 ctx->active = ctx->expired;
563 ctx->expired = temp;
564 }
565
566 thread_t* next = sched_queues_pop(ctx->active, minPriority);
567 if (next == NULL)
568 {
569 if (runThread == NULL)
570 {
571 thread_state_t oldState = atomic_exchange(&ctx->idleThread->state, THREAD_RUNNING);
572 assert(oldState == THREAD_READY);
573 thread_load(ctx->idleThread, frame);
574 runThread = ctx->idleThread;
575 }
576 }
577 else
578 {
579 if (runThread != NULL)
580 {
581 thread_state_t oldState = atomic_exchange(&runThread->state, THREAD_READY);
582 assert(oldState == THREAD_RUNNING);
583 thread_save(runThread, frame);
584
585 if (runThread != ctx->idleThread)
586 {
587 sched_compute_time_slice(runThread, NULL);
588 sched_compute_actual_priority(runThread);
589 sched_queues_push(ctx->expired, runThread);
590 }
591 }
592
593 next->sched.deadline = uptime + next->sched.timeSlice;
594 thread_state_t oldState = atomic_exchange(&next->state, THREAD_RUNNING);
595 assert(oldState == THREAD_READY);
596 thread_load(next, frame);
597 runThread = next;
598 }
599
600 if (runThread != ctx->idleThread && runThread->sched.deadline > uptime)
601 {
602 timer_one_shot(self, uptime, runThread->sched.deadline - uptime);
603 }
604
605 if (threadToFree != NULL)
606 {
607 thread_free(threadToFree);
608 }
609
610 ctx->runThread = runThread;
611 lock_release(&ctx->lock);
612}
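A note on the queue selection above: sched_queues_pop finds the highest non-empty priority list in O(1) by mirroring list occupancy in a 64-bit bitmap and counting leading zeros. Below is a minimal stand-alone sketch of the same technique, assuming (as the arithmetic in sched_queues_pop implies) that PRIORITY_MAX is 64, so priorities map one-to-one onto bitmap bits.

#include <stdint.h>
#include <stdio.h>

#define PRIO_MAX 64 // Assumed value; stands in for PRIORITY_MAX from proc.h.

// Occupancy bitmap: bit i is set when priority level i has queued threads.
typedef struct
{
    uint64_t bitmap;
} prio_map_t;

static void prio_map_set(prio_map_t* map, uint64_t priority)
{
    map->bitmap |= (1ULL << priority);
}

static void prio_map_clear(prio_map_t* map, uint64_t priority)
{
    map->bitmap &= ~(1ULL << priority);
}

// Same formula as sched_queues_pop: highest set bit via count-leading-zeros.
static int64_t prio_map_highest(const prio_map_t* map)
{
    if (map->bitmap == 0)
    {
        return -1; // __builtin_clzll(0) is undefined, so guard first.
    }
    return PRIO_MAX - 1 - __builtin_clzll(map->bitmap);
}

int main(void)
{
    prio_map_t map = {0};
    prio_map_set(&map, 3);
    prio_map_set(&map, 17);
    printf("%lld\n", (long long)prio_map_highest(&map)); // 17 (clzll == 46)
    prio_map_clear(&map, 17);
    printf("%lld\n", (long long)prio_map_highest(&map)); // 3
    return 0;
}

Note that PRIORITY_MAX - 1 - __builtin_clzll(bitmap) only equals the index of the highest set bit when PRIORITY_MAX matches the 64-bit width of the bitmap.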
#define assert(expression)
Definition assert.h:29
@ GDT_CS_RING0
Value to load into the CS register for kernel code.
Definition gdt.h:45
@ GDT_SS_RING0
Value to load into the SS register for kernel data.
Definition gdt.h:46
@ INTERRUPT_DIE
Kills and frees the current thread.
Definition interrupt.h:135
static cpu_t * smp_cpu(cpuid_t id)
Returns a pointer to the cpu_t structure of the CPU with the given id.
Definition smp.h:77
static cpu_t * smp_self_unsafe(void)
Returns a pointer to the cpu_t structure of the current CPU.
Definition smp.h:90
static cpu_t * smp_self(void)
Returns a pointer to the cpu_t structure of the current CPU.
Definition smp.h:115
static uint16_t smp_cpu_amount(void)
Returns the number of CPUs currently identified.
Definition smp.h:66
static void smp_put(void)
Re-enables interrupts after a call to smp_self().
Definition smp.h:125
#define SYS_PROCESS_EXIT
Definition syscalls.h:22
#define SYS_NANOSLEEP
Definition syscalls.h:25
#define SYS_YIELD
Definition syscalls.h:46
#define SYSCALL_DEFINE(num, returnType,...)
Macro to define a syscall.
Definition syscalls.h:100
#define SYS_THREAD_EXIT
Definition syscalls.h:23
#define CPU_ID_BOOTSTRAP
ID of the bootstrap CPU.
Definition cpu_id.h:19
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
Definition panic.c:362
#define LOG_DEBUG(format,...)
Definition log.h:81
void process_kill(process_t *process, uint64_t status)
Kills a process.
Definition process.c:483
process_t * process_get_kernel(void)
Gets the kernel process.
Definition process.c:594
void thread_free(thread_t *thread)
Frees a thread structure.
Definition thread.c:89
void thread_load(thread_t *thread, interrupt_frame_t *frame)
Load state from a thread.
Definition thread.c:113
thread_t * thread_new(process_t *process)
Creates a new thread structure.
Definition thread.c:69
void thread_save(thread_t *thread, const interrupt_frame_t *frame)
Save state to a thread.
Definition thread.c:107
thread_state_t
Thread state enum.
Definition thread.h:29
@ THREAD_UNBLOCKING
Has started unblocking; used to prevent the same thread from being unblocked multiple times.
Definition thread.h:35
@ THREAD_PRE_BLOCK
Has started the process of blocking but has not yet been given to an owner cpu.
Definition thread.h:33
@ THREAD_PARKED
Is doing nothing: not in a queue, not blocking; think of it as "other".
Definition thread.h:30
@ THREAD_RUNNING
Is currently running on a cpu.
Definition thread.h:32
@ THREAD_READY
Is ready to run and waiting to be scheduled.
Definition thread.h:31
bool wait_block_finalize(interrupt_frame_t *frame, cpu_t *self, thread_t *thread, clock_t uptime)
Finalize blocking of a thread.
Definition wait.c:103
#define WAIT_BLOCK_TIMEOUT(waitQueue, condition, timeout)
Block with timeout.
Definition wait.h:64
#define WAIT_QUEUE_CREATE(name)
Create a wait queue initializer.
Definition wait.h:220
void sched_push_new_thread(thread_t *thread, thread_t *parent)
Pushes a newly created thread onto the scheduling queue.
Definition sched.c:385
void sched_push(thread_t *thread, cpu_t *target)
Pushes a thread onto a scheduling queue.
Definition sched.c:305
process_t * sched_process(void)
Retrieves the process of the currently running thread.
Definition sched.c:164
void sched_done_with_boot_thread(void)
Specify that the boot thread is no longer needed.
Definition sched.c:121
thread_t * sched_thread(void)
Retrieves the currently running thread.
Definition sched.c:157
void sched_thread_ctx_init(sched_thread_ctx_t *ctx)
Initializes a thread's scheduling context.
Definition sched.c:69
void sched_yield(void)
Yields the CPU to another thread.
Definition sched.c:277
void sched_cpu_ctx_init(sched_cpu_ctx_t *ctx, cpu_t *self)
Initializes a CPU's scheduling context.
Definition sched.c:83
thread_t * sched_thread_unsafe(void)
Retrieves the currently running thread without disabling interrupts.
Definition sched.c:175
bool sched_is_idle(cpu_t *cpu)
Checks if the CPU is idle.
Definition sched.c:149
void sched_process_exit(uint64_t status)
Exits the current process.
Definition sched.c:191
void sched_invoke(interrupt_frame_t *frame, cpu_t *self, schedule_flags_t flags)
The main scheduling function.
Definition sched.c:480
uint64_t sched_nanosleep(clock_t timeout)
Puts the current thread to sleep.
Definition sched.c:139
process_t * sched_process_unsafe(void)
Retrieves the process of the currently running thread without disabling interrupts.
Definition sched.c:180
NORETURN void sched_idle_loop(void)
The idle loop for a CPU.
schedule_flags_t
Scheduling flags.
Definition sched.h:275
void sched_thread_exit(void)
Exits the current thread.
Definition sched.c:205
@ SCHED_NORMAL
No special flags.
Definition sched.h:276
@ SCHED_DIE
Kill and free the currently running thread.
Definition sched.h:277
static void lock_init(lock_t *lock)
Initializes a lock.
Definition lock.h:80
#define LOCK_SCOPE(lock)
Acquires a lock for the remainder of the current scope.
Definition lock.h:57
static void lock_release(lock_t *lock)
Releases a lock.
Definition lock.h:140
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
Definition lock.h:97
void timer_notify_self(void)
Trigger timer interrupt on self.
Definition timer.c:202
void timer_one_shot(cpu_t *self, clock_t uptime, clock_t timeout)
Schedule a one-shot timer interrupt.
Definition timer.c:153
void timer_subscribe(timer_ctx_t *ctx, timer_callback_t callback)
Subscribe to timer interrupts.
Definition timer.c:111
void timer_notify(cpu_t *cpu)
Trigger timer interrupt on cpu.
Definition timer.c:197
clock_t timer_uptime(void)
Time since boot.
Definition timer.c:73
#define CONFIG_MAX_TIME_SLICE
Maximum time slice configuration.
Definition config.h:89
#define CONFIG_MAX_PRIORITY_BOOST
Maximum priority boost configuration.
Definition config.h:121
#define CONFIG_LOAD_BALANCE_BIAS
Load balance bias configuration.
Definition config.h:143
#define CONFIG_MAX_RECENT_BLOCK_TIME
Maximum recent block time configuration.
Definition config.h:110
#define CONFIG_MIN_TIME_SLICE
Minimum time slice configuration.
Definition config.h:99
#define CONFIG_MAX_PRIORITY_PENALTY
Maximum priority penalty configuration.
Definition config.h:132
static void list_push(list_t *list, list_entry_t *entry)
Pushes an entry to the end of the list.
Definition list.h:345
static bool list_is_empty(list_t *list)
Checks if a list is empty.
Definition list.h:229
static void list_init(list_t *list)
Initializes a list.
Definition list.h:198
static list_entry_t * list_pop(list_t *list)
Pops the first entry from the list.
Definition list.h:361
#define MIN(x, y)
Definition math.h:16
#define LERP_INT(start, end, t, minT, maxT)
Definition math.h:23
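LERP_INT's body is not shown on this page; its use in sched_compute_time_slice and sched_compute_actual_priority suggests it linearly maps t from the range [minT, maxT] onto [start, end]. A hedged sketch of that assumed semantics, not the macro's actual definition in math.h:

// Assumed reading: map t linearly from [minT, maxT] onto [start, end] in integer arithmetic.
static int64_t lerp_int(int64_t start, int64_t end, int64_t t, int64_t minT, int64_t maxT)
{
    return start + ((end - start) * (t - minT)) / (maxT - minT);
}

// Under this reading: lerp_int(0, 4, 0, 0, 50) == 0, lerp_int(0, 4, 25, 0, 50) == 2, and
// lerp_int(0, 4, 50, 0, 50) == 4, so a thread's recentBlockTime selects how much of the
// maximum boost or penalty is applied to its base priority.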
#define PRIORITY_MIN
Definition proc.h:44
clock_t uptime(void)
System call for retrieving the time since boot.
Definition uptime.c:6
#define PRIORITY_MAX
Definition proc.h:42
uint8_t priority_t
Priority type.
Definition proc.h:41
#define NULL
Pointer error value.
Definition NULL.h:23
#define CONTAINER_OF(ptr, type, member)
Container of macro.
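The macro body is not shown here; the canonical form of this macro, which sched_queues_pop relies on to recover a thread_t from its embedded list_entry_t, is usually the following (a sketch, not necessarily this codebase's exact definition):

#include <stddef.h> // offsetof

// Assumed canonical form: step back from a member pointer to its enclosing struct.
#define CONTAINER_OF(ptr, type, member) ((type*)((char*)(ptr) - offsetof(type, member)))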
__UINT64_TYPE__ clock_t
A nanosecond time.
Definition clock_t.h:13
#define RFLAGS_INTERRUPT_ENABLE
Definition regs.h:32
#define RFLAGS_ALWAYS_SET
Definition regs.h:24
static void sched_compute_time_slice(thread_t *thread, thread_t *parent)
Definition sched.c:239
static wait_queue_t sleepQueue
Definition sched.c:23
static void sched_timer_handler(interrupt_frame_t *frame, cpu_t *self)
Definition sched.c:78
static void sched_compute_actual_priority(thread_t *thread)
Definition sched.c:258
static cpu_t * sched_find_least_loaded_cpu(cpu_t *exclude)
Definition sched.c:348
static void sched_queues_push(sched_queues_t *queues, thread_t *thread)
Definition sched.c:35
static void sched_load_balance(cpu_t *self)
Definition sched.c:431
static uint64_t sched_get_load(sched_cpu_ctx_t *ctx)
Definition sched.c:342
static bool sched_should_notify(cpu_t *target, priority_t priority)
Definition sched.c:291
static void sched_update_recent_idle_time(thread_t *thread, bool wasBlocking, clock_t uptime)
Definition sched.c:217
static cpu_t * sched_get_neighbor(cpu_t *self)
Definition sched.c:420
static thread_t * sched_queues_pop(sched_queues_t *queues, priority_t minPriority)
Definition sched.c:44
static void sched_queues_init(sched_queues_t *queues)
Definition sched.c:25
#define atomic_store(object, desired)
Definition stdatomic.h:289
#define atomic_exchange(object, desired)
Definition stdatomic.h:282
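sched_push and sched_invoke use atomic_exchange on thread->state as a claim operation: the swap is atomic, so exactly one racing caller observes the old value and is allowed to act on it. A minimal sketch of the pattern, with hypothetical state values rather than the kernel's thread_state_t:

#include <stdatomic.h>
#include <stdbool.h>

enum { ST_PARKED, ST_READY };

static _Atomic int state = ST_PARKED;

// Returns true for exactly one caller, even if several race on the same state.
static bool try_claim(void)
{
    return atomic_exchange(&state, ST_READY) == ST_PARKED;
}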
#define atomic_load(object)
Definition stdatomic.h:288
__UINT64_TYPE__ uint64_t
Definition stdint.h:17
#define UINT64_MAX
Definition stdint.h:74
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:43
CPU structure.
Definition cpu.h:42
timer_ctx_t timer
Definition cpu.h:49
cpuid_t id
Definition cpu.h:43
sched_cpu_ctx_t sched
Definition cpu.h:51
Trap Frame Structure.
Definition interrupt.h:42
uint64_t rflags
Definition interrupt.h:64
Process structure.
Definition process.h:53
pid_t id
Definition process.h:55
Per-CPU scheduling context.
Definition sched.h:95
cpu_t * owner
The cpu that owns this scheduling context.
Definition sched.h:130
thread_t * runThread
The currently running thread.
Definition sched.h:119
sched_queues_t * expired
Pointer to the currently expired queue.
Definition sched.h:110
sched_queues_t * active
Pointer to the currently active queue.
Definition sched.h:106
sched_queues_t queues[2]
Array storing both queues.
Definition sched.h:102
thread_t * idleThread
The thread that runs when the owner CPU is idling.
Definition sched.h:125
lock_t lock
The lock that protects this context, except the zombieThreads list.
Definition sched.h:129
Scheduling queues structure.
Definition sched.h:33
uint64_t length
The total number of threads in all lists.
Definition sched.h:37
uint64_t bitmap
A bitmap indicating which of the lists have threads in them.
Definition sched.h:41
list_t lists[PRIORITY_MAX]
An array of lists that store threads, one for each priority, used in a round robin fashion.
Definition sched.h:45
Per-thread scheduling context.
Definition sched.h:56
clock_t recentBlockTime
The amount of time within the last CONFIG_MAX_RECENT_BLOCK_TIME nanoseconds that the thread was blocked.
Definition sched.h:80
clock_t prevBlockCheck
The previous time when the recentBlockTime member was updated.
Definition sched.h:84
priority_t actualPriority
The actual priority of the thread.
Definition sched.h:72
clock_t deadline
The time when the time slice will actually expire, only valid while the thread is running.
Definition sched.h:64
clock_t timeSlice
The length of the threads time slice, used to determine its deadline when its scheduled.
Definition sched.h:60
uintptr_t top
The top of the stack; this address is not inclusive.
Thread of execution structure.
Definition thread.h:55
sched_thread_ctx_t sched
Definition thread.h:70
process_t * process
The parent process that the thread executes within.
Definition thread.h:57
interrupt_frame_t frame
Definition thread.h:79
list_entry_t entry
The entry for the scheduler and wait system.
Definition thread.h:56
stack_pointer_t kernelStack
The kernel stack of the thread.
Definition thread.h:68
tid_t id
The thread id, unique within a process_t.
Definition thread.h:59
Wait queue structure.
Definition wait.h:166
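Finally, a note on the active/expired pair documented above: sched_invoke pops from active and pushes preempted threads onto expired, swapping the two pointers once active drains, so every ready thread gets a turn per epoch without an explicit epoch counter. A simplified sketch of that swap, with a stand-in queue type in place of sched_queues_t:

#include <stdint.h>

typedef struct
{
    uint64_t length; // Stand-in for sched_queues_t.
} queues_t;

typedef struct
{
    queues_t pair[2];
    queues_t* active;  // Popped from when picking the next thread.
    queues_t* expired; // Preempted threads are pushed here.
} runqueue_t;

// Mirrors the swap in sched_invoke: once the active queue drains, the roles flip.
static void runqueue_swap_if_drained(runqueue_t* rq)
{
    if (rq->active->length == 0)
    {
        queues_t* temp = rq->active;
        rq->active = rq->expired;
        rq->expired = temp;
    }
}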