PatchworkOS  da8a090
A non-POSIX operating system.
sched.c
1#include <kernel/config.h>
2#include <kernel/cpu/ipi.h>
3#include <kernel/cpu/irq.h>
5
6#include <kernel/cpu/cpu.h>
7#include <kernel/cpu/gdt.h>
10#include <kernel/log/log.h>
11#include <kernel/log/panic.h>
14#include <kernel/sched/thread.h>
15#include <kernel/sched/timer.h>
16#include <kernel/sched/wait.h>
17#include <kernel/sync/lock.h>
18#include <kernel/utils/rbtree.h>
19
20#include <assert.h>
21#include <stdatomic.h>
22#include <stdint.h>
23#include <stdlib.h>
24#include <sys/bitmap.h>
25#include <sys/list.h>
26#include <sys/math.h>
27#include <sys/proc.h>
28#include <time.h>
29
30static wait_queue_t sleepQueue = WAIT_QUEUE_CREATE(sleepQueue);
31
32static _Atomic(clock_t) lastLoadBalance = ATOMIC_VAR_INIT(0);
33
34static inline int64_t sched_fixed_cmp(int128_t a, int128_t b)
35{
36 int128_t diff = SCHED_FIXED_FROM(a - b);
37 if (diff > SCHED_EPSILON)
38 {
39 return 1;
40 }
41 else if (diff < -(SCHED_EPSILON + 1))
42 {
43 return -1;
44 }
45
46 return 0;
47}
48
49static int64_t sched_node_compare(const rbnode_t* aNode, const rbnode_t* bNode)
50{
51 const sched_client_t* a = CONTAINER_OF(aNode, sched_client_t, node);
52 const sched_client_t* b = CONTAINER_OF(bNode, sched_client_t, node);
53
54 if (a->vdeadline < b->vdeadline)
55 {
56 return -1;
57 }
58 else if (a->vdeadline > b->vdeadline)
59 {
60 return 1;
61 }
62
63 if (a < b)
64 {
65 return -1;
66 }
67 else if (a > b)
68 {
69 return 1;
70 }
71
72 return 0;
73}
74
75static void sched_node_update(rbnode_t* node)
76{
77 sched_client_t* client = CONTAINER_OF(node, sched_client_t, node);
78
79 vclock_t minEligible = client->veligible;
80 for (rbnode_direction_t dir = RBNODE_LEFT; dir <= RBNODE_RIGHT; dir++)
81 {
82 rbnode_t* child = node->children[dir];
83 if (child == NULL)
84 {
85 continue;
86 }
87
88 sched_client_t* childClient = CONTAINER_OF(child, sched_client_t, node);
89 if (sched_fixed_cmp(childClient->vminEligible, minEligible) < 0)
90 {
91 minEligible = childClient->vminEligible;
92 }
93 }
94 client->vminEligible = minEligible;
95}
96
97static bool sched_is_cache_hot(thread_t* thread, clock_t uptime)
98{
99 return uptime - thread->sched.stop < CONFIG_CACHE_HOT_THRESHOLD;
100}
101
102void sched_client_init(sched_client_t* client)
103{
104 assert(client != NULL);
105
106 client->node = RBNODE_CREATE;
107 client->weight = UINT32_MAX; // Invalid
108 client->vdeadline = SCHED_FIXED_ZERO;
109 client->veligible = SCHED_FIXED_ZERO;
110 client->vminEligible = SCHED_FIXED_ZERO;
111 client->stop = 0;
112 client->lastCpu = NULL;
113}
114
115void sched_client_update_veligible(sched_client_t* client, vclock_t newVeligible)
116{
117 assert(client != NULL);
118
119 client->veligible = newVeligible;
120 client->vminEligible = newVeligible;
121 client->vdeadline = newVeligible + SCHED_FIXED_TO(CONFIG_TIME_SLICE) / client->weight;
122}
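// A worked example of the deadline rule above (the numbers are illustrative assumptions, not values
// taken from config.h): with a 10 ms time slice and weight 2, a client whose veligible is set to the
// current vtime gets vdeadline = vtime + 5 ms of virtual time, while a weight-1 client would get
// vtime + 10 ms. Doubling the weight halves the virtual interval between eligibility and deadline,
// so heavier clients sort earlier in the vdeadline-ordered runqueue.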
123
124// Must be called with the scheduler lock held.
125static void sched_vtime_reset(sched_t* sched, clock_t uptime)
126{
127 assert(sched != NULL);
128
129 sched->lastUpdate = uptime;
130 sched->vtime = SCHED_FIXED_ZERO;
131}
132
133// Must be called with the scheduler lock held.
134static void sched_vtime_update(sched_t* sched, clock_t uptime)
135{
136 assert(sched != NULL);
137
138 clock_t delta = uptime - sched->lastUpdate;
139 sched->lastUpdate = uptime;
140
141 int64_t totalWeight = atomic_load(&sched->totalWeight);
142 if (totalWeight == 0 || sched->runThread == sched->idleThread)
143 {
144 sched_vtime_reset(sched, uptime);
145 return;
146 }
147
148 // Eq 5.
149 sched->vtime += SCHED_FIXED_TO(delta) / totalWeight;
150
151 sched_client_update_veligible(&sched->runThread->sched,
152 sched->runThread->sched.veligible + SCHED_FIXED_TO(delta) / sched->runThread->sched.weight);
153 rbtree_fix(&sched->runqueue, &sched->runThread->sched.node);
154}
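// Worked example of the two updates above (illustrative numbers, not taken from the kernel): if 4 ms
// of real time pass with totalWeight = 4, vtime advances by 4 ms / 4 = 1 ms of virtual time, while
// the running client's veligible advances by 4 ms / weight (2 ms for a weight-2 client). When other
// clients share the queue, the runner's veligible therefore moves ahead of vtime, so its lag
// (vtime - veligible) * weight goes negative (it has been served), while the waiters, whose
// veligible is unchanged, accumulate positive lag and eventually become eligible again.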
155
156// Should be called with sched lock held.
157static void sched_enter(sched_t* sched, thread_t* thread, clock_t uptime)
158{
159 assert(sched != NULL);
160 assert(thread != NULL);
161 assert(thread != sched->idleThread);
162
163 sched_vtime_update(sched, uptime);
164
165 sched_client_t* client = &thread->sched;
166 client->weight = atomic_load(&thread->process->priority) + SCHED_WEIGHT_BASE;
167 atomic_fetch_add(&sched->totalWeight, client->weight);
168
169 sched_client_update_veligible(client, sched->vtime);
170
171 rbtree_insert(&sched->runqueue, &client->node);
172 atomic_store(&thread->state, THREAD_ACTIVE);
173}
174
175// Should be called with sched lock held.
176static void sched_leave(sched_t* sched, thread_t* thread, clock_t uptime)
177{
178 assert(sched != NULL);
179 assert(thread != NULL);
180 assert(thread != sched->idleThread);
181
182 sched_client_t* client = &thread->sched;
183
184 lag_t lag = (sched->vtime - client->veligible) * client->weight;
185 int64_t totalWeight = atomic_fetch_sub(&sched->totalWeight, client->weight) - client->weight;
186
187 rbtree_remove(&sched->runqueue, &client->node);
188
189 if (totalWeight == 0)
190 {
191 sched_vtime_reset(sched, uptime);
192 return;
193 }
194
195 // Adjust the scheduler's time such that the sum of all threads' lag remains zero.
196 sched->vtime += lag / totalWeight;
197}
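// Why the adjustment above keeps the lag sum at zero (a short derivation from the definitions in this
// file, not additional kernel code): each client's lag is (vtime - veligible) * weight, and the sum of
// all lags is zero before the removal. Removing a client with lag L would leave the remaining sum at
// -L; adding L / totalWeight to vtime (totalWeight being the weight that remains) raises every
// remaining lag_i by weight_i * L / totalWeight, which sums to exactly L and restores the invariant
// that sched_verify() checks below.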
198
199static cpu_t* sched_get_least_loaded(void)
200{
201 cpu_t* leastLoaded = NULL;
202 uint64_t leastLoad = UINT64_MAX;
203
204 for (uint32_t i = 0; i < _cpuAmount; i++)
205 {
206 cpu_t* cpu = _cpus[i];
207 sched_t* sched = &cpu->sched;
208
209 uint64_t load = atomic_load(&sched->totalWeight);
210
211 if (load < leastLoad)
212 {
213 leastLoad = load;
214 leastLoaded = cpu;
215 }
216 }
217
218 assert(leastLoaded != NULL);
219 return leastLoaded;
220}
221
222static thread_t* sched_steal(void)
223{
224 cpu_t* self = cpu_get_unsafe();
225 sched_t* sched = &self->sched;
226
227 cpu_t* mostLoaded = NULL;
228 uint64_t mostLoad = 0;
229
230 cpu_t* cpu;
231 CPU_FOR_EACH(cpu)
232 {
233 if (cpu == self)
234 {
235 continue;
236 }
237
238 sched_t* sched = &cpu->sched;
239
240 uint64_t load = atomic_load(&sched->totalWeight);
241
242 if (load > mostLoad)
243 {
244 mostLoad = load;
245 mostLoaded = cpu;
246 }
247 }
248
249 if (mostLoaded == NULL)
250 {
251 return NULL;
252 }
253
254 lock_acquire(&mostLoaded->sched.lock);
255
256 clock_t uptime = sys_time_uptime();
257
258 thread_t* thread;
259 RBTREE_FOR_EACH(thread, &mostLoaded->sched.runqueue, sched.node)
260 {
261 if (thread == mostLoaded->sched.runThread || sched_is_cache_hot(thread, uptime))
262 {
263 continue;
264 }
265
266 sched_leave(&mostLoaded->sched, thread, uptime);
267 lock_release(&mostLoaded->sched.lock);
268 return thread;
269 }
270
271 lock_release(&mostLoaded->sched.lock);
272 return NULL;
273}
274
275// Should be called with scheduler lock held.
276static thread_t* sched_first_eligible(sched_t* sched)
277{
278 assert(sched != NULL);
279
280 rbnode_t* current = sched->runqueue.root;
281 while (current != NULL)
282 {
283 sched_client_t* client = CONTAINER_OF(current, sched_client_t, node);
284
285 rbnode_t* left = current->children[RBNODE_LEFT];
286 if (left != NULL)
287 {
288 sched_client_t* leftClient = CONTAINER_OF(left, sched_client_t, node);
289 if (sched_fixed_cmp(leftClient->vminEligible, sched->vtime) <= 0)
290 {
291 current = left;
292 continue;
293 }
294 }
295
296 if (sched_fixed_cmp(client->veligible, sched->vtime) <= 0)
297 {
298 return CONTAINER_OF(client, thread_t, sched);
299 }
300
301 current = current->children[RBNODE_RIGHT];
302 }
303
304#ifndef NDEBUG
305 if (!rbtree_is_empty(&sched->runqueue))
306 {
307 vclock_t vminEligible =
308 CONTAINER_OF_SAFE(rbtree_find_min(sched->runqueue.root), sched_client_t, node)->vminEligible;
309 panic(NULL, "No eligible threads found, vminEligible=%lld vtime=%lld", SCHED_FIXED_FROM(vminEligible),
310 SCHED_FIXED_FROM(sched->vtime));
311 }
312#endif
313
314 thread_t* victim = sched_steal();
315 if (victim != NULL)
316 {
317 sched_enter(sched, victim, sys_time_uptime());
318 return victim;
319 }
320
321 return sched->idleThread;
322}
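// How the search above stays O(log n) (a summary of the code, not extra behaviour): the runqueue is
// ordered by vdeadline, and sched_node_update() augments every node with vminEligible, the smallest
// veligible in its subtree. At each step the walk descends left if that subtree still contains an
// eligible client (vminEligible <= vtime), because any such client also has an earlier vdeadline;
// otherwise it takes the current node if it is eligible, or abandons the left side and descends
// right. The result is the eligible client with the earliest virtual deadline; if none exists the
// CPU tries to steal work and finally falls back to its idle thread.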
323
324void sched_init(sched_t* sched)
325{
326 assert(sched != NULL);
327
328 atomic_init(&sched->totalWeight, 0);
329 rbtree_init(&sched->runqueue, sched_node_compare, sched_node_update);
330 sched->vtime = SCHED_FIXED_ZERO;
331 sched->lastUpdate = 0;
332 lock_init(&sched->lock);
333
334 sched->idleThread = thread_new(process_get_kernel());
335 if (sched->idleThread == NULL)
336 {
337 panic(NULL, "Failed to create idle thread");
338 }
339
340 sched->idleThread->frame.rip = (uintptr_t)sched_idle_loop;
341 sched->idleThread->frame.rsp = sched->idleThread->kernelStack.top;
342 sched->idleThread->frame.cs = GDT_CS_RING0;
343 sched->idleThread->frame.ss = GDT_SS_RING0;
344 sched->idleThread->frame.rflags = RFLAGS_ALWAYS_SET | RFLAGS_INTERRUPT_ENABLE;
345
346 sched->runThread = sched->idleThread;
347 atomic_store(&sched->runThread->state, THREAD_ACTIVE);
348}
349
350void sched_start(thread_t* bootThread)
351{
352 assert(bootThread != NULL);
353
354 cpu_t* self = cpu_get_unsafe();
355 sched_t* sched = &self->sched;
356
357 lock_acquire(&sched->lock);
358
359 assert(self->sched.runThread == self->sched.idleThread);
361 assert(atomic_load(&sched->totalWeight) == 0);
362
363 bootThread->sched.weight = atomic_load(&bootThread->process->priority) + SCHED_WEIGHT_BASE;
364 sched_client_update_veligible(&bootThread->sched, sched->vtime);
365 atomic_store(&bootThread->state, THREAD_ACTIVE);
366
367 atomic_fetch_add(&sched->totalWeight, bootThread->sched.weight);
368 sched->runThread = bootThread;
369 rbtree_insert(&sched->runqueue, &bootThread->sched.node);
370
371 lock_release(&sched->lock);
372 thread_jump(bootThread);
373}
374
375void sched_submit(thread_t* thread)
376{
377 assert(thread != NULL);
378
379 cpu_t* self = cpu_get();
380
381 cpu_t* target;
382 if (thread->sched.lastCpu != NULL && sched_is_cache_hot(thread, sys_time_uptime()))
383 {
384 target = thread->sched.lastCpu;
385 }
386 else if (atomic_load(&self->sched.totalWeight) == 0)
387 {
388 target = self;
389 }
390 else
391 {
392 target = sched_get_least_loaded();
393 }
394
395 lock_acquire(&target->sched.lock);
396 sched_enter(&target->sched, thread, sys_time_uptime());
397 lock_release(&target->sched.lock);
398
399 bool shouldWake = self != target || !self->interrupt.inInterrupt;
400 cpu_put();
401
402 if (shouldWake)
403 {
404 ipi_wake_up(target, IPI_SINGLE);
405 }
406}
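// Placement summary for the submit path above (a reading of this function, with the rationale for the
// last check hedged): a thread goes back to its last CPU while its working set is likely still cached
// there, otherwise to this CPU if it has no runnable clients (totalWeight == 0), otherwise to the
// least loaded CPU. The IPI is skipped only when the target is this CPU and we are already inside an
// interrupt, presumably because the scheduler will run anyway on the way out of that interrupt.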
407
408#ifndef NDEBUG
409static void sched_verify_min_eligible(sched_t* sched, rbnode_t* node)
410{
411 sched_client_t* client = CONTAINER_OF(node, sched_client_t, node);
412
413 bool hasChildren = false;
414 for (rbnode_direction_t dir = RBNODE_LEFT; dir <= RBNODE_RIGHT; dir++)
415 {
416 rbnode_t* child = node->children[dir];
417 if (child == NULL)
418 {
419 continue;
420 }
421
422 hasChildren = true;
423
424 sched_client_t* childClient = CONTAINER_OF(child, sched_client_t, node);
425
426 if (sched_fixed_cmp(client->vminEligible, childClient->vminEligible) > 0)
427 {
428 panic(NULL, "vminEligible incorrect for node with vdeadline %lld, expected %lld but got %lld",
429 SCHED_FIXED_FROM(client->vdeadline), SCHED_FIXED_FROM(childClient->vminEligible),
430 SCHED_FIXED_FROM(client->vminEligible));
431 }
432
433 sched_verify_min_eligible(sched, child);
434 }
435
436 if (!hasChildren && sched_fixed_cmp(client->vminEligible, client->veligible) != 0)
437 {
438 panic(NULL, "Leaf node vminEligible != veligible, vminEligible=%lld veligible=%lld",
439 SCHED_FIXED_FROM(client->vminEligible), SCHED_FIXED_FROM(client->veligible));
440 }
441}
442
443static void sched_verify(sched_t* sched)
444{
445 int64_t totalWeight = 0;
446 bool runThreadFound = false;
447 sched_client_t* client;
448 RBTREE_FOR_EACH(client, &sched->runqueue, node)
449 {
450 thread_t* thread = CONTAINER_OF(client, thread_t, sched);
451 totalWeight += client->weight;
452 assert(client->weight > 0);
453 assert(thread != sched->idleThread);
454
455 if (atomic_load(&thread->state) != THREAD_ACTIVE && thread != sched->runThread)
456 {
457 panic(NULL, "Thread in runqueue has invalid state %d", atomic_load(&thread->state));
458 }
459
460 if (thread == sched->runThread)
461 {
462 runThreadFound = true;
463 }
464 }
465
466 if (sched->runThread != sched->idleThread && !runThreadFound)
467 {
468 panic(NULL, "Running thread not found in runqueue");
469 }
470
471 if (totalWeight != atomic_load(&sched->totalWeight))
472 {
473 panic(NULL, "sched totalWeight incorrect, expected %lld but got %lld", totalWeight,
474 atomic_load(&sched->totalWeight));
475 }
476
477 sched_client_t* min = CONTAINER_OF_SAFE(rbtree_find_min(sched->runqueue.root), sched_client_t, node);
478 if (min != NULL)
479 {
480 sched_client_t* other;
481 RBTREE_FOR_EACH(other, &sched->runqueue, node)
482 {
483 if (sched_fixed_cmp(other->vdeadline, min->vdeadline) < 0)
484 {
485 panic(NULL, "runqueue not sorted, node with vdeadline %lld found, but min is %lld",
486 SCHED_FIXED_FROM(other->vdeadline), SCHED_FIXED_FROM(min->vdeadline));
487 }
488 }
489 }
490
491 sched_client_t* root = CONTAINER_OF_SAFE(sched->runqueue.root, sched_client_t, node);
492 if (root != NULL)
493 {
494 sched_verify_min_eligible(sched, &root->node);
495 }
496
497 sched_client_t* iter;
498 lag_t sumLag = SCHED_FIXED_ZERO;
499 RBTREE_FOR_EACH(iter, &sched->runqueue, node)
500 {
501 lag_t lag = (sched->vtime - iter->veligible) * iter->weight;
502 sumLag += lag;
503 }
504 if (sched_fixed_cmp(sumLag, SCHED_FIXED_ZERO) != 0)
505 {
506 LOG_DEBUG("debug info (vtime=%lld lagValue=%lld lagFixed=%lld):\n", SCHED_FIXED_FROM(sched->vtime),
507 SCHED_FIXED_FROM(sumLag), sumLag);
508 RBTREE_FOR_EACH(iter, &sched->runqueue, node)
509 {
510 thread_t* thread = CONTAINER_OF(iter, thread_t, sched);
511 lag_t lag = (sched->vtime - iter->veligible) * iter->weight;
512 LOG_DEBUG(" process %lld thread %lld lag=%lld veligible=%lld vdeadline=%lld weight=%lld\n",
513 thread->process->id, thread->id, SCHED_FIXED_FROM(lag), SCHED_FIXED_FROM(iter->veligible),
514 SCHED_FIXED_FROM(iter->vdeadline), iter->weight);
515 }
516 panic(NULL, "Total lag is not zero, got %lld", SCHED_FIXED_FROM(sumLag));
517 }
518}
519#endif
520
521void sched_do(interrupt_frame_t* frame, cpu_t* self)
522{
523 assert(frame != NULL);
524 assert(self != NULL);
525
526 sched_t* sched = &self->sched;
527 lock_acquire(&sched->lock);
528
529 if (sched->runThread == sched->idleThread && rbtree_is_empty(&sched->runqueue)) // Nothing to do
530 {
531 lock_release(&sched->lock);
532 return;
533 }
534
535 clock_t uptime = sys_time_uptime();
536 sched_vtime_update(sched, uptime);
537
538 assert(sched->runThread != NULL);
539 assert(sched->idleThread != NULL);
540
541 // We can't free a thread while we are still using its address space, so we defer the free until after we have
542 // switched threads. Since we have a per-CPU interrupt stack, we don't need to worry about a use-after-free of the
543 // stack.
544 thread_t* volatile threadToFree = NULL;
545
546#ifndef NDEBUG
547 sched_verify(sched);
548#endif
549
550 thread_state_t state = atomic_load(&sched->runThread->state);
551 switch (state)
552 {
553 case THREAD_DYING:
554 assert(sched->runThread != sched->idleThread);
555
556 threadToFree = sched->runThread;
557 sched_leave(sched, sched->runThread, uptime);
558 break;
559 case THREAD_PRE_BLOCK:
560 case THREAD_UNBLOCKING:
561 assert(sched->runThread != sched->idleThread);
562 if (wait_block_finalize(frame, self, sched->runThread, uptime))
563 {
564 sched_leave(sched, sched->runThread, uptime);
565 break;
566 }
567
568 // Early unblock
569 atomic_store(&sched->runThread->state, THREAD_ACTIVE);
570 break;
571 case THREAD_ACTIVE:
572 break;
573 default:
574 panic(NULL, "Thread in invalid state in sched_do() state=%d", state);
575 }
576
577 thread_t* next = sched_first_eligible(sched);
578 assert(next != NULL);
579
580 if (next != sched->runThread)
581 {
582 sched->runThread->sched.lastCpu = self;
583 sched->runThread->sched.stop = uptime;
584 thread_save(sched->runThread, frame);
586 thread_load(next, frame);
587 sched->runThread = next;
588 }
589
590 if (sched->runThread != sched->idleThread)
591 {
592 timer_set(uptime, uptime + CONFIG_TIME_SLICE);
593 }
594
595 if (threadToFree != NULL)
596 {
597 assert(threadToFree != sched->runThread);
598 thread_free(threadToFree);
599 }
600
601 lock_release(&sched->lock);
602}
603
604bool sched_is_idle(cpu_t* cpu)
605{
606 assert(cpu != NULL);
607
608 LOCK_SCOPE(&cpu->sched.lock);
609 return cpu->sched.runThread == cpu->sched.idleThread;
610}
611
612thread_t* sched_thread(void)
613{
614 cpu_t* self = cpu_get();
615 thread_t* thread = self->sched.runThread;
616 cpu_put();
617 return thread;
618}
619
620process_t* sched_process(void)
621{
622 thread_t* thread = sched_thread();
623 return thread->process;
624}
625
626thread_t* sched_thread_unsafe(void)
627{
628 cpu_t* self = cpu_get_unsafe();
629 return self->sched.runThread;
630}
631
632process_t* sched_process_unsafe(void)
633{
634 thread_t* thread = sched_thread_unsafe();
635 return thread->process;
636}
637
638uint64_t sched_nanosleep(clock_t timeout)
639{
640 return WAIT_BLOCK_TIMEOUT(&sleepQueue, false, timeout);
641}
642
643void sched_yield(void)
644{
645 ipi_invoke();
646}
647
648void sched_process_exit(int32_t status)
649{
650 thread_t* thread = sched_thread();
651 process_kill(thread->process, status);
652 ipi_invoke();
653
654 panic(NULL, "sched_process_exit() returned unexpectedly");
655}
656
657void sched_thread_exit(void)
658{
659 thread_t* thread = sched_thread();
660 if (thread_send_note(thread, "kill", 4) == ERR)
661 {
662 panic(NULL, "Failed to send kill note to self in sched_thread_exit()");
663 }
664 ipi_invoke();
665
666 panic(NULL, "Return to sched_thread_exit");
667}
668
669SYSCALL_DEFINE(SYS_NANOSLEEP, uint64_t, clock_t nanoseconds)
670{
671 return sched_nanosleep(nanoseconds);
672}
673
675{
676 sched_process_exit(status);
677
678 panic(NULL, "Return to syscall_process_exit");
679}
680
682{
683 sched_thread_exit();
684
685 panic(NULL, "Return to syscall_thread_exit");
686}
687
688SYSCALL_DEFINE(SYS_YIELD, uint64_t)
689{
690 sched_yield();
691 return 0;
692}
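The virtual-time bookkeeping above can be exercised outside the kernel. The following user-space sketch models the update rules from this file with plain integers; it drops the 128-bit fixed-point math, the locking, and the red-black runqueue, and the time-slice and weight values are hypothetical. It is an illustration of the invariants, not PatchworkOS code.

#include <stdint.h>
#include <stdio.h>

// Miniature model of the per-CPU bookkeeping: vtime advances by delta / totalWeight, the
// running client is charged delta / weight, and lag is (vtime - veligible) * weight.
// All values are in arbitrary integer time units.
#define SLICE 12 // hypothetical time slice, standing in for CONFIG_TIME_SLICE

typedef struct
{
    int64_t weight;
    int64_t veligible;
    int64_t vdeadline;
} client_t;

static void update_veligible(client_t* c, int64_t newVeligible)
{
    c->veligible = newVeligible;
    c->vdeadline = newVeligible + SLICE / c->weight; // same shape as sched_client_update_veligible()
}

int main(void)
{
    client_t a = {.weight = 2}; // heavier client
    client_t b = {.weight = 1};
    int64_t vtime = 0;
    int64_t totalWeight = a.weight + b.weight;

    update_veligible(&a, vtime);
    update_veligible(&b, vtime);

    // Run client a for 6 time units, mirroring sched_vtime_update().
    int64_t delta = 6;
    vtime += delta / totalWeight;                         // global advance: +2
    update_veligible(&a, a.veligible + delta / a.weight); // the runner is charged: +3

    int64_t lagA = (vtime - a.veligible) * a.weight; // -2: a has been served
    int64_t lagB = (vtime - b.veligible) * b.weight; // +2: b is owed service
    printf("vtime=%lld lagA=%lld lagB=%lld sum=%lld\n", (long long)vtime, (long long)lagA,
        (long long)lagB, (long long)(lagA + lagB));
    printf("a: veligible=%lld vdeadline=%lld  b: veligible=%lld vdeadline=%lld\n",
        (long long)a.veligible, (long long)a.vdeadline, (long long)b.veligible, (long long)b.vdeadline);
    return 0;
}

The lag sum printed by the model stays at zero after the update, which is the same invariant sched_verify() asserts over the real runqueue.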
#define assert(expression)
Definition assert.h:29
#define CLOCKS_PER_SEC
Definition clock_t.h:15
@ GDT_CS_RING0
Value to load into the CS register for kernel code.
Definition gdt.h:45
@ GDT_SS_RING0
Value to load into the SS register for kernel data.
Definition gdt.h:46
void ipi_invoke(void)
Invoke an IPI interrupt on the current CPU.
Definition ipi.c:267
void ipi_wake_up(cpu_t *cpu, ipi_flags_t flags)
Wake up one or more CPUs.
Definition ipi.c:213
@ IPI_SINGLE
Send the IPI to the specified CPU.
Definition ipi.h:105
#define SYSCALL_DEFINE(num, returnType,...)
Macro to define a syscall.
Definition syscall.h:144
@ SYS_PROCESS_EXIT
Definition syscall.h:65
@ SYS_NANOSLEEP
Definition syscall.h:68
@ SYS_YIELD
Definition syscall.h:89
@ SYS_THREAD_EXIT
Definition syscall.h:66
static cpu_t * cpu_get_unsafe(void)
Gets the current CPU structure without disabling interrupts.
Definition cpu.h:289
static cpu_t * cpu_get(void)
Gets the current CPU structure.
Definition cpu.h:263
static void cpu_put(void)
Releases the current CPU structure.
Definition cpu.h:277
cpu_t * _cpus[CPU_MAX]
Array of pointers to cpu_t structures for each CPU, indexed by CPU ID.
Definition cpu.c:23
#define CPU_FOR_EACH(cpu)
Macro to iterate over all CPUs.
Definition cpu.h:339
uint16_t _cpuAmount
The number of CPUs currently identified.
Definition cpu.c:24
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
Definition panic.c:266
#define LOG_DEBUG(format,...)
Definition log.h:100
process_t * process_get_kernel(void)
Gets the kernel process.
Definition process.c:943
void process_kill(process_t *process, int32_t status)
Kills a process.
Definition process.c:776
void thread_free(thread_t *thread)
Frees a thread structure.
Definition thread.c:124
uint64_t thread_send_note(thread_t *thread, const void *buffer, uint64_t count)
Send a note to a thread.
Definition thread.c:161
void thread_load(thread_t *thread, interrupt_frame_t *frame)
Load state from a thread.
Definition thread.c:147
_NORETURN void thread_jump(thread_t *thread)
Jump to a thread by calling thread_load() and then loading its interrupt frame.
thread_t * thread_new(process_t *process)
Creates a new thread structure.
Definition thread.c:70
void thread_save(thread_t *thread, const interrupt_frame_t *frame)
Save state to a thread.
Definition thread.c:140
thread_state_t
Thread state enum.
Definition thread.h:30
@ THREAD_UNBLOCKING
Has started unblocking, used to prevent the same thread from being unblocked multiple times.
Definition thread.h:35
@ THREAD_PRE_BLOCK
Has started the process of blocking but has not yet been given to an owner CPU.
Definition thread.h:33
@ THREAD_DYING
Definition thread.h:43
@ THREAD_ACTIVE
Is either running or ready to run.
Definition thread.h:32
bool wait_block_finalize(interrupt_frame_t *frame, cpu_t *self, thread_t *thread, clock_t uptime)
Finalize blocking of a thread.
Definition wait.c:231
#define WAIT_BLOCK_TIMEOUT(queue, condition, timeout)
Blocks until the condition is true; the condition will be tested on every wakeup. Reaching the timeout wi...
Definition wait.h:72
#define WAIT_QUEUE_CREATE(name)
Create a wait queue initializer.
Definition wait.h:219
void sched_process_exit(int32_t status)
Terminates the currently executing process and all its threads.
Definition sched.c:648
#define SCHED_FIXED_TO(x)
Convert a regular integer to fixed-point representation.
Definition sched.h:344
#define SCHED_FIXED_FROM(x)
Convert a fixed-point value to a regular integer.
Definition sched.h:349
void sched_init(sched_t *sched)
Initialize the scheduler for a CPU.
Definition sched.c:324
#define SCHED_EPSILON
The minimum difference between two virtual clock or lag values to consider them unequal.
Definition sched.h:339
process_t * sched_process(void)
Retrieves the process of the currently running thread.
Definition sched.c:620
void sched_client_init(sched_client_t *client)
Initialize the scheduler context for a thread.
Definition sched.c:102
int128_t vclock_t
Virtual clock type.
Definition sched.h:316
void sched_start(thread_t *bootThread)
Starts the scheduler by jumping to the boot thread.
Definition sched.c:350
thread_t * sched_thread(void)
Retrieves the currently running thread.
Definition sched.c:612
void sched_yield(void)
Yield the current thread's time slice to allow other threads to run.
Definition sched.c:643
thread_t * sched_thread_unsafe(void)
Retrieves the currently running thread without disabling interrupts.
Definition sched.c:626
#define SCHED_WEIGHT_BASE
Base weight added to all threads.
Definition sched.h:361
bool sched_is_idle(cpu_t *cpu)
Checks if the CPU is currently idle.
Definition sched.c:604
void sched_submit(thread_t *thread)
Submits a thread to the scheduler.
Definition sched.c:375
_NORETURN void sched_idle_loop(void)
The idle loop for the scheduler.
uint64_t sched_nanosleep(clock_t timeout)
Sleeps the current thread for a specified duration in nanoseconds.
Definition sched.c:638
process_t * sched_process_unsafe(void)
Retrieves the process of the currently running thread without disabling interrupts.
Definition sched.c:632
#define SCHED_FIXED_ZERO
Fixed-point zero.
Definition sched.h:334
void sched_do(interrupt_frame_t *frame, cpu_t *self)
Perform a scheduling operation.
Definition sched.c:521
void sched_thread_exit(void)
Terminates the currently executing thread.
Definition sched.c:657
static void lock_init(lock_t *lock)
Initializes a lock.
Definition lock.h:86
#define LOCK_SCOPE(lock)
Acquires a lock for the remainder of the current scope.
Definition lock.h:57
static void lock_release(lock_t *lock)
Releases a lock.
Definition lock.h:146
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
Definition lock.h:103
clock_t sys_time_uptime(void)
Time since boot.
Definition sys_time.c:99
void timer_set(clock_t uptime, clock_t deadline)
Schedule a one-shot timer interrupt on the current CPU.
Definition timer.c:134
#define RBTREE_FOR_EACH(elem, tree, member)
Iterates over a Red-Black Tree in ascending order.
Definition rbtree.h:258
rbnode_t * rbtree_find_min(rbnode_t *node)
Find the minimum node in a subtree.
Definition rbtree.c:184
rbnode_direction_t
Red-Black Tree Node Directions.
Definition rbtree.h:62
void rbtree_init(rbtree_t *tree, rbnode_compare_t compare, rbnode_update_t update)
Initialize a Red-Black Tree.
Definition rbtree.c:30
void rbtree_remove(rbtree_t *tree, rbnode_t *node)
Remove a node from the Red-Black Tree.
Definition rbtree.c:370
#define RBNODE_CREATE
Create a Red-Black Tree Node initializer.
Definition rbtree.h:104
void rbtree_insert(rbtree_t *tree, rbnode_t *node)
Insert a node into the Red-Black Tree.
Definition rbtree.c:149
void rbtree_fix(rbtree_t *tree, rbnode_t *node)
Move the node to its correct position in the Red-Black Tree.
Definition rbtree.c:526
bool rbtree_is_empty(const rbtree_t *tree)
Check if the Red-Black Tree is empty.
Definition rbtree.c:553
@ RBNODE_RIGHT
Definition rbtree.h:64
@ RBNODE_LEFT
Definition rbtree.h:63
#define CONFIG_CACHE_HOT_THRESHOLD
Cache hot threshold configuration.
Definition config.h:110
#define CONFIG_TIME_SLICE
Time slice configuration.
Definition config.h:99
clock_t uptime(void)
System call for retrieving the time since boot.
Definition uptime.c:6
#define NULL
Pointer error value.
Definition NULL.h:23
#define ERR
Integer error value.
Definition ERR.h:17
#define CONTAINER_OF(ptr, type, member)
Container of macro.
#define CONTAINER_OF_SAFE(ptr, type, member)
Safe container of macro.
__UINT64_TYPE__ clock_t
A nanosecond time.
Definition clock_t.h:13
static atomic_long next
Definition main.c:11
#define RFLAGS_INTERRUPT_ENABLE
Definition regs.h:32
#define RFLAGS_ALWAYS_SET
Definition regs.h:24
static cpu_t * sched_get_least_loaded(void)
Definition sched.c:199
static int64_t sched_node_compare(const rbnode_t *aNode, const rbnode_t *bNode)
Definition sched.c:49
static bool sched_is_cache_hot(thread_t *thread, clock_t uptime)
Definition sched.c:97
static thread_t * sched_steal(void)
Definition sched.c:222
void sched_client_update_veligible(sched_client_t *client, vclock_t newVeligible)
Definition sched.c:115
static void sched_verify(sched_t *sched)
Definition sched.c:443
static wait_queue_t sleepQueue
Definition sched.c:30
static void sched_enter(sched_t *sched, thread_t *thread, clock_t uptime)
Definition sched.c:157
static void sched_verify_min_eligible(sched_t *sched, rbnode_t *node)
Definition sched.c:409
static void sched_vtime_update(sched_t *sched, clock_t uptime)
Definition sched.c:134
static void sched_leave(sched_t *sched, thread_t *thread, clock_t uptime)
Definition sched.c:176
static thread_t * sched_first_eligible(sched_t *sched)
Definition sched.c:276
static void sched_vtime_reset(sched_t *sched, clock_t uptime)
Definition sched.c:125
static void sched_node_update(rbnode_t *node)
Definition sched.c:75
#define atomic_store(object, desired)
Definition stdatomic.h:289
#define _Atomic(T)
Definition stdatomic.h:59
#define atomic_fetch_sub(object, operand)
Definition stdatomic.h:286
#define atomic_load(object)
Definition stdatomic.h:288
#define ATOMIC_VAR_INIT(value)
Definition stdatomic.h:74
#define atomic_fetch_add(object, operand)
Definition stdatomic.h:283
#define atomic_init(obj, value)
Definition stdatomic.h:75
__UINT32_TYPE__ uint32_t
Definition stdint.h:15
__INT32_TYPE__ int32_t
Definition stdint.h:14
__UINT64_TYPE__ uint64_t
Definition stdint.h:17
#define UINT64_MAX
Definition stdint.h:74
__int128_t int128_t
Definition stdint.h:169
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:43
#define UINT32_MAX
Definition stdint.h:70
__INT64_TYPE__ int64_t
Definition stdint.h:16
CPU structure.
Definition cpu.h:122
interrupt_ctx_t interrupt
Definition cpu.h:130
sched_t sched
Definition cpu.h:134
Trap Frame Structure.
Definition interrupt.h:143
Lag type.
Process structure.
Definition process.h:158
pid_t id
Definition process.h:160
Red-Black Tree Node.
Definition rbtree.h:93
rbnode_t * children[RBNODE_AMOUNT]
Definition rbtree.h:95
rbnode_t * root
Definition rbtree.h:139
Per-thread scheduler context.
Definition sched.h:370
clock_t stop
The real time when the thread previously stopped executing.
Definition sched.h:379
vclock_t vdeadline
Definition sched.h:376
vclock_t veligible
The virtual time at which the thread becomes eligible to run (lag >= 0).
Definition sched.h:377
vclock_t vminEligible
The minimum virtual eligible time of the subtree in the runqueue.
Definition sched.h:378
rbnode_t node
The node in the scheduler's runqueue.
Definition sched.h:371
cpu_t * lastCpu
The last CPU the thread was scheduled on; it stopped running at the time given by stop.
Definition sched.h:380
int64_t weight
The weight of the thread.
Definition sched.h:372
Per-CPU scheduler.
Definition sched.h:393
vclock_t vtime
The current virtual time of the CPU.
Definition sched.h:396
lock_t lock
The lock protecting the scheduler.
Definition sched.h:398
clock_t lastUpdate
The real time when the last vtime update occurred.
Definition sched.h:397
thread_t *volatile runThread
The currently running thread on this CPU.
Definition sched.h:400
rbtree_t runqueue
Contains all runnable threads, including the currently running thread, sorted by vdeadline.
Definition sched.h:395
thread_t *volatile idleThread
The idle thread for this CPU.
Definition sched.h:399
uintptr_t top
The top of the stack; this address is not inclusive.
Thread of execution structure.
Definition thread.h:63
process_t * process
The parent process that the thread executes within.
Definition thread.h:64
interrupt_frame_t frame
Definition thread.h:87
stack_pointer_t kernelStack
The kernel stack of the thread.
Definition thread.h:75
tid_t id
The thread id, unique within a process_t.
Definition thread.h:66
sched_client_t sched
Definition thread.h:77
The primitive that threads block on.
Definition wait.h:182