PatchworkOS  19e446b
A non-POSIX operating system.
sched.c
Go to the documentation of this file.
1#include <kernel/config.h>
2#include <kernel/cpu/ipi.h>
3#include <kernel/cpu/irq.h>
5
6#include <kernel/cpu/cpu.h>
7#include <kernel/cpu/gdt.h>
10#include <kernel/log/log.h>
11#include <kernel/log/panic.h>
12#include <kernel/proc/process.h>
13#include <kernel/sched/clock.h>
14#include <kernel/sched/thread.h>
15#include <kernel/sched/timer.h>
16#include <kernel/sched/wait.h>
17#include <kernel/sync/lock.h>
18#include <kernel/utils/rbtree.h>
19
20#include <assert.h>
21#include <stdatomic.h>
22#include <stdint.h>
23#include <stdlib.h>
24#include <sys/bitmap.h>
25#include <sys/list.h>
26#include <sys/math.h>
27#include <sys/proc.h>
28#include <time.h>
29
30static wait_queue_t sleepQueue = WAIT_QUEUE_CREATE(sleepQueue);
31
32static _Atomic(clock_t) lastLoadBalance = ATOMIC_VAR_INIT(0);
33
34static inline int64_t sched_fixed_cmp(int128_t a, int128_t b)
35{
36 int128_t diff = SCHED_FIXED_FROM(a - b);
37 if (diff > SCHED_EPSILON)
38 {
39 return 1;
40 }
41
42 if (diff < -(SCHED_EPSILON + 1))
43 {
44 return -1;
45 }
46
47 return 0;
48}
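// sched_fixed_cmp() compares two fixed-point values with a tolerance band: the
// difference is converted back to integer units and treated as equal whenever it
// lies in [-(SCHED_EPSILON + 1), SCHED_EPSILON], presumably so that rounding from
// the fixed-point divisions in the vtime math cannot flip a comparison. As an
// illustrative example (not the real configured value), with SCHED_EPSILON == 2
// any pair whose converted difference falls between -3 and 2 compares as equal.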
49
50static int64_t sched_node_compare(const rbnode_t* aNode, const rbnode_t* bNode)
51{
52 const sched_client_t* a = CONTAINER_OF(aNode, sched_client_t, node);
53 const sched_client_t* b = CONTAINER_OF(bNode, sched_client_t, node);
54
55 if (a->vdeadline < b->vdeadline)
56 {
57 return -1;
58 }
59
60 if (a->vdeadline > b->vdeadline)
61 {
62 return 1;
63 }
64
65 if (a < b)
66 {
67 return -1;
68 }
69
70 if (a > b)
71 {
72 return 1;
73 }
74
75 return 0;
76}
77
78static void sched_node_update(rbnode_t* node)
79{
80 sched_client_t* client = CONTAINER_OF(node, sched_client_t, node);
81
82 vclock_t minEligible = client->veligible;
83 for (rbnode_direction_t dir = RBNODE_LEFT; dir < RBNODE_AMOUNT; dir++)
84 {
85 rbnode_t* child = node->children[dir];
86 if (child == NULL)
87 {
88 continue;
89 }
90
91 sched_client_t* childClient = CONTAINER_OF(child, sched_client_t, node);
92 if (sched_fixed_cmp(childClient->vminEligible, minEligible) < 0)
93 {
94 minEligible = childClient->vminEligible;
95 }
96 }
97 client->vminEligible = minEligible;
98}
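// sched_node_update() keeps the runqueue's rbtree augmentation current: it
// recomputes a node's vminEligible as the minimum of its own veligible and its
// children's cached subtree minimums. Maintaining this per-subtree minimum is what
// lets sched_first_eligible() below discard whole subtrees whose earliest eligible
// time is still ahead of the CPU's virtual clock instead of scanning the runqueue
// linearly.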
99
100PERCPU_DEFINE_CTOR(sched_t, _pcpu_sched)
101{
102 sched_t* sched = SELF_PTR(_pcpu_sched);
103
104 atomic_init(&sched->totalWeight, 0);
105 rbtree_init(&sched->runqueue, sched_node_compare, sched_node_update);
106 sched->vtime = SCHED_FIXED_ZERO;
107 sched->lastUpdate = 0;
108 lock_init(&sched->lock);
109 atomic_init(&sched->preemptCount, 0);
110
111 sched->idleThread = thread_new(process_get_kernel());
112 if (sched->idleThread == NULL)
113 {
114 panic(NULL, "Failed to create idle thread");
115 }
116
117 sched->idleThread->frame.rip = (uintptr_t)sched_idle_loop;
118 sched->idleThread->frame.rsp = sched->idleThread->kernelStack.top;
119 sched->idleThread->frame.cs = GDT_CS_RING0;
120 sched->idleThread->frame.ss = GDT_SS_RING0;
121 sched->idleThread->frame.rflags = RFLAGS_ALWAYS_SET | RFLAGS_INTERRUPT_ENABLE;
122
123 sched->runThread = sched->idleThread;
124 atomic_store(&sched->runThread->state, THREAD_ACTIVE);
125}
126
127static bool sched_is_cache_hot(thread_t* thread, clock_t uptime)
128{
129 return thread->sched.stop + CONFIG_CACHE_HOT_THRESHOLD > uptime;
130}
131
132void sched_client_init(sched_client_t* client)
133{
134 assert(client != NULL);
135
136 client->node = RBNODE_CREATE;
137 client->weight = UINT32_MAX; // Invalid
138 client->vdeadline = SCHED_FIXED_ZERO;
139 client->veligible = SCHED_FIXED_ZERO;
140 client->vminEligible = SCHED_FIXED_ZERO;
141 client->stop = 0;
142 client->lastCpu = NULL;
143}
144
145void sched_client_update_veligible(sched_client_t* client, vclock_t newVeligible)
146{
147 assert(client != NULL);
148
149 client->veligible = newVeligible;
150 client->vminEligible = newVeligible;
151 client->vdeadline = newVeligible + SCHED_FIXED_TO(CONFIG_TIME_SLICE) / client->weight;
152}
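// The deadline rule above is the eligible-time-plus-request shape that EEVDF-like
// schedulers use: vdeadline = veligible + SCHED_FIXED_TO(CONFIG_TIME_SLICE) / weight,
// so a heavier client gets a deadline closer to its eligible time and is favored by
// the earliest-deadline search. Illustrative numbers only (not the real config
// values): with a 10 ms slice, a weight-1 client's deadline sits 10 ms of virtual
// time after its eligible time, while a weight-2 client's sits only 5 ms after it.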
153
154// Must be called with the scheduler lock held.
155static void sched_vtime_reset(sched_t* sched, clock_t uptime)
156{
157 assert(sched != NULL);
158
159 sched->lastUpdate = uptime;
160 sched->vtime = SCHED_FIXED_ZERO;
161}
162
163// Must be called with the scheduler lock held.
164static void sched_vtime_update(sched_t* sched, clock_t uptime)
165{
166 assert(sched != NULL);
167
168 clock_t delta = uptime - sched->lastUpdate;
169 sched->lastUpdate = uptime;
170
171 int64_t totalWeight = atomic_load(&sched->totalWeight);
172 if (totalWeight == 0 || sched->runThread == sched->idleThread)
173 {
174 sched_vtime_reset(sched, uptime);
175 return;
176 }
177
178 // Eq 5.
179 sched->vtime += SCHED_FIXED_TO(delta) / totalWeight;
180
181 sched_client_update_veligible(&sched->runThread->sched,
182 sched->runThread->sched.veligible + SCHED_FIXED_TO(delta) / sched->runThread->sched.weight);
183 rbtree_fix(&sched->runqueue, &sched->runThread->sched.node);
184}
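// The "Eq 5" step advances the per-CPU virtual clock by delta / totalWeight for a
// real-time interval delta, while the running client's veligible advances by
// delta / weight. A client receiving exactly its weighted share keeps veligible in
// step with vtime, so its lag (vtime - veligible) * weight stays near zero; a
// client that has run beyond its share ends up with veligible ahead of vtime
// (negative lag) and is skipped by the eligibility check in sched_first_eligible()
// until the virtual clock catches up.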
185
186// Should be called with sched lock held.
187static void sched_enter(sched_t* sched, thread_t* thread, clock_t uptime)
188{
189 assert(sched != NULL);
190 assert(thread != NULL);
191 assert(thread != sched->idleThread);
192
194
195 sched_client_t* client = &thread->sched;
196 client->weight = atomic_load(&thread->process->priority) + SCHED_WEIGHT_BASE;
197 atomic_fetch_add(&sched->totalWeight, client->weight);
198
199 sched_client_update_veligible(client, sched->vtime);
200
201 rbtree_insert(&sched->runqueue, &client->node);
202 atomic_store(&thread->state, THREAD_ACTIVE);
203}
204
205// Should be called with sched lock held.
206static void sched_leave(sched_t* sched, thread_t* thread, clock_t uptime)
207{
208 assert(sched != NULL);
209 assert(thread != NULL);
210 assert(thread != sched->idleThread);
211
212 sched_client_t* client = &thread->sched;
213
214 lag_t lag = (sched->vtime - client->veligible) * client->weight;
215 int64_t totalWeight = atomic_fetch_sub(&sched->totalWeight, client->weight) - client->weight;
216
217 rbtree_remove(&sched->runqueue, &client->node);
218
219 if (totalWeight == 0)
220 {
221 sched_vtime_reset(sched, uptime);
222 return;
223 }
224
225 // Adjust the scheduler's time such that the sum of all threads' lag remains zero.
226 sched->vtime += lag / totalWeight;
227}
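// Why this keeps the invariant: the scheduler maintains
// sum over clients of (vtime - veligible_i) * weight_i == 0 (sched_verify() below
// checks exactly this). Removing a client whose lag is L leaves the remaining sum
// at -L with remaining weight W'. Shifting vtime by L / W' changes each remaining
// client's lag by weight_i * L / W', which sums to +L across the runqueue,
// restoring the zero total without touching any individual veligible.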
228
229static cpu_t* sched_get_least_loaded(void)
230{
231 cpu_t* leastLoaded = NULL;
232 uint64_t leastLoad = UINT64_MAX;
233
234 cpu_t* cpu;
235 CPU_FOR_EACH(cpu)
236 {
237 sched_t* sched = CPU_PTR(cpu->id, _pcpu_sched);
238
239 uint64_t load = atomic_load(&sched->totalWeight);
240
241 if (load < leastLoad)
242 {
243 leastLoad = load;
244 leastLoaded = cpu;
245 }
246 }
247
248 assert(leastLoaded != NULL);
249 return leastLoaded;
250}
251
252static thread_t* sched_steal(void)
253{
254 sched_t* mostLoaded = NULL;
255 uint64_t mostLoad = 0;
256
257 cpu_t* cpu;
258 CPU_FOR_EACH(cpu)
259 {
260 if (cpu->id == SELF->id)
261 {
262 continue;
263 }
264
265 sched_t* sched = CPU_PTR(cpu->id, _pcpu_sched);
266 uint64_t load = atomic_load(&sched->totalWeight);
267
268 if (load > mostLoad)
269 {
270 mostLoad = load;
271 mostLoaded = sched;
272 }
273 }
274
275 if (mostLoaded == NULL)
276 {
277 return NULL;
278 }
279
280 lock_acquire(&mostLoaded->lock);
281
282 clock_t uptime = clock_uptime();
283
284 thread_t* thread;
285 RBTREE_FOR_EACH(thread, &mostLoaded->runqueue, sched.node)
286 {
287 if (thread == mostLoaded->runThread || sched_is_cache_hot(thread, uptime))
288 {
289 continue;
290 }
291
292 sched_leave(mostLoaded, thread, uptime);
293 lock_release(&mostLoaded->lock);
294 return thread;
295 }
296
297 lock_release(&mostLoaded->lock);
298 return NULL;
299}
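// sched_steal() is the pull side of load balancing: it targets the remote CPU with
// the highest total weight and detaches the first queued thread that is neither
// currently running there nor still cache hot, so threads with a warm working set
// stay on their last CPU. The stolen thread is handed back to the caller, which
// re-enqueues it locally via sched_enter().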
300
301// Should be called with scheduler lock held.
302static thread_t* sched_first_eligible(sched_t* sched)
303{
304 assert(sched != NULL);
305
306 rbnode_t* current = sched->runqueue.root;
307 while (current != NULL)
308 {
309 sched_client_t* client = CONTAINER_OF(current, sched_client_t, node);
310
311 rbnode_t* left = current->children[RBNODE_LEFT];
312 if (left != NULL)
313 {
314 sched_client_t* leftClient = CONTAINER_OF(left, sched_client_t, node);
315 if (sched_fixed_cmp(leftClient->vminEligible, sched->vtime) <= 0)
316 {
317 current = left;
318 continue;
319 }
320 }
321
322 if (sched_fixed_cmp(client->veligible, sched->vtime) <= 0)
323 {
324 return CONTAINER_OF(client, thread_t, sched);
325 }
326
327 current = current->children[RBNODE_RIGHT];
328 }
329
330 if (!rbtree_is_empty(&sched->runqueue))
331 {
332#ifndef NDEBUG
333 vclock_t vminEligible =
334 CONTAINER_OF_SAFE(rbtree_find_min(sched->runqueue.root), sched_client_t, node)->vminEligible;
335 panic(NULL, "No eligible threads found, vminEligible=%lld vtime=%lld", SCHED_FIXED_FROM(vminEligible),
336 SCHED_FIXED_FROM(sched->vtime));
337#else
338 LOG_WARN("No eligible threads found, falling back to first thread in runqueue\n");
339 sched_client_t* first = CONTAINER_OF(rbtree_find_min(sched->runqueue.root), sched_client_t, node);
340 return CONTAINER_OF(first, thread_t, sched);
341#endif
342 }
343
344 thread_t* victim = sched_steal();
345 if (victim != NULL)
346 {
347 sched_enter(sched, victim, clock_uptime());
348 return victim;
349 }
350
351 return sched->idleThread;
352}
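// The loop above walks the runqueue (ordered by vdeadline) for the
// earliest-deadline client whose veligible the virtual clock has already reached.
// The vminEligible augmentation makes this cheap: if the left subtree's minimum
// eligible time is <= vtime, an eligible client with an earlier deadline exists
// there, so descend left; otherwise take the current node if it is eligible, or
// move right toward later deadlines. An empty runqueue falls back to stealing from
// another CPU and finally to the idle thread, while a non-empty runqueue with no
// eligible client is treated as a bug (panic in debug builds, logged fallback in
// release builds).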
353
354void sched_start(thread_t* bootThread)
355{
356 assert(bootThread != NULL);
357
358 sched_t* self = SELF_PTR(_pcpu_sched);
359
360 lock_acquire(&self->lock);
361
362 assert(self->runThread == self->idleThread);
364 assert(atomic_load(&self->totalWeight) == 0);
365
366 bootThread->sched.weight = atomic_load(&bootThread->process->priority) + SCHED_WEIGHT_BASE;
367 sched_client_update_veligible(&bootThread->sched, self->vtime);
368 atomic_store(&bootThread->state, THREAD_ACTIVE);
369
370 atomic_fetch_add(&self->totalWeight, bootThread->sched.weight);
371 self->runThread = bootThread;
372 rbtree_insert(&self->runqueue, &bootThread->sched.node);
373
374 lock_release(&self->lock);
375 thread_jump(bootThread);
376}
377
378void sched_submit(thread_t* thread)
379{
380 assert(thread != NULL);
381
382 cli_push();
383 sched_t* self = SELF_PTR(_pcpu_sched);
384
385 cpu_t* target;
386 if (thread->sched.lastCpu != NULL && sched_is_cache_hot(thread, clock_uptime()))
387 {
388 target = thread->sched.lastCpu;
389 }
390 else if (atomic_load(&self->totalWeight) == 0)
391 {
392 target = SELF->self;
393 }
394 else
395 {
396 target = sched_get_least_loaded();
397 }
398
399 sched_t* sched = CPU_PTR(target->id, _pcpu_sched);
400
401 lock_acquire(&sched->lock);
402 sched_enter(sched, thread, clock_uptime());
403 lock_release(&sched->lock);
404
405 bool shouldWake = SELF->id != target->id || !SELF->inInterrupt;
406 cli_pop();
407
408 if (shouldWake)
409 {
410 ipi_wake_up(target, IPI_SINGLE);
411 }
412}
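// Placement policy in sched_submit(): a still cache-hot thread goes back to the
// CPU it last ran on, a submitter whose own runqueue weight is zero keeps the
// thread local, and anything else goes to the least-loaded CPU by total weight.
// The wake-up IPI is skipped only when the target is this CPU and we are already
// inside an interrupt, presumably because a reschedule will happen on the way out
// of that interrupt anyway.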
413
414#ifndef NDEBUG
415static void sched_verify_min_eligible(sched_t* sched, rbnode_t* node)
416{
417 sched_client_t* client = CONTAINER_OF(node, sched_client_t, node);
418
419 bool hasChildren = false;
420 for (rbnode_direction_t dir = RBNODE_LEFT; dir < RBNODE_AMOUNT; dir++)
421 {
422 rbnode_t* child = node->children[dir];
423 if (child == NULL)
424 {
425 continue;
426 }
427
428 hasChildren = true;
429
430 sched_client_t* childClient = CONTAINER_OF(child, sched_client_t, node);
431
432 if (sched_fixed_cmp(client->vminEligible, childClient->vminEligible) > 0)
433 {
434 panic(NULL, "vminEligible incorrect for node with vdeadline %lld, expected %lld but got %lld",
435 SCHED_FIXED_FROM(client->vdeadline), SCHED_FIXED_FROM(childClient->vminEligible),
436 SCHED_FIXED_FROM(client->vminEligible));
437 }
438
439 sched_verify_min_eligible(sched, child);
440 }
441
442 if (!hasChildren && sched_fixed_cmp(client->vminEligible, client->veligible) != 0)
443 {
444 panic(NULL, "Leaf node vminEligible != veligible, vminEligible=%lld veligible=%lld",
445 SCHED_FIXED_FROM(client->vminEligible), SCHED_FIXED_FROM(client->veligible));
446 }
447}
448
449static void sched_verify(sched_t* sched)
450{
451 int64_t totalWeight = 0;
452 bool runThreadFound = false;
453 sched_client_t* client;
454 RBTREE_FOR_EACH(client, &sched->runqueue, node)
455 {
456 thread_t* thread = CONTAINER_OF(client, thread_t, sched);
457 totalWeight += client->weight;
458 assert(client->weight > 0);
459 assert(thread != sched->idleThread);
460
461 if (atomic_load(&thread->state) != THREAD_ACTIVE && thread != sched->runThread)
462 {
463 panic(NULL, "Thread in runqueue has invalid state %d", atomic_load(&thread->state));
464 }
465
466 if (thread == sched->runThread)
467 {
468 runThreadFound = true;
469 }
470 }
471
472 if (sched->runThread != sched->idleThread && !runThreadFound)
473 {
474 panic(NULL, "Running thread not found in runqueue");
475 }
476
477 if (totalWeight != atomic_load(&sched->totalWeight))
478 {
479 panic(NULL, "sched totalWeight incorrect, expected %lld but got %lld", totalWeight,
480 atomic_load(&sched->totalWeight));
481 }
482
483 sched_client_t* min = CONTAINER_OF_SAFE(rbtree_find_min(sched->runqueue.root), sched_client_t, node);
484 if (min != NULL)
485 {
486 sched_client_t* other;
487 RBTREE_FOR_EACH(other, &sched->runqueue, node)
488 {
489 if (sched_fixed_cmp(other->vdeadline, min->vdeadline) < 0)
490 {
491 panic(NULL, "runqueue not sorted, node with vdeadline %lld found, but min is %lld",
492 SCHED_FIXED_FROM(other->vdeadline), SCHED_FIXED_FROM(min->vdeadline));
493 }
494 }
495 }
496
497 sched_client_t* root = CONTAINER_OF_SAFE(sched->runqueue.root, sched_client_t, node);
498 if (root != NULL)
499 {
500 sched_verify_min_eligible(sched, &root->node);
501 }
502
503 sched_client_t* iter;
504 lag_t sumLag = SCHED_FIXED_ZERO;
505 RBTREE_FOR_EACH(iter, &sched->runqueue, node)
506 {
507 lag_t lag = (sched->vtime - iter->veligible) * iter->weight;
508 sumLag += lag;
509 }
510 if (sched_fixed_cmp(sumLag, SCHED_FIXED_ZERO) != 0)
511 {
512 LOG_DEBUG("debug info (vtime=%lld lagValue=%lld lagFixed=%lld):\n", SCHED_FIXED_FROM(sched->vtime),
513 SCHED_FIXED_FROM(sumLag), sumLag);
514 RBTREE_FOR_EACH(iter, &sched->runqueue, node)
515 {
516 thread_t* thread = CONTAINER_OF(iter, thread_t, sched);
517 lag_t lag = (sched->vtime - iter->veligible) * iter->weight;
518 LOG_DEBUG(" process %lld thread %lld lag=%lld veligible=%lld vdeadline=%lld weight=%lld\n",
519 thread->process->id, thread->id, SCHED_FIXED_FROM(lag), SCHED_FIXED_FROM(iter->veligible),
520 SCHED_FIXED_FROM(iter->vdeadline), iter->weight);
521 }
522 panic(NULL, "Total lag is not zero, got %lld", SCHED_FIXED_FROM(sumLag));
523 }
524}
525#endif
526
527void sched_do(interrupt_frame_t* frame)
528{
529 assert(frame != NULL);
530
531 sched_t* sched = SELF_PTR(_pcpu_sched);
532 lock_acquire(&sched->lock);
533
534 if (atomic_load(&sched->preemptCount) > 0 && atomic_load(&sched->runThread->state) == THREAD_ACTIVE)
535 {
536 lock_release(&sched->lock);
537 return;
538 }
539
540 if (sched->runThread == sched->idleThread && rbtree_is_empty(&sched->runqueue)) // Nothing to do
541 {
542 lock_release(&sched->lock);
544 return;
545 }
546
547 clock_t uptime = clock_uptime();
548 sched_vtime_update(sched, uptime);
549
550 assert(sched->runThread != NULL);
551 assert(sched->idleThread != NULL);
552
553 // Can't free any potential thread while we are still using its address space, so we defer the free to after we have
554 // switched threads. Since we have a per-CPU interrupt stack, we don't need to worry about a use-after-free of the
555 // stack.
556 thread_t* volatile threadToFree = NULL;
557
558#ifndef NDEBUG
559 sched_verify(sched);
560#endif
561
562 thread_state_t state = atomic_load(&sched->runThread->state);
563 switch (state)
564 {
565 case THREAD_DYING:
566 assert(sched->runThread != sched->idleThread);
567
568 threadToFree = sched->runThread;
569 sched_leave(sched, sched->runThread, uptime);
570 break;
571 case THREAD_PRE_BLOCK:
572 case THREAD_UNBLOCKING:
573 assert(sched->runThread != sched->idleThread);
574 if (wait_block_finalize(frame, sched->runThread, uptime))
575 {
576 sched_leave(sched, sched->runThread, uptime);
577 break;
578 }
579
580 // Early unblock
581 atomic_store(&sched->runThread->state, THREAD_ACTIVE);
582 break;
583 case THREAD_ACTIVE:
584 break;
585 default:
586 panic(NULL, "Thread in invalid state in sched_do() state=%d", state);
587 }
588
589 thread_t* next = sched_first_eligible(sched);
590 assert(next != NULL);
591
592 if (next != sched->runThread)
593 {
594 sched->runThread->sched.lastCpu = SELF->self;
595 sched->runThread->sched.stop = uptime;
596 thread_save(sched->runThread, frame);
598 thread_load(next, frame);
599 sched->runThread = next;
600 }
601
602 lock_release(&sched->lock);
603
604 if (sched->runThread != sched->idleThread)
605 {
607 }
608
609 if (threadToFree != NULL)
610 {
611 assert(threadToFree != sched->runThread);
612 thread_free(threadToFree); // Can't hold any locks here
613 }
614
616}
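// One pass of sched_do(): settle the outgoing thread (drop it from the runqueue if
// it is dying or has committed to blocking, keep it runnable on an early unblock),
// pick the next thread via sched_first_eligible(), then switch by saving the
// outgoing interrupt frame and loading the new one. Freeing a dying thread is
// deferred until after the switch and after the scheduler lock is released, as the
// comment above explains.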
617
618bool sched_is_idle(cpu_t* cpu)
619{
620 assert(cpu != NULL);
621
622 sched_t* sched = CPU_PTR(cpu->id, _pcpu_sched);
623
624 LOCK_SCOPE(&sched->lock);
625 return sched->runThread == sched->idleThread;
626}
627
628uint64_t sched_nanosleep(clock_t timeout)
629{
630 return WAIT_BLOCK_TIMEOUT(&sleepQueue, false, timeout);
631}
632
633void sched_yield(void)
634{
635 ipi_invoke();
636}
637
638void sched_disable(void)
639{
640 CLI_SCOPE();
641
642 sched_t* sched = SELF_PTR(_pcpu_sched);
643 atomic_fetch_add(&sched->preemptCount, 1);
644}
645
646void sched_enable(void)
647{
648 CLI_SCOPE();
649
650 sched_t* sched = SELF_PTR(_pcpu_sched);
651 atomic_fetch_sub(&sched->preemptCount, 1);
652}
653
654void sched_exits(const char* status)
655{
656 thread_t* thread = thread_current();
657 process_kill(thread->process, status);
658
659 atomic_store(&thread->state, THREAD_DYING);
660 ipi_invoke();
661 panic(NULL, "Return to sched_exits");
662}
663
664void sched_thread_exit(void)
665{
666 thread_t* thread = thread_current();
667 atomic_store(&thread->state, THREAD_DYING);
668 ipi_invoke();
669 panic(NULL, "Return to sched_thread_exit");
670}
671
672SYSCALL_DEFINE(SYS_NANOSLEEP, uint64_t, clock_t nanoseconds)
673{
674 return sched_nanosleep(nanoseconds);
675}
676
677SYSCALL_DEFINE(SYS_EXITS, void, const char* status)
678{
679 sched_exits(status);
680
681 panic(NULL, "Return to syscall_exits");
682}
683
684SYSCALL_DEFINE(SYS_THREAD_EXIT, void)
685{
686 sched_thread_exit();
687
688 panic(NULL, "Return to syscall_thread_exit");
689}
690
691SYSCALL_DEFINE(SYS_YIELD, uint64_t)
692{
693 sched_yield();
694 return 0;
695}
#define assert(expression)
Definition assert.h:29
#define CLOCKS_PER_MS
Definition clock_t.h:16
#define CONFIG_CACHE_HOT_THRESHOLD
Cache hot threshold configuration.
Definition config.h:107
#define CONFIG_TIME_SLICE
Time slice configuration.
Definition config.h:97
#define CLI_SCOPE()
Macro to increment CLI depth for the duration of the current scope.
Definition cli.h:56
static void cli_pop(void)
Decrements the CLI depth, re-enabling interrupts if depth reaches zero and interrupts were enabled pr...
Definition cli.h:42
static void cli_push(void)
Increments the CLI depth, disabling interrupts if depth was zero.
Definition cli.h:24
#define GDT_SS_RING0
Value to load into the SS register for kernel data.
Definition gdt.h:36
#define GDT_CS_RING0
Value to load into the CS register for kernel code.
Definition gdt.h:35
void ipi_invoke(void)
Invoke an IPI interrupt on the current CPU.
Definition ipi.c:269
void ipi_wake_up(cpu_t *cpu, ipi_flags_t flags)
Wake up one or more CPUs.
Definition ipi.c:215
@ IPI_SINGLE
Send the IPI to the specified CPU.
Definition ipi.h:104
#define SELF_PTR(ptr)
Macro to get a pointer to a percpu variable on the current CPU.
Definition percpu.h:93
#define SELF
Macro to access data in the current cpu.
Definition percpu.h:85
#define CPU_PTR(id, ptr)
Macro to get a pointer to a percpu variable on a specific CPU.
Definition percpu.h:102
#define PERCPU_DEFINE_CTOR(type, name)
Macro to define a percpu variable with a constructor.
Definition percpu.h:130
#define SYSCALL_DEFINE(num, returnType,...)
Macro to define a syscall.
Definition syscall.h:172
@ SYS_NANOSLEEP
Definition syscall.h:68
@ SYS_YIELD
Definition syscall.h:88
@ SYS_THREAD_EXIT
Definition syscall.h:66
@ SYS_EXITS
Definition syscall.h:65
#define CPU_FOR_EACH(cpu)
Macro to iterate over all CPUs.
Definition cpu.h:214
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
Definition panic.c:292
#define LOG_WARN(format,...)
Definition log.h:92
#define LOG_DEBUG(format,...)
Definition log.h:85
void process_kill(process_t *process, const char *status)
Kills a process, pushing it to the reaper.
Definition process.c:240
process_t * process_get_kernel(void)
Gets the kernel process.
Definition process.c:374
clock_t clock_uptime(void)
Retrieve the time in nanoseconds since boot.
Definition clock.c:99
void thread_free(thread_t *thread)
Frees a thread structure.
Definition thread.c:111
void thread_load(thread_t *thread, interrupt_frame_t *frame)
Load state from a thread.
Definition thread.c:160
static thread_t * thread_current(void)
Retrieves the currently running thread.
Definition thread.h:126
_NORETURN void thread_jump(thread_t *thread)
Jump to a thread by calling thread_load() and then loading its interrupt frame.
thread_t * thread_new(process_t *process)
Creates a new thread structure.
Definition thread.c:50
void thread_save(thread_t *thread, const interrupt_frame_t *frame)
Save state to a thread.
Definition thread.c:153
thread_state_t
Thread state enum.
Definition thread.h:35
@ THREAD_UNBLOCKING
Has started unblocking, used to prevent the same thread being unblocked multiple times.
Definition thread.h:40
@ THREAD_PRE_BLOCK
Has started the process of blocking but has not yet been given to an owner CPU.
Definition thread.h:38
@ THREAD_DYING
The thread is currently dying; it will be freed by the scheduler once it is invoked.
Definition thread.h:41
@ THREAD_ACTIVE
Is either running or ready to run.
Definition thread.h:37
#define WAIT_BLOCK_TIMEOUT(queue, condition, timeout)
Blocks until the condition is true, condition will be tested on every wakeup. Reaching the timeout wi...
Definition wait.h:75
bool wait_block_finalize(interrupt_frame_t *frame, thread_t *thread, clock_t uptime)
Finalize blocking of a thread.
Definition wait.c:240
#define WAIT_QUEUE_CREATE(name)
Create a wait queue initializer.
Definition wait.h:222
#define SCHED_FIXED_TO(x)
Convert a regular integer to fixed-point representation.
Definition sched.h:349
#define SCHED_FIXED_FROM(x)
Convert a fixed-point value to a regular integer.
Definition sched.h:354
#define SCHED_EPSILON
The minimum difference between two virtual clock or lag values to consider them unequal.
Definition sched.h:344
void sched_disable(void)
Disables preemption on the current CPU.
Definition sched.c:638
void sched_client_init(sched_client_t *client)
Initialize the scheduler context for a thread.
Definition sched.c:132
void sched_enable(void)
Enables preemption on the current CPU.
Definition sched.c:646
sched_t PERCPU _pcpu_sched
The per CPU scheduler.
int128_t vclock_t
Virtual clock type.
Definition sched.h:321
void sched_start(thread_t *bootThread)
Starts the scheduler by jumping to the boot thread.
Definition sched.c:354
void sched_yield(void)
Yield the current thread's time slice to allow other threads to run.
Definition sched.c:633
#define SCHED_WEIGHT_BASE
Base weight added to all threads.
Definition sched.h:366
void sched_exits(const char *status)
Terminates the currently executing process and all its threads.
Definition sched.c:654
bool sched_is_idle(cpu_t *cpu)
Checks if the CPU is currently idle.
Definition sched.c:618
void sched_submit(thread_t *thread)
Submits a thread to the scheduler.
Definition sched.c:378
_NORETURN void sched_idle_loop(void)
The idle loop for the scheduler.
uint64_t sched_nanosleep(clock_t timeout)
Sleeps the current thread for a specified duration in nanoseconds.
Definition sched.c:628
void sched_do(interrupt_frame_t *frame)
Perform a scheduling operation.
Definition sched.c:527
#define SCHED_FIXED_ZERO
Fixed-point zero.
Definition sched.h:339
void sched_thread_exit(void)
Terminates the currently executing thread.
Definition sched.c:664
static void lock_init(lock_t *lock)
Initializes a lock.
Definition lock.h:79
#define LOCK_SCOPE(lock)
Acquires a lock for the remainder of the current scope.
Definition lock.h:58
static void lock_release(lock_t *lock)
Releases a lock.
Definition lock.h:175
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
Definition lock.h:96
void rcu_report_quiescent(void)
Called during a context switch to report a quiescent state.
Definition rcu.c:173
void timer_set(clock_t now, clock_t deadline)
Schedule a one-shot timer interrupt on the current CPU.
Definition timer.c:139
#define RBTREE_FOR_EACH(elem, tree, member)
Iterates over a Red-Black Tree in ascending order.
Definition rbtree.h:258
rbnode_t * rbtree_find_min(rbnode_t *node)
Find the minimum node in a subtree.
Definition rbtree.c:184
rbnode_direction_t
Red-Black Tree Node Directions.
Definition rbtree.h:62
void rbtree_init(rbtree_t *tree, rbnode_compare_t compare, rbnode_update_t update)
Initialize a Red-Black Tree.
Definition rbtree.c:30
void rbtree_remove(rbtree_t *tree, rbnode_t *node)
Remove a node from the Red-Black Tree.
Definition rbtree.c:370
#define RBNODE_CREATE
Create a Red-Black Tree Node initializer.
Definition rbtree.h:104
void rbtree_insert(rbtree_t *tree, rbnode_t *node)
Insert a node into the Red-Black Tree.
Definition rbtree.c:149
void rbtree_fix(rbtree_t *tree, rbnode_t *node)
Move the node to its correct position in the Red-Black Tree.
Definition rbtree.c:526
bool rbtree_is_empty(const rbtree_t *tree)
Check if the Red-Black Tree is empty.
Definition rbtree.c:553
@ RBNODE_RIGHT
Definition rbtree.h:64
@ RBNODE_LEFT
Definition rbtree.h:63
#define NULL
Pointer error value.
Definition NULL.h:25
#define CONTAINER_OF(ptr, type, member)
Container of macro.
#define CONTAINER_OF_SAFE(ptr, type, member)
Safe container of macro.
__UINT64_TYPE__ clock_t
A nanosecond time.
Definition clock_t.h:13
#define RFLAGS_INTERRUPT_ENABLE
Definition regs.h:34
#define RFLAGS_ALWAYS_SET
Definition regs.h:26
static cpu_t * sched_get_least_loaded(void)
Definition sched.c:229
static int64_t sched_node_compare(const rbnode_t *aNode, const rbnode_t *bNode)
Definition sched.c:50
static bool sched_is_cache_hot(thread_t *thread, clock_t uptime)
Definition sched.c:127
static thread_t * sched_steal(void)
Definition sched.c:252
void sched_client_update_veligible(sched_client_t *client, vclock_t newVeligible)
Definition sched.c:145
static void sched_verify(sched_t *sched)
Definition sched.c:449
static wait_queue_t sleepQueue
Definition sched.c:30
static void sched_enter(sched_t *sched, thread_t *thread, clock_t uptime)
Definition sched.c:187
static void sched_verify_min_eligible(sched_t *sched, rbnode_t *node)
Definition sched.c:415
static void sched_vtime_update(sched_t *sched, clock_t uptime)
Definition sched.c:164
static void sched_leave(sched_t *sched, thread_t *thread, clock_t uptime)
Definition sched.c:206
static thread_t * sched_first_eligible(sched_t *sched)
Definition sched.c:302
static void sched_vtime_reset(sched_t *sched, clock_t uptime)
Definition sched.c:155
static void sched_node_update(rbnode_t *node)
Definition sched.c:78
#define atomic_store(object, desired)
Definition stdatomic.h:289
#define _Atomic(T)
Definition stdatomic.h:59
#define atomic_fetch_sub(object, operand)
Definition stdatomic.h:286
#define atomic_load(object)
Definition stdatomic.h:288
#define ATOMIC_VAR_INIT(value)
Definition stdatomic.h:74
#define atomic_fetch_add(object, operand)
Definition stdatomic.h:283
#define atomic_init(obj, value)
Definition stdatomic.h:75
__UINT64_TYPE__ uint64_t
Definition stdint.h:17
#define UINT64_MAX
Definition stdint.h:74
__int128_t int128_t
Definition stdint.h:169
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:43
#define UINT32_MAX
Definition stdint.h:70
__INT64_TYPE__ int64_t
Definition stdint.h:16
CPU structure.
Definition cpu.h:84
cpu_id_t id
Definition cpu.h:86
cpu_t * self
Definition cpu.h:85
Trap Frame Structure.
Definition interrupt.h:195
Lag type.
pid_t id
Definition process.h:81
Red-Black Tree Node.
Definition rbtree.h:93
rbnode_t * children[RBNODE_AMOUNT]
Definition rbtree.h:95
rbnode_t * root
Definition rbtree.h:139
Per-thread scheduler context.
Definition sched.h:375
clock_t stop
The real time when the thread previously stopped executing.
Definition sched.h:384
vclock_t vdeadline
Definition sched.h:381
vclock_t veligible
The virtual time at which the thread becomes eligible to run (lag >= 0).
Definition sched.h:382
vclock_t vminEligible
The minimum virtual eligible time of the subtree in the runqueue.
Definition sched.h:383
rbnode_t node
The node in the scheduler's runqueue.
Definition sched.h:376
cpu_t * lastCpu
The last CPU the thread was scheduled on; it stopped running at the stop time.
Definition sched.h:385
int64_t weight
The weight of the thread.
Definition sched.h:377
Per-CPU scheduler.
Definition sched.h:398
vclock_t vtime
The current virtual time of the CPU.
Definition sched.h:401
lock_t lock
The lock protecting the scheduler.
Definition sched.h:403
clock_t lastUpdate
The real time when the last vtime update occurred.
Definition sched.h:402
thread_t *volatile runThread
The currently running thread on this CPU.
Definition sched.h:406
rbtree_t runqueue
Contains all runnable threads, including the currently running thread, sorted by vdeadline.
Definition sched.h:400
thread_t *volatile idleThread
The idle thread for this CPU.
Definition sched.h:405
uintptr_t top
The top of the stack; this address is not inclusive.
Thread of execution structure.
Definition thread.h:61
process_t * process
The parent process that the thread executes within.
Definition thread.h:62
interrupt_frame_t frame
Definition thread.h:87
stack_pointer_t kernelStack
The kernel stack of the thread.
Definition thread.h:73
tid_t id
The thread id, unique within a process_t.
Definition thread.h:64
sched_client_t sched
Definition thread.h:75
The primitive that threads block on.
Definition wait.h:185