space.c
#include <kernel/mem/space.h>

#include <kernel/cpu/cpu.h>
#include <kernel/cpu/smp.h>
#include <kernel/log/panic.h>
#include <kernel/mem/paging.h>
#include <kernel/mem/pmm.h>
#include <kernel/mem/vmm.h>

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/math.h>
#include <sys/proc.h>

static uint64_t space_pmm_bitmap_alloc_pages(void** pages, uint64_t pageAmount)
{
    for (uint64_t i = 0; i < pageAmount; i++)
    {
        void* page = pmm_alloc_bitmap(1, UINT32_MAX, 0);
        if (page == NULL)
        {
            // Roll back everything allocated so far so no pages leak.
            for (uint64_t j = 0; j < i; j++)
            {
                pmm_free(pages[j]);
            }
            return ERR;
        }
        pages[i] = page;
    }
    return 0;
}
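
// Note: pmm_alloc_bitmap() with a UINT32_MAX ceiling keeps every page-table
// page below 4 GiB. The reason is not stated in this file; presumably spaces
// created with SPACE_USE_PMM_BITMAP need page tables reachable via 32-bit
// physical addresses (e.g. during early boot or AP startup).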

static void space_map_kernel_space_region(space_t* space, uintptr_t start, uintptr_t end)
{
    space_t* kernelSpace = vmm_get_kernel_space();

    pml_index_t startIndex = PML_ADDR_TO_INDEX(start, PML4);
    pml_index_t endIndex = PML_ADDR_TO_INDEX(end - 1, PML4) + 1; // Inclusive end

    for (pml_index_t i = startIndex; i < endIndex; i++)
    {
        // Borrow the kernel's PML4 entries; clearing owned ensures they are
        // never freed when this space is torn down.
        space->pageTable.pml4->entries[i] = kernelSpace->pageTable.pml4->entries[i];
        space->pageTable.pml4->entries[i].owned = 0;
    }
}

static void space_unmap_kernel_space_region(space_t* space, uintptr_t start, uintptr_t end)
{
    pml_index_t startIndex = PML_ADDR_TO_INDEX(start, PML4);
    pml_index_t endIndex = PML_ADDR_TO_INDEX(end - 1, PML4) + 1; // Inclusive end

    for (pml_index_t i = startIndex; i < endIndex; i++)
    {
        // Drop the borrowed kernel entries without freeing anything.
        space->pageTable.pml4->entries[i].raw = 0;
    }
}
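
// Worked example for the two helpers above: each PML4 slot covers 512 GiB, so
// a higher-half address such as 0xFFFFFFFF80000000 has
// PML_ADDR_TO_INDEX(addr, PML4) == 511, and the "end - 1 ... + 1" form makes a
// range that ends exactly on a slot boundary still include its last slot.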

uint64_t space_init(space_t* space, uintptr_t startAddress, uintptr_t endAddress, space_flags_t flags)
{
    if (space == NULL)
    {
        errno = EINVAL;
        return ERR;
    }

    if (map_init(&space->pinnedPages) == ERR)
    {
        return ERR;
    }

    if (flags & SPACE_USE_PMM_BITMAP)
    {
        if (page_table_init(&space->pageTable, space_pmm_bitmap_alloc_pages, pmm_free_pages) == ERR)
        {
            errno = ENOMEM;
            return ERR;
        }
        // We only use the specific pmm allocator for the page table itself, not for mappings.
        space->pageTable.allocPages = pmm_alloc_pages;
    }
    else
    {
        if (page_table_init(&space->pageTable, pmm_alloc_pages, pmm_free_pages) == ERR)
        {
            errno = ENOMEM;
            return ERR;
        }
    }

    space->startAddress = startAddress;
    space->endAddress = endAddress;
    space->freeAddress = startAddress;
    space->flags = flags;
    space->callbacks = NULL;
    space->callbacksLength = 0;
    bitmap_init(&space->callbackBitmap, space->bitmapBuffer, BITMAP_BITS_TO_BYTES(PML_MAX_CALLBACK));
    list_init(&space->cpus);
    atomic_init(&space->shootdownAcks, 0);
    lock_init(&space->lock);

    if (flags & SPACE_MAP_KERNEL_BINARY)
    {
        space_map_kernel_space_region(space, VMM_KERNEL_BINARY_MIN, VMM_KERNEL_BINARY_MAX);
    }

    if (flags & SPACE_MAP_KERNEL_HEAP)
    {
        space_map_kernel_space_region(space, VMM_KERNEL_HEAP_MIN, VMM_KERNEL_HEAP_MAX);
    }

    if (flags & SPACE_MAP_IDENTITY)
    {
        space_map_kernel_space_region(space, VMM_IDENTITY_MAPPED_MIN, VMM_IDENTITY_MAPPED_MAX);
    }

    return 0;
}
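
// Usage sketch (hypothetical caller, not part of this file): a user address
// space would typically share all kernel regions and span the user range:
//
//   space_t space;
//   if (space_init(&space, VMM_USER_SPACE_MIN, VMM_USER_SPACE_MAX,
//           SPACE_MAP_KERNEL_BINARY | SPACE_MAP_KERNEL_HEAP | SPACE_MAP_IDENTITY) == ERR)
//   {
//       return ERR;
//   }
//   ...
//   space_deinit(&space); // Must no longer be loaded on any CPU (see the panic below).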

void space_deinit(space_t* space)
{
    if (space == NULL)
    {
        return;
    }

    if (!list_is_empty(&space->cpus))
    {
        panic(NULL, "Attempted to free address space still in use by CPUs");
    }

    uint64_t index;
    BITMAP_FOR_EACH_SET(&index, &space->callbackBitmap)
    {
        space->callbacks[index].func(space->callbacks[index].private);
    }

    if (space->flags & SPACE_MAP_KERNEL_BINARY)
    {
        space_unmap_kernel_space_region(space, VMM_KERNEL_BINARY_MIN, VMM_KERNEL_BINARY_MAX);
    }

    if (space->flags & SPACE_MAP_KERNEL_HEAP)
    {
        space_unmap_kernel_space_region(space, VMM_KERNEL_HEAP_MIN, VMM_KERNEL_HEAP_MAX);
    }

    if (space->flags & SPACE_MAP_IDENTITY)
    {
        space_unmap_kernel_space_region(space, VMM_IDENTITY_MAPPED_MIN, VMM_IDENTITY_MAPPED_MAX);
    }

    free(space->callbacks);
    page_table_deinit(&space->pageTable);
}

void space_load(space_t* space)
{
    if (space == NULL)
    {
        return;
    }

    assert(!(rflags_read() & RFLAGS_INTERRUPT_ENABLE));

    cpu_t* self = smp_self_unsafe();
    assert(self != NULL);

    assert(self->vmm.currentSpace != NULL);
    if (space == self->vmm.currentSpace)
    {
        return;
    }

    space_t* oldSpace = self->vmm.currentSpace;
    self->vmm.currentSpace = NULL;

    lock_acquire(&oldSpace->lock);
#ifndef NDEBUG
    bool found = false;
    cpu_t* cpu;
    LIST_FOR_EACH(cpu, &oldSpace->cpus, vmm.entry)
    {
        if (self == cpu)
        {
            found = true;
            break;
        }
    }
    if (!found)
    {
        lock_release(&oldSpace->lock);
        panic(NULL, "CPU not found in old space's CPU list");
    }
#endif
    list_remove(&oldSpace->cpus, &self->vmm.entry);
    lock_release(&oldSpace->lock);

    lock_acquire(&space->lock);
    list_push(&space->cpus, &self->vmm.entry);
    lock_release(&space->lock);
    self->vmm.currentSpace = space;

    page_table_load(&space->pageTable);
}

static void space_align_region(void** virtAddr, uint64_t* length)
{
    void* aligned = (void*)ROUND_DOWN(*virtAddr, PAGE_SIZE);
    *length += ((uint64_t)*virtAddr - (uint64_t)aligned);
    *virtAddr = aligned;
}
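
// Example: virtAddr = 0x1234, length = 0x10 becomes virtAddr = 0x1000 and
// length = 0x244 (the original 0x10 bytes plus the 0x234 bytes of slack in
// front), so the region is page-aligned and still covers every original byte.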

static uint64_t space_populate_user_region(space_t* space, const void* buffer, uint64_t pageAmount)
{
    for (uint64_t i = 0; i < pageAmount; i++)
    {
        uintptr_t addr = (uintptr_t)buffer + (i * PAGE_SIZE);
        if (page_table_is_mapped(&space->pageTable, (void*)addr, 1))
        {
            continue;
        }

        void* page = pmm_alloc();
        if (page == NULL)
        {
            errno = ENOMEM;
            return ERR;
        }

        if (page_table_map(&space->pageTable, (void*)addr, page, 1, PML_PRESENT | PML_USER | PML_WRITE | PML_OWNED,
                PML_CALLBACK_NONE) == ERR)
        {
            pmm_free(page);
            errno = EFAULT;
            return ERR;
        }
    }

    return 0;
}
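
// Only the unmapped holes in the region are populated; already-mapped pages
// are left untouched. space_pin() below uses this to demand-fault user stack
// pages that are allowed to exist but have not been faulted in yet.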

static void space_pin_depth_dec(space_t* space, const void* address, uint64_t pageAmount)
{
    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE(&space->pageTable);

    for (uint64_t i = 0; i < pageAmount; i++)
    {
        uintptr_t addr = (uintptr_t)address + (i * PAGE_SIZE);
        if (page_table_traverse(&space->pageTable, &traverse, addr, PML_NONE) == ERR)
        {
            continue;
        }

        if (!traverse.entry->present || !traverse.entry->pinned)
        {
            continue;
        }

        map_key_t key = map_key_uint64(addr);
        map_entry_t* entry = map_get(&space->pinnedPages, &key);
        if (entry == NULL) // Not pinned more than once
        {
            traverse.entry->pinned = false;
            continue;
        }

        space_pinned_page_t* pinnedPage = CONTAINER_OF(entry, space_pinned_page_t, mapEntry);
        pinnedPage->pinCount--;
        if (pinnedPage->pinCount == 0)
        {
            map_remove(&space->pinnedPages, &key);
            free(pinnedPage);
            traverse.entry->pinned = false;
        }
    }
}

static uint64_t space_pin_depth_inc(space_t* space, const void* address, uint64_t pageAmount)
{
    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE(&space->pageTable);

    for (uint64_t i = 0; i < pageAmount; i++)
    {
        uintptr_t addr = (uintptr_t)address + (i * PAGE_SIZE);
        if (page_table_traverse(&space->pageTable, &traverse, addr, PML_NONE) == ERR)
        {
            continue;
        }

        if (!traverse.entry->present)
        {
            continue;
        }

        if (!traverse.entry->pinned)
        {
            traverse.entry->pinned = true;
            continue;
        }

        map_key_t key = map_key_uint64(addr);
        map_entry_t* entry = map_get(&space->pinnedPages, &key);
        if (entry != NULL) // Already pinned more than once
        {
            space_pinned_page_t* pinnedPage = CONTAINER_OF(entry, space_pinned_page_t, mapEntry);
            pinnedPage->pinCount++;
            continue;
        }

        space_pinned_page_t* newPinnedPage = malloc(sizeof(space_pinned_page_t));
        if (newPinnedPage == NULL)
        {
            return ERR;
        }
        map_entry_init(&newPinnedPage->mapEntry);
        newPinnedPage->pinCount = 2; // One for the page table, one for the map
        if (map_insert(&space->pinnedPages, &key, &newPinnedPage->mapEntry) == ERR)
        {
            free(newPinnedPage);
            return ERR;
        }
    }

    return 0;
}
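
// Pin-depth bookkeeping in the two helpers above: depth 1 is stored for free
// in the page-table entry's pinned bit, and only depths >= 2 need a
// heap-allocated entry in pinnedPages. Example: pinning the same page twice
// leaves pinned == 1 and a map entry with pinCount == 2; two matching
// decrements walk that back down and finally clear the bit.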

uint64_t space_pin(space_t* space, const void* buffer, uint64_t length, stack_pointer_t* userStack)
{
    if (space == NULL || (buffer == NULL && length != 0))
    {
        errno = EINVAL;
        return ERR;
    }

    if (length == 0)
    {
        return 0;
    }

    uintptr_t bufferOverflow = (uintptr_t)buffer + length;
    if (bufferOverflow < (uintptr_t)buffer)
    {
        errno = EOVERFLOW;
        return ERR;
    }

    LOCK_SCOPE(&space->lock);

    space_align_region((void**)&buffer, &length);
    uint64_t pageAmount = BYTES_TO_PAGES(length);

    if (!page_table_is_mapped(&space->pageTable, buffer, pageAmount))
    {
        if (userStack == NULL || !stack_pointer_is_in_stack(userStack, (uintptr_t)buffer, length))
        {
            errno = EFAULT;
            return ERR;
        }

        if (space_populate_user_region(space, buffer, pageAmount) == ERR)
        {
            return ERR;
        }
    }

    if (space_pin_depth_inc(space, buffer, pageAmount) == ERR)
    {
        errno = EFAULT;
        return ERR;
    }

    return 0;
}
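
// Usage sketch (hypothetical caller, not part of this file): pin a user
// buffer before the kernel touches it, and unpin when done.
//
//   if (space_pin(space, userBuf, len, userStack) == ERR)
//   {
//       return ERR;
//   }
//   memcpy(kernelBuf, userBuf, len); // The pinned bit is meant to keep these pages mapped.
//   space_unpin(space, userBuf, len);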

uint64_t space_pin_terminated(space_t* space, const void* address, const void* terminator, uint8_t objectSize,
    uint64_t maxCount, stack_pointer_t* userStack)
{
    if (space == NULL || address == NULL || terminator == NULL || objectSize == 0 || maxCount == 0)
    {
        errno = EINVAL;
        return ERR;
    }

    LOCK_SCOPE(&space->lock);

    uint64_t terminatorMatchedBytes = 0;
    uintptr_t current = (uintptr_t)address;
    uintptr_t end = (uintptr_t)address + (maxCount * objectSize);
    uint64_t pinnedPages = 0;
    while (current < end)
    {
        if (!page_table_is_mapped(&space->pageTable, (void*)current, 1))
        {
            if (userStack == NULL || !stack_pointer_is_in_stack(userStack, current, 1))
            {
                errno = EFAULT;
                goto error;
            }

            if (space_populate_user_region(space, (void*)current, 1) == ERR)
            {
                goto error;
            }
        }

        if (space_pin_depth_inc(space, (void*)current, 1) == ERR)
        {
            errno = EFAULT;
            goto error;
        }
        pinnedPages++;

        // Scan ONLY the currently pinned page for the terminator.
        uintptr_t scanEnd = MIN(ROUND_UP(current + 1, PAGE_SIZE), end);
        for (uintptr_t scanAddr = current; scanAddr < scanEnd; scanAddr++)
        {
            // A partially matched terminator carries over into the next page.
            if (*((uint8_t*)scanAddr) == ((uint8_t*)terminator)[terminatorMatchedBytes])
            {
                terminatorMatchedBytes++;
                if (terminatorMatchedBytes == objectSize)
                {
                    return scanAddr - (uintptr_t)address + 1 - objectSize;
                }
            }
            else
            {
                scanAddr += objectSize - terminatorMatchedBytes - 1; // Skip the rest of the object
                terminatorMatchedBytes = 0;
            }
        }

        current = scanEnd;
    }

    // Falls through: the terminator was not found within maxCount objects.
error:
    space_pin_depth_dec(space, address, pinnedPages);
    return ERR;
}
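
// Example: for a NUL-terminated user string, terminator = "\0" and
// objectSize = 1, so the value returned on success is the string length in
// bytes (terminator excluded), and every page the scan touched stays pinned
// for the caller to space_unpin() later.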

void space_unpin(space_t* space, const void* address, uint64_t length)
{
    if (space == NULL || (address == NULL && length != 0))
    {
        return;
    }

    if (length == 0)
    {
        return;
    }

    uintptr_t overflow = (uintptr_t)address + length;
    if (overflow < (uintptr_t)address)
    {
        return;
    }

    LOCK_SCOPE(&space->lock);

    space_align_region((void**)&address, &length);
    uint64_t pageAmount = BYTES_TO_PAGES(length);

    space_pin_depth_dec(space, address, pageAmount);
}

uint64_t space_check_access(space_t* space, const void* addr, uint64_t length)
{
    if (space == NULL || (addr == NULL && length != 0))
    {
        errno = EINVAL;
        return ERR;
    }

    if (length == 0)
    {
        return 0;
    }

    uintptr_t addrOverflow = (uintptr_t)addr + length;
    if (addrOverflow < (uintptr_t)addr)
    {
        errno = EOVERFLOW;
        return ERR;
    }

    if ((uintptr_t)addr < space->startAddress || addrOverflow > space->endAddress)
    {
        errno = EFAULT;
        return ERR;
    }

    return 0;
}

static void* space_find_free_region(space_t* space, uint64_t pageAmount)
{
    void* addr;
    if (page_table_find_unmapped_region(&space->pageTable, (void*)space->freeAddress, (void*)space->endAddress,
            pageAmount, &addr) != ERR)
    {
        space->freeAddress = (uintptr_t)addr + pageAmount * PAGE_SIZE;
        return addr;
    }

    if (page_table_find_unmapped_region(&space->pageTable, (void*)space->startAddress, (void*)space->freeAddress,
            pageAmount, &addr) != ERR)
    {
        space->freeAddress = (uintptr_t)addr + pageAmount * PAGE_SIZE;
        return addr;
    }

    return NULL;
}
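
// Next-fit search: first try from the freeAddress hint to the end of the
// space, then wrap around and retry from startAddress. The hint keeps
// back-to-back allocations cheap while the second pass still finds holes
// behind it.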

uint64_t space_mapping_start(space_t* space, space_mapping_t* mapping, void* virtAddr, void* physAddr, uint64_t length,
    pml_flags_t flags)
{
    if (space == NULL || mapping == NULL || length == 0)
    {
        errno = EINVAL;
        return ERR;
    }

    uintptr_t virtOverflow = (uintptr_t)virtAddr + length;
    if (virtOverflow < (uintptr_t)virtAddr)
    {
        errno = EOVERFLOW;
        return ERR;
    }

    uintptr_t physOverflow = (uintptr_t)physAddr + length;
    if (physAddr != NULL && physOverflow < (uintptr_t)physAddr)
    {
        errno = EOVERFLOW;
        return ERR;
    }

    if (flags & PML_USER)
    {
        if (virtAddr != NULL &&
            ((uintptr_t)virtAddr + length > VMM_USER_SPACE_MAX || (uintptr_t)virtAddr < VMM_USER_SPACE_MIN))
        {
            errno = EFAULT;
            return ERR;
        }
    }

    lock_acquire(&space->lock);

    uint64_t pageAmount;
    if (virtAddr == NULL)
    {
        pageAmount = BYTES_TO_PAGES(length);
        virtAddr = space_find_free_region(space, pageAmount);
        if (virtAddr == NULL)
        {
            lock_release(&space->lock);
            errno = ENOMEM;
            return ERR;
        }
    }
    else
    {
        space_align_region(&virtAddr, &length);
        pageAmount = BYTES_TO_PAGES(length);
    }

    mapping->virtAddr = virtAddr;
    if (physAddr != NULL)
    {
        mapping->physAddr = (void*)PML_ENSURE_LOWER_HALF(ROUND_DOWN(physAddr, PAGE_SIZE));
    }
    else
    {
        mapping->physAddr = NULL;
    }

    mapping->flags = flags;
    mapping->pageAmount = pageAmount;
    return 0; // We return with the lock still acquired.
}
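
// Usage sketch (hypothetical caller, not part of this file): the start/end
// pair brackets a page-table edit; the space lock is held in between, so the
// edit and the freeAddress update are atomic with respect to other mappers.
// The error value chosen below is illustrative.
//
//   space_mapping_t mapping;
//   if (space_mapping_start(space, &mapping, NULL, physAddr, length, flags) == ERR)
//   {
//       return NULL;
//   }
//   errno_t err = page_table_map(&space->pageTable, mapping.virtAddr, mapping.physAddr,
//           mapping.pageAmount, mapping.flags, PML_CALLBACK_NONE) == ERR ? ENOMEM : EOK;
//   return space_mapping_end(space, &mapping, err);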

pml_callback_id_t space_alloc_callback(space_t* space, uint64_t pageAmount, space_callback_func_t func, void* private)
{
    if (space == NULL)
    {
        return PML_MAX_CALLBACK;
    }

    pml_callback_id_t callbackId = bitmap_find_first_clear(&space->callbackBitmap);
    if (callbackId == PML_MAX_CALLBACK)
    {
        return PML_MAX_CALLBACK;
    }

    if (callbackId >= space->callbacksLength)
    {
        space_callback_t* newCallbacks = malloc(sizeof(space_callback_t) * (callbackId + 1));
        if (newCallbacks == NULL)
        {
            return PML_MAX_CALLBACK;
        }

        if (space->callbacks != NULL)
        {
            memcpy(newCallbacks, space->callbacks, sizeof(space_callback_t) * space->callbacksLength);
            free(space->callbacks);
        }
        memset(&newCallbacks[space->callbacksLength], 0,
            sizeof(space_callback_t) * (callbackId + 1 - space->callbacksLength));

        space->callbacks = newCallbacks;
        space->callbacksLength = callbackId + 1;
    }

    bitmap_set(&space->callbackBitmap, callbackId);
    space_callback_t* callback = &space->callbacks[callbackId];
    callback->func = func;
    callback->private = private;
    callback->pageAmount = pageAmount;
    return callbackId;
}

void space_free_callback(space_t* space, pml_callback_id_t callbackId)
{
    bitmap_clear(&space->callbackBitmap, callbackId);
}

void space_tlb_shootdown(space_t* space, void* virtAddr, uint64_t pageAmount)
{
    if (space == NULL)
    {
        return;
    }

    uint64_t cpuAmount = smp_cpu_amount();
    if (cpuAmount <= 1)
    {
        return;
    }
    cpu_t* self = smp_self_unsafe();

    uint16_t expectedAcks = 0;
    atomic_store(&space->shootdownAcks, 0);

    cpu_t* cpu;
    LIST_FOR_EACH(cpu, &space->cpus, vmm.entry)
    {
        if (cpu == self)
        {
            continue;
        }

        lock_acquire(&cpu->vmm.lock);
        if (cpu->vmm.shootdownCount >= VMM_MAX_SHOOTDOWN_REQUESTS)
        {
            lock_release(&cpu->vmm.lock);
            panic(NULL, "CPU %d shootdown buffer overflow", cpu->id);
        }

        vmm_shootdown_t* shootdown = &cpu->vmm.shootdowns[cpu->vmm.shootdownCount++];
        shootdown->space = space;
        shootdown->virtAddr = virtAddr;
        shootdown->pageAmount = pageAmount;
        lock_release(&cpu->vmm.lock);

        lapic_send_ipi(cpu->lapicId, INTERRUPT_TLB_SHOOTDOWN);
        expectedAcks++;
    }

    clock_t startTime = timer_uptime();
    while (atomic_load(&space->shootdownAcks) < expectedAcks)
    {
        if (timer_uptime() - startTime > SPACE_TLB_SHOOTDOWN_TIMEOUT)
        {
            panic(NULL, "TLB shootdown timeout in space %p for region %p - %p", space, virtAddr,
                (void*)((uintptr_t)virtAddr + pageAmount * PAGE_SIZE));
        }

        asm volatile("pause");
    }

#ifndef NDEBUG
    LIST_FOR_EACH(cpu, &space->cpus, vmm.entry)
    {
        if (cpu == self)
        {
            continue;
        }

        lock_acquire(&cpu->vmm.lock);
        for (uint8_t i = 0; i < cpu->vmm.shootdownCount; i++)
        {
            vmm_shootdown_t* shootdown = &cpu->vmm.shootdowns[i];
            if (shootdown->space != space || shootdown->virtAddr != virtAddr || shootdown->pageAmount != pageAmount)
            {
                continue;
            }

            panic(NULL, "TLB shootdown entry not cleared in cpu %d for space %p region %p - %p", cpu->id, space,
                virtAddr, (void*)((uintptr_t)virtAddr + pageAmount * PAGE_SIZE));
        }
        lock_release(&cpu->vmm.lock);
    }
#endif
}
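
// Protocol summary: each other CPU using this space gets a request queued in
// its per-CPU shootdown buffer plus an INTERRUPT_TLB_SHOOTDOWN IPI, and the
// initiator spins until shootdownAcks reaches expectedAcks. The interrupt
// handler (presumably in vmm.c) is expected to invalidate the region, clear
// its queue entry, and increment shootdownAcks; the NDEBUG-guarded pass then
// re-checks that every queue entry was in fact consumed.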

static void space_update_free_address(space_t* space, uintptr_t virtAddr, uint64_t pageAmount)
{
    if (space == NULL)
    {
        return;
    }

    if (virtAddr <= space->freeAddress && space->freeAddress < virtAddr + pageAmount * PAGE_SIZE)
    {
        space->freeAddress = virtAddr + pageAmount * PAGE_SIZE;
    }
}

void* space_mapping_end(space_t* space, space_mapping_t* mapping, errno_t err)
{
    if (space == NULL || mapping == NULL)
    {
        errno = EINVAL;
        return NULL;
    }

    if (err != EOK)
    {
        lock_release(&space->lock); // Release the lock from space_mapping_start.
        errno = err;
        return NULL;
    }

    space_update_free_address(space, (uintptr_t)mapping->virtAddr, mapping->pageAmount);
    lock_release(&space->lock); // Release the lock from space_mapping_start.
    return mapping->virtAddr;
}

bool space_is_mapped(space_t* space, const void* virtAddr, uint64_t length)
{
    if (space == NULL)
    {
        return false;
    }

    space_align_region((void**)&virtAddr, &length);
    LOCK_SCOPE(&space->lock);
    return page_table_is_mapped(&space->pageTable, virtAddr, BYTES_TO_PAGES(length));
}