PatchworkOS  c9fea19
A non-POSIX operating system.
space.c
#include <kernel/cpu/ipi.h>
#include <kernel/mem/space.h>

#include <kernel/cpu/cpu.h>
#include <kernel/log/panic.h>
#include <kernel/mem/paging.h>
#include <kernel/mem/pmm.h>
#include <kernel/mem/vmm.h>
#include <kernel/sched/clock.h>
#include <kernel/utils/map.h>

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/math.h>
#include <sys/proc.h>

static uint64_t space_pmm_bitmap_alloc_pages(void** pages, uint64_t pageAmount)
{
    for (uint64_t i = 0; i < pageAmount; i++)
    {
        void* page = pmm_alloc_bitmap(1, UINT32_MAX, 0);
        if (page == NULL)
        {
            for (uint64_t j = 0; j < i; j++)
            {
                pmm_free(pages[j]);
            }
            return ERR;
        }
        pages[i] = page;
    }
    return 0;
}

static void space_map_kernel_space_region(space_t* space, uintptr_t start, uintptr_t end)
{
    space_t* kernelSpace = vmm_kernel_space_get();

    pml_index_t startIndex = PML_ADDR_TO_INDEX(start, PML4);
    pml_index_t endIndex = PML_ADDR_TO_INDEX(end - 1, PML4) + 1; // Inclusive end

    for (pml_index_t i = startIndex; i < endIndex; i++)
    {
        // Share the kernel's PML4 entries, but clear the owned bit so the
        // shared lower levels are never freed together with this space.
        space->pageTable.pml4->entries[i] = kernelSpace->pageTable.pml4->entries[i];
        space->pageTable.pml4->entries[i].owned = 0;
    }
}

static void space_unmap_kernel_space_region(space_t* space, uintptr_t start, uintptr_t end)
{
    pml_index_t startIndex = PML_ADDR_TO_INDEX(start, PML4);
    pml_index_t endIndex = PML_ADDR_TO_INDEX(end - 1, PML4) + 1; // Inclusive end

    for (pml_index_t i = startIndex; i < endIndex; i++)
    {
        space->pageTable.pml4->entries[i].raw = 0;
    }
}

uint64_t space_init(space_t* space, uintptr_t startAddress, uintptr_t endAddress, space_flags_t flags)
{
    if (space == NULL)
    {
        errno = EINVAL;
        return ERR;
    }

    if (flags & SPACE_USE_PMM_BITMAP)
    {
        if (page_table_init(&space->pageTable, space_pmm_bitmap_alloc_pages, pmm_free_pages) == ERR)
        {
            errno = ENOMEM;
            return ERR;
        }
        // We only use the bitmap pmm allocator for the page table itself, not for mappings.
        space->pageTable.allocPages = pmm_alloc_pages;
    }
    else
    {
        if (page_table_init(&space->pageTable, pmm_alloc_pages, pmm_free_pages) == ERR)
        {
            errno = ENOMEM;
            return ERR;
        }
    }

    map_init(&space->pinnedPages);
    space->startAddress = startAddress;
    space->endAddress = endAddress;
    space->freeAddress = startAddress;
    space->flags = flags;
    space->callbacks = NULL;
    space->callbacksLength = 0;
    bitmap_init(&space->callbackBitmap, space->bitmapBuffer, BITMAP_BITS_TO_BYTES(PML_MAX_CALLBACK));
    list_init(&space->cpus);
    atomic_init(&space->shootdownAcks, 0);
    lock_init(&space->lock);

    if (flags & SPACE_MAP_KERNEL_BINARY)
    {
        space_map_kernel_space_region(space, VMM_KERNEL_BINARY_MIN, VMM_KERNEL_BINARY_MAX);
    }

    if (flags & SPACE_MAP_KERNEL_HEAP)
    {
        space_map_kernel_space_region(space, VMM_KERNEL_HEAP_MIN, VMM_KERNEL_HEAP_MAX);
    }

    if (flags & SPACE_MAP_IDENTITY)
    {
        space_map_kernel_space_region(space, VMM_IDENTITY_MAPPED_MIN, VMM_IDENTITY_MAPPED_MAX);
    }

    return 0;
}
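
/*
 * Usage sketch (hypothetical caller): creating a user address space that
 * shares the kernel mappings. The flag combination and the address range
 * below are illustrative assumptions, not taken from a real call site.
 *
 *   space_t space;
 *   if (space_init(&space, VMM_USER_SPACE_MIN, VMM_USER_SPACE_MAX,
 *           SPACE_MAP_KERNEL_BINARY | SPACE_MAP_KERNEL_HEAP | SPACE_MAP_IDENTITY) == ERR)
 *   {
 *       // errno is EINVAL or ENOMEM.
 *   }
 */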

void space_deinit(space_t* space)
{
    if (space == NULL)
    {
        return;
    }

    if (!list_is_empty(&space->cpus))
    {
        panic(NULL, "Attempted to free address space still in use by CPUs");
    }

    uint64_t index;
    BITMAP_FOR_EACH_SET(&index, &space->callbackBitmap)
    {
        space->callbacks[index].func(space->callbacks[index].private);
    }

    if (space->flags & SPACE_MAP_KERNEL_BINARY)
    {
        space_unmap_kernel_space_region(space, VMM_KERNEL_BINARY_MIN, VMM_KERNEL_BINARY_MAX);
    }

    if (space->flags & SPACE_MAP_KERNEL_HEAP)
    {
        space_unmap_kernel_space_region(space, VMM_KERNEL_HEAP_MIN, VMM_KERNEL_HEAP_MAX);
    }

    if (space->flags & SPACE_MAP_IDENTITY)
    {
        space_unmap_kernel_space_region(space, VMM_IDENTITY_MAPPED_MIN, VMM_IDENTITY_MAPPED_MAX);
    }

    free(space->callbacks);
    page_table_deinit(&space->pageTable);
}

void space_load(space_t* space)
{
    if (space == NULL)
    {
        return;
    }

    assert(!(rflags_read() & RFLAGS_INTERRUPT_ENABLE));

    cpu_t* self = cpu_get_unsafe();
    assert(self != NULL);

    assert(self->vmm.currentSpace != NULL);
    if (space == self->vmm.currentSpace)
    {
        return;
    }

    space_t* oldSpace = self->vmm.currentSpace;
    self->vmm.currentSpace = NULL;

    lock_acquire(&oldSpace->lock);
#ifndef NDEBUG
    bool found = false;
    cpu_t* cpu;
    LIST_FOR_EACH(cpu, &oldSpace->cpus, vmm.entry)
    {
        if (self == cpu)
        {
            found = true;
            break;
        }
    }
    if (!found)
    {
        lock_release(&oldSpace->lock);
        panic(NULL, "CPU not found in old space's CPU list");
    }
#endif
    list_remove(&oldSpace->cpus, &self->vmm.entry);
    lock_release(&oldSpace->lock);

    lock_acquire(&space->lock);
    list_push_back(&space->cpus, &self->vmm.entry);
    lock_release(&space->lock);
    self->vmm.currentSpace = space;

    page_table_load(&space->pageTable);
}
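
/*
 * Usage sketch (hypothetical scheduler path): space_load() expects to run
 * with interrupts disabled, e.g. inside a context switch. The surrounding
 * names are assumptions for illustration.
 *
 *   // ... interrupts already disabled in the switch path ...
 *   space_load(&nextProcess->space);
 *   // The CPU is now on the new space's CPU list, and page_table_load()
 *   // has reloaded CR3 (it skips the reload if CR3 already matches).
 */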

static void space_align_region(void** virtAddr, uint64_t* length)
{
    void* aligned = (void*)ROUND_DOWN(*virtAddr, PAGE_SIZE);
    *length += ((uint64_t)*virtAddr - (uint64_t)aligned);
    *virtAddr = aligned;
}
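
/*
 * Worked example: with PAGE_SIZE == 0x1000, a region starting at 0x40001234
 * with length 0x100 becomes start 0x40001000 and length 0x334; the start is
 * rounded down to the page boundary and the length grows by the same amount,
 * so no byte of the original range is lost.
 */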

static uint64_t space_populate_user_region(space_t* space, const void* buffer, uint64_t pageAmount)
{
    for (uint64_t i = 0; i < pageAmount; i++)
    {
        uintptr_t addr = (uintptr_t)buffer + (i * PAGE_SIZE);
        if (page_table_is_mapped(&space->pageTable, (void*)addr, 1))
        {
            continue;
        }

        void* page = pmm_alloc();
        if (page == NULL)
        {
            return ERR;
        }

        if (page_table_map(&space->pageTable, (void*)addr, page, 1, PML_PRESENT | PML_USER | PML_WRITE | PML_OWNED,
                PML_CALLBACK_NONE) == ERR)
        {
            pmm_free(page);
            return ERR;
        }
    }

    return 0;
}

static void space_pin_depth_dec(space_t* space, const void* address, uint64_t pageAmount)
{
    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;

    for (uint64_t i = 0; i < pageAmount; i++)
    {
        uintptr_t addr = (uintptr_t)address + (i * PAGE_SIZE);
        if (page_table_traverse(&space->pageTable, &traverse, addr, PML_NONE) == ERR)
        {
            continue;
        }

        if (!traverse.entry->present || !traverse.entry->pinned)
        {
            continue;
        }

        map_key_t key = map_key_uint64(addr);
        map_entry_t* entry = map_get(&space->pinnedPages, &key);
        if (entry == NULL) // Not pinned more than once
        {
            traverse.entry->pinned = false;
            continue;
        }

        space_pinned_page_t* pinnedPage = CONTAINER_OF(entry, space_pinned_page_t, mapEntry);
        pinnedPage->pinCount--;
        if (pinnedPage->pinCount == 0)
        {
            map_remove(&space->pinnedPages, &pinnedPage->mapEntry);
            free(pinnedPage);
            traverse.entry->pinned = false;
        }
    }
}

static uint64_t space_pin_depth_inc(space_t* space, const void* address, uint64_t pageAmount)
{
    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;

    for (uint64_t i = 0; i < pageAmount; i++)
    {
        uintptr_t addr = (uintptr_t)address + (i * PAGE_SIZE);
        if (page_table_traverse(&space->pageTable, &traverse, addr, PML_NONE) == ERR)
        {
            continue;
        }

        if (!traverse.entry->present)
        {
            continue;
        }

        if (!traverse.entry->pinned)
        {
            traverse.entry->pinned = true;
            continue;
        }

        map_key_t key = map_key_uint64(addr);
        map_entry_t* entry = map_get(&space->pinnedPages, &key);
        if (entry != NULL) // Already pinned more than once
        {
            space_pinned_page_t* pinnedPage = CONTAINER_OF(entry, space_pinned_page_t, mapEntry);
            pinnedPage->pinCount++;
            continue;
        }

        space_pinned_page_t* newPinnedPage = malloc(sizeof(space_pinned_page_t));
        if (newPinnedPage == NULL)
        {
            return ERR;
        }
        map_entry_init(&newPinnedPage->mapEntry);
        newPinnedPage->pinCount = 2; // One for the page table, one for the map
        if (map_insert(&space->pinnedPages, &key, &newPinnedPage->mapEntry) == ERR)
        {
            free(newPinnedPage);
            return ERR;
        }
    }

    return 0;
}
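
/*
 * Worked example of the pin-depth bookkeeping: pinning the same page three
 * times sets the page-table pinned bit on the first pin, creates a
 * pinnedPages entry with pinCount == 2 on the second, and bumps pinCount to
 * 3 on the third. Three matching space_pin_depth_dec() calls walk the same
 * states in reverse, clearing the pinned bit only when the last pin is gone.
 */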

uint64_t space_pin(space_t* space, const void* buffer, uint64_t length, stack_pointer_t* userStack)
{
    if (space == NULL || (buffer == NULL && length != 0))
    {
        errno = EINVAL;
        return ERR;
    }

    if (length == 0)
    {
        return 0;
    }

    uintptr_t bufferOverflow = (uintptr_t)buffer + length;
    if (bufferOverflow < (uintptr_t)buffer)
    {
        errno = EOVERFLOW;
        return ERR;
    }

    LOCK_SCOPE(&space->lock);

    space_align_region((void**)&buffer, &length);
    uint64_t pageAmount = BYTES_TO_PAGES(length);

    if (!page_table_is_mapped(&space->pageTable, buffer, pageAmount))
    {
        if (userStack == NULL || !stack_pointer_is_in_stack(userStack, (uintptr_t)buffer, length))
        {
            errno = EFAULT;
            return ERR;
        }

        if (space_populate_user_region(space, buffer, pageAmount) == ERR)
        {
            errno = ENOMEM;
            return ERR;
        }
    }

    if (space_pin_depth_inc(space, buffer, pageAmount) == ERR)
    {
        errno = ENOMEM;
        return ERR;
    }

    return 0;
}
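
/*
 * Usage sketch (hypothetical syscall handler): pin a user buffer before
 * touching it from kernel code, and unpin it afterwards. The surrounding
 * names (thread, userBuf, kernelBuf, len) are assumptions for illustration.
 *
 *   if (space_pin(space, userBuf, len, &thread->userStack) == ERR)
 *   {
 *       return ERR; // errno already set (EINVAL, EOVERFLOW, EFAULT or ENOMEM).
 *   }
 *   memcpy(kernelBuf, userBuf, len);
 *   space_unpin(space, userBuf, len);
 */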

uint64_t space_pin_terminated(space_t* space, const void* address, const void* terminator, uint8_t objectSize,
    uint64_t maxCount, stack_pointer_t* userStack)
{
    if (space == NULL || address == NULL || terminator == NULL || objectSize == 0 || maxCount == 0)
    {
        errno = EINVAL;
        return ERR;
    }

    uint64_t terminatorMatchedBytes = 0;
    uintptr_t current = (uintptr_t)address;
    uintptr_t end = (uintptr_t)address + (maxCount * objectSize);
    if (end < (uintptr_t)address)
    {
        errno = EOVERFLOW;
        return ERR;
    }

    LOCK_SCOPE(&space->lock);

    uint64_t pinnedPages = 0;
    while (current < end)
    {
        if (!page_table_is_mapped(&space->pageTable, (void*)current, 1))
        {
            if (userStack == NULL || !stack_pointer_is_in_stack(userStack, current, 1))
            {
                errno = EFAULT;
                goto error;
            }

            if (space_populate_user_region(space, (void*)current, 1) == ERR)
            {
                errno = ENOMEM;
                goto error;
            }
        }

        if (space_pin_depth_inc(space, (void*)current, 1) == ERR)
        {
            errno = ENOMEM;
            goto error;
        }
        pinnedPages++;

        // Scan ONLY the currently pinned page for the terminator.
        uintptr_t scanEnd = MIN(ROUND_UP(current + 1, PAGE_SIZE), end);
        for (uintptr_t scanAddr = current; scanAddr < scanEnd; scanAddr++)
        {
            // Terminator matched bytes will wrap around to the next page
            if (*((uint8_t*)scanAddr) == ((uint8_t*)terminator)[terminatorMatchedBytes])
            {
                terminatorMatchedBytes++;
                if (terminatorMatchedBytes == objectSize)
                {
                    return scanAddr - (uintptr_t)address + 1 - objectSize;
                }
            }
            else
            {
                scanAddr += objectSize - terminatorMatchedBytes - 1; // Skip the rest of the object
                terminatorMatchedBytes = 0;
            }
        }

        current = scanEnd;
    }

    errno = EINVAL; // Terminator not found within maxCount objects.
error:
    space_pin_depth_dec(space, address, pinnedPages);
    return ERR;
}
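
/*
 * Usage sketch (hypothetical): pinning a NUL-terminated user string. With
 * objectSize == 1 and a zero byte as the terminator, the return value is the
 * string length in bytes excluding the terminator; the pinned pages must
 * later be released with space_unpin(). userPath and MAX_PATH are
 * illustrative names.
 *
 *   uint8_t nul = 0;
 *   uint64_t len = space_pin_terminated(space, userPath, &nul, 1, MAX_PATH, &thread->userStack);
 *   if (len != ERR)
 *   {
 *       // ... use the string ...
 *       space_unpin(space, userPath, len + 1);
 *   }
 */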

void space_unpin(space_t* space, const void* address, uint64_t length)
{
    if (space == NULL || (address == NULL && length != 0))
    {
        return;
    }

    if (length == 0)
    {
        return;
    }

    uintptr_t overflow = (uintptr_t)address + length;
    if (overflow < (uintptr_t)address)
    {
        return;
    }

    LOCK_SCOPE(&space->lock);

    space_align_region((void**)&address, &length);
    uint64_t pageAmount = BYTES_TO_PAGES(length);

    space_pin_depth_dec(space, address, pageAmount);
}

uint64_t space_check_access(space_t* space, const void* addr, uint64_t length)
{
    if (space == NULL || (addr == NULL && length != 0))
    {
        errno = EINVAL;
        return ERR;
    }

    if (length == 0)
    {
        return 0;
    }

    uintptr_t addrOverflow = (uintptr_t)addr + length;
    if (addrOverflow < (uintptr_t)addr)
    {
        errno = EOVERFLOW;
        return ERR;
    }

    if ((uintptr_t)addr < space->startAddress || addrOverflow > space->endAddress)
    {
        errno = EFAULT;
        return ERR;
    }

    return 0;
}

static void* space_find_free_region(space_t* space, uint64_t pageAmount)
{
    void* addr;
    if (page_table_find_unmapped_region(&space->pageTable, (void*)space->freeAddress, (void*)space->endAddress,
            pageAmount, &addr) != ERR)
    {
        space->freeAddress = (uintptr_t)addr + pageAmount * PAGE_SIZE;
        return addr;
    }

    if (page_table_find_unmapped_region(&space->pageTable, (void*)space->startAddress, (void*)space->freeAddress,
            pageAmount, &addr) != ERR)
    {
        space->freeAddress = (uintptr_t)addr + pageAmount * PAGE_SIZE;
        return addr;
    }

    return NULL;
}

uint64_t space_mapping_start(space_t* space, space_mapping_t* mapping, void* virtAddr, void* physAddr, uint64_t length,
    pml_flags_t flags)
{
    if (space == NULL || mapping == NULL || length == 0)
    {
        errno = EINVAL;
        return ERR;
    }

    uintptr_t virtOverflow = (uintptr_t)virtAddr + length;
    if (virtOverflow < (uintptr_t)virtAddr)
    {
        errno = EOVERFLOW;
        return ERR;
    }

    uintptr_t physOverflow = (uintptr_t)physAddr + length;
    if (physAddr != NULL && physOverflow < (uintptr_t)physAddr)
    {
        errno = EOVERFLOW;
        return ERR;
    }

    if (flags & PML_USER)
    {
        if (virtAddr != NULL &&
            ((uintptr_t)virtAddr + length > VMM_USER_SPACE_MAX || (uintptr_t)virtAddr < VMM_USER_SPACE_MIN))
        {
            errno = EFAULT;
            return ERR;
        }
    }

    stack_pointer_poke(1000); // 1000 bytes should be enough.

    lock_acquire(&space->lock);

    uint64_t pageAmount;
    if (virtAddr == NULL)
    {
        pageAmount = BYTES_TO_PAGES(length);
        virtAddr = space_find_free_region(space, pageAmount);
        if (virtAddr == NULL)
        {
            lock_release(&space->lock);
            errno = ENOMEM;
            return ERR;
        }
    }
    else
    {
        space_align_region(&virtAddr, &length);
        pageAmount = BYTES_TO_PAGES(length);
    }

    mapping->virtAddr = virtAddr;
    if (physAddr != NULL)
    {
        mapping->physAddr = (void*)PML_ENSURE_LOWER_HALF(ROUND_DOWN(physAddr, PAGE_SIZE));
    }
    else
    {
        mapping->physAddr = NULL;
    }

    mapping->flags = flags;
    mapping->pageAmount = pageAmount;
    return 0; // We return with the lock still acquired.
}

pml_callback_id_t space_alloc_callback(space_t* space, uint64_t pageAmount, space_callback_func_t func, void* private)
{
    if (space == NULL)
    {
        return PML_MAX_CALLBACK;
    }

    pml_callback_id_t callbackId = bitmap_find_first_clear(&space->callbackBitmap, 0, PML_MAX_CALLBACK);
    if (callbackId == PML_MAX_CALLBACK)
    {
        return PML_MAX_CALLBACK;
    }

    if (callbackId >= space->callbacksLength)
    {
        space_callback_t* newCallbacks = malloc(sizeof(space_callback_t) * (callbackId + 1));
        if (newCallbacks == NULL)
        {
            return PML_MAX_CALLBACK;
        }

        if (space->callbacks != NULL)
        {
            memcpy(newCallbacks, space->callbacks, sizeof(space_callback_t) * space->callbacksLength);
            free(space->callbacks);
        }
        memset(&newCallbacks[space->callbacksLength], 0,
            sizeof(space_callback_t) * (callbackId + 1 - space->callbacksLength));

        space->callbacks = newCallbacks;
        space->callbacksLength = callbackId + 1;
    }

    bitmap_set(&space->callbackBitmap, callbackId);
    space_callback_t* callback = &space->callbacks[callbackId];
    callback->func = func;
    callback->private = private;
    callback->pageAmount = pageAmount;
    return callbackId;
}
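
/*
 * Usage sketch (hypothetical): register a cleanup callback for a mapped
 * region; space_deinit() invokes every registered callback when the space is
 * torn down. on_unmap and ctx are illustrative names.
 *
 *   pml_callback_id_t id = space_alloc_callback(space, pageAmount, on_unmap, ctx);
 *   if (id == PML_MAX_CALLBACK)
 *   {
 *       // Out of callback slots or memory.
 *   }
 */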

void space_free_callback(space_t* space, pml_callback_id_t callbackId)
{
    bitmap_clear(&space->callbackBitmap, callbackId);
}

static void space_tlb_shootdown_ipi_handler(ipi_func_data_t* data)
{
    vmm_cpu_ctx_t* ctx = &data->self->vmm;
    while (true)
    {
        lock_acquire(&ctx->lock);
        if (ctx->shootdownCount == 0)
        {
            lock_release(&ctx->lock);
            break;
        }

        vmm_shootdown_t shootdown = ctx->shootdowns[ctx->shootdownCount - 1];
        ctx->shootdownCount--;
        lock_release(&ctx->lock);

        assert(shootdown.space != NULL);
        assert(shootdown.pageAmount != 0);
        assert(shootdown.virtAddr != NULL);

        tlb_invalidate(shootdown.virtAddr, shootdown.pageAmount);
        atomic_fetch_add(&shootdown.space->shootdownAcks, 1);
    }
}

void space_tlb_shootdown(space_t* space, void* virtAddr, uint64_t pageAmount)
{
    if (space == NULL)
    {
        return;
    }

    if (cpu_amount() <= 1)
    {
        return;
    }
    cpu_t* self = cpu_get_unsafe();

    uint16_t expectedAcks = 0;
    atomic_store(&space->shootdownAcks, 0);

    cpu_t* cpu;
    LIST_FOR_EACH(cpu, &space->cpus, vmm.entry)
    {
        if (cpu == self)
        {
            continue;
        }

        lock_acquire(&cpu->vmm.lock);
        if (cpu->vmm.shootdownCount >= VMM_MAX_SHOOTDOWN_REQUESTS)
        {
            lock_release(&cpu->vmm.lock);
            panic(NULL, "CPU %d shootdown buffer overflow", cpu->id);
        }

        vmm_shootdown_t* shootdown = &cpu->vmm.shootdowns[cpu->vmm.shootdownCount++];
        shootdown->space = space;
        shootdown->virtAddr = virtAddr;
        shootdown->pageAmount = pageAmount;
        lock_release(&cpu->vmm.lock);

        if (ipi_send(cpu, IPI_SINGLE, space_tlb_shootdown_ipi_handler, NULL) == ERR)
        {
            panic(NULL, "Failed to send TLB shootdown IPI to CPU %d", cpu->id);
        }
        expectedAcks++;
    }

    clock_t startTime = clock_uptime();
    while (atomic_load(&space->shootdownAcks) < expectedAcks)
    {
        if (clock_uptime() - startTime > SPACE_TLB_SHOOTDOWN_TIMEOUT)
        {
            panic(NULL, "TLB shootdown timeout in space %p for region %p - %p", space, virtAddr,
                (void*)((uintptr_t)virtAddr + pageAmount * PAGE_SIZE));
        }

        asm volatile("pause");
    }
}
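
/*
 * Usage sketch (hypothetical unmap path): after entries are removed from the
 * page table, the local TLB is invalidated and every other CPU currently
 * running this space is told to do the same before the pages are reused.
 * The unmap step itself is illustrative and not shown in this file.
 *
 *   // ... remove the mappings from space->pageTable ...
 *   tlb_invalidate(virtAddr, pageAmount);              // local CPU
 *   space_tlb_shootdown(space, virtAddr, pageAmount);  // remote CPUs, waits for acks
 */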

static void space_update_free_address(space_t* space, uintptr_t virtAddr, uint64_t pageAmount)
{
    if (space == NULL)
    {
        return;
    }

    if (virtAddr <= space->freeAddress && space->freeAddress < virtAddr + pageAmount * PAGE_SIZE)
    {
        space->freeAddress = virtAddr + pageAmount * PAGE_SIZE;
    }
}

void* space_mapping_end(space_t* space, space_mapping_t* mapping, errno_t err)
{
    if (space == NULL || mapping == NULL)
    {
        errno = EINVAL;
        return NULL;
    }

    if (err != EOK)
    {
        lock_release(&space->lock); // Release the lock from space_mapping_start.
        errno = err;
        return NULL;
    }

    space_update_free_address(space, (uintptr_t)mapping->virtAddr, mapping->pageAmount);
    lock_release(&space->lock); // Release the lock from space_mapping_start.
    return mapping->virtAddr;
}
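
/*
 * Usage sketch (hypothetical): the start/end pair forms a small transaction.
 * space_mapping_start() validates arguments, picks a free region if needed
 * and returns with the space lock held; the caller edits the page table and
 * hands its error code to space_mapping_end(), which releases the lock.
 *
 *   space_mapping_t mapping;
 *   if (space_mapping_start(space, &mapping, NULL, physAddr, length, flags) == ERR)
 *   {
 *       return NULL;
 *   }
 *   errno_t err = EOK;
 *   if (page_table_map(&space->pageTable, mapping.virtAddr, mapping.physAddr,
 *           mapping.pageAmount, mapping.flags, PML_CALLBACK_NONE) == ERR)
 *   {
 *       err = ENOMEM;
 *   }
 *   return space_mapping_end(space, &mapping, err);
 */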

bool space_is_mapped(space_t* space, const void* virtAddr, uint64_t length)
{
    space_align_region((void**)&virtAddr, &length);
    LOCK_SCOPE(&space->lock);
    return page_table_is_mapped(&space->pageTable, virtAddr, BYTES_TO_PAGES(length));
}

uint64_t space_user_page_count(space_t* space)
{
    if (space == NULL)
    {
        errno = EINVAL;
        return ERR;
    }

    LOCK_SCOPE(&space->lock);
    return page_table_count_pages_with_flags(&space->pageTable, (void*)VMM_USER_SPACE_MIN,
        BYTES_TO_PAGES(VMM_USER_SPACE_MAX - VMM_USER_SPACE_MIN), PML_PRESENT | PML_USER);
}