paging.h
1#pragma once
2
3#include <kernel/cpu/regs.h>
5
6#include <assert.h>
7#include <stdbool.h>
8#include <stdlib.h>
9#include <string.h>
10#include <sys/proc.h>
11
12#ifdef __BOOT__
13#include <efi.h>
14#include <efilib.h>
15#endif
16
31static inline void tlb_invalidate(void* virtAddr, uint64_t pageCount)
32{
33 if (pageCount == 0)
34 {
35 return;
36 }
37
38 if (pageCount > 16)
39 {
40 cr3_write(cr3_read()); // Reconstructed (assumed): reloading CR3 flushes all non-global TLB entries at once.
41 }
42 else
43 {
44 for (uint64_t i = 0; i < pageCount; i++)
45 {
46 asm volatile("invlpg (%0)" ::"r"(virtAddr + i * PAGE_SIZE) : "memory");
47 }
48 }
49}
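/*
 * Why the threshold: invlpg invalidates a single page per instruction, so its cost grows
 * linearly with pageCount, while a CR3 reload flushes all non-global entries at a fixed
 * cost. The cutoff of 16 pages is a heuristic trade-off between the two.
 */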
50
61static inline uintptr_t pml_accessible_addr(pml_entry_t entry)
62{
63#ifdef __BOOT__
64 return entry.addr << PML_ADDR_OFFSET_BITS;
65#else
66 return PML_LOWER_TO_HIGHER(entry.addr << PML_ADDR_OFFSET_BITS); // Reconstructed (assumed): the kernel accesses physical pages through the higher-half mapping.
67#endif
68}
69
78static inline bool pml_is_empty(pml_t* pml)
79{
80 for (pml_index_t i = 0; i < PML_INDEX_AMOUNT; i++)
81 {
82 if (pml->entries[i].raw != 0)
83 {
84 return false;
85 }
86 }
87 return true;
88}
89
97static inline uint64_t pml_new(page_table_t* table, pml_t** outPml)
98{
99 pml_t* pml;
100 if (table->allocPages((void**)&pml, 1) == ERR)
101 {
102 return ERR;
103 }
104#ifdef __BOOT__
105 SetMem(pml, PAGE_SIZE, 0);
106#else
107 memset(pml, 0, PAGE_SIZE);
108#endif
109 *outPml = pml;
110 return 0;
111}
112
120static inline void pml_free(page_table_t* table, pml_t* pml, pml_level_t level)
121{
122 if (level < 0)
123 {
124 return;
125 }
126
127 for (pml_index_t i = 0; i < PML_INDEX_AMOUNT; i++)
128 {
129 pml_entry_t* entry = &pml->entries[i];
130 if (!entry->present)
131 {
132 continue;
133 }
134
135 if (level > PML1)
136 {
137 pml_free(table, (pml_t*)pml_accessible_addr(*entry), level - 1);
138 }
139 else if (entry->owned)
140 {
141 void* addr = (void*)pml_accessible_addr(*entry);
142 table->freePages(&addr, 1);
143 }
144 }
145
146 table->freePages((void**)&pml, 1);
147}
148
157static inline uint64_t page_table_init(page_table_t* table, pml_alloc_pages_t allocPages, pml_free_pages_t freePages)
158{
159 table->allocPages = allocPages;
160 table->freePages = freePages;
161 if (pml_new(table, &table->pml4) == ERR)
162 {
163 return ERR;
164 }
165 return 0;
166}
167
173static inline void page_table_deinit(page_table_t* table)
174{
175 pml_free(table, table->pml4, PML4);
176}
177
183static inline void page_table_load(page_table_t* table)
184{
185 uint64_t cr3 = PML_ENSURE_LOWER_HALF(table->pml4);
186 if (cr3 != cr3_read())
187 {
188 cr3_write(cr3);
189 }
190}
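/*
 * A minimal init/load sketch. my_alloc_pages and my_free_pages are hypothetical
 * caller-provided callbacks (not part of this header) matching pml_alloc_pages_t
 * and pml_free_pages_t:
 *
 *     static page_table_t table;
 *     if (page_table_init(&table, my_alloc_pages, my_free_pages) != ERR)
 *     {
 *         page_table_load(&table); // Reloads CR3 only if the table is not already active.
 *     }
 */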
191
208static inline uint64_t page_table_get_pml(page_table_t* table, pml_t* currentPml, pml_index_t index, pml_flags_t flags,
209 pml_t** outPml)
210{
211 pml_entry_t* entry = &currentPml->entries[index];
212 if (entry->present)
213 {
214 *outPml = (pml_t*)pml_accessible_addr(*entry);
215 return 0;
216 }
217 else if (flags & PML_PRESENT)
218 {
219 pml_t* nextPml;
220 if (pml_new(table, &nextPml) == ERR)
221 {
222 return ERR;
223 }
224 currentPml->entries[index].raw = (flags & PML_FLAGS_MASK) | (PML_ENSURE_LOWER_HALF(nextPml) & PML_ADDR_MASK);
225 *outPml = nextPml;
226 return 0;
227 }
228
229 return ERR;
230}
231
237typedef struct
238{
239 pml_t* pml3;
240 pml_t* pml2;
241 pml_t* pml1;
242 bool pml3Valid, pml2Valid, pml1Valid; // Reconstructed from usage below; exact member layout assumed.
243
244 pml_index_t oldIdx3;
245 pml_index_t oldIdx2;
246 pml_index_t oldIdx1;
247 pml_entry_t* entry;
248} page_table_traverse_t;
249
255#define PAGE_TABLE_TRAVERSE_CREATE \
256 { \
257 .pml3Valid = false, \
258 .pml2Valid = false, \
259 .pml1Valid = false, \
260 }
261
279static inline uint64_t page_table_traverse(page_table_t* table, page_table_traverse_t* traverse, uintptr_t virtAddr,
280 pml_flags_t flags)
281{
282 pml_index_t newIdx3 = PML_ADDR_TO_INDEX(virtAddr, PML4);
283 if (!traverse->pml3Valid || traverse->oldIdx3 != newIdx3)
284 {
285 if (page_table_get_pml(table, table->pml4, newIdx3, (flags | PML_WRITE | PML_USER) & ~PML_GLOBAL,
286 &traverse->pml3) == ERR)
287 {
288 return ERR;
289 }
290 traverse->oldIdx3 = newIdx3;
291 traverse->pml2Valid = false; // Invalidate cache for lower levels
292 }
293
294 pml_index_t newIdx2 = PML_ADDR_TO_INDEX(virtAddr, PML3);
295 if (!traverse->pml2Valid || traverse->oldIdx2 != newIdx2)
296 {
297 if (page_table_get_pml(table, traverse->pml3, newIdx2, flags | PML_WRITE | PML_USER, &traverse->pml2) == ERR)
298 {
299 return ERR;
300 }
301 traverse->oldIdx2 = newIdx2;
302 traverse->pml1Valid = false; // Invalidate cache for lower levels
303 }
304
305 pml_index_t newIdx1 = PML_ADDR_TO_INDEX(virtAddr, PML2);
306 if (!traverse->pml1Valid || traverse->oldIdx1 != newIdx1)
307 {
308 if (page_table_get_pml(table, traverse->pml2, newIdx1, flags | PML_WRITE | PML_USER, &traverse->pml1) == ERR)
309 {
310 return ERR;
311 }
312 traverse->oldIdx1 = newIdx1;
313 }
314
315 traverse->entry = &traverse->pml1->entries[PML_ADDR_TO_INDEX(virtAddr, PML1)];
316 return 0;
317}
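/*
 * Intended access pattern (a sketch inferred from the cache-invalidation logic above):
 * reuse one traverse object across sequential pages so the PML3/PML2/PML1 lookups are
 * amortized instead of repeated for every page.
 *
 *     page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
 *     for (uint64_t i = 0; i < pageAmount; i++)
 *     {
 *         if (page_table_traverse(table, &traverse, virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
 *         {
 *             continue;
 *         }
 *         // traverse.entry now points at the PML1 entry for this page.
 *     }
 */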
318
329static inline uint64_t page_table_get_phys_addr(page_table_t* table, const void* virtAddr, void** outPhysAddr)
330{
331 uint64_t offset = ((uint64_t)virtAddr) % PAGE_SIZE;
332 virtAddr = (void*)ROUND_DOWN(virtAddr, PAGE_SIZE);
333
334 page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
335
336 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr, PML_NONE) == ERR)
337 {
338 return ERR;
339 }
340
341 if (!traverse.entry->present)
342 {
343 return ERR;
344 }
345
346 *outPhysAddr = (void*)((traverse.entry->addr << PML_ADDR_OFFSET_BITS) + offset);
347 return 0;
348}
349
360static inline bool page_table_is_mapped(page_table_t* table, const void* virtAddr, uint64_t pageAmount)
361{
362 page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
363 for (uint64_t i = 0; i < pageAmount; i++)
364 {
365 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
366 {
367 return false;
368 }
369
370 if (!traverse.entry->present)
371 {
372 return false;
373 }
374 }
375
376 return true;
377}
378
389static inline bool page_table_is_unmapped(page_table_t* table, const void* virtAddr, uint64_t pageAmount)
390{
391 page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
392
393 for (uint64_t i = 0; i < pageAmount; i++)
394 {
395 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
396 {
397 continue;
398 }
399
400 if (traverse.entry->present)
401 {
402 return false;
403 }
404 }
405
406 return true;
407}
408
422static inline uint64_t page_table_map(page_table_t* table, void* virtAddr, void* physAddr, uint64_t pageAmount,
423 pml_flags_t flags, pml_callback_id_t callbackId)
424{
425 if (!(flags & PML_PRESENT))
426 {
427 return ERR;
428 }
429
430 page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
431
432 for (uint64_t i = 0; i < pageAmount; i++)
433 {
434 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr, flags) == ERR)
435 {
436 return ERR;
437 }
438
439 if (traverse.entry->present)
440 {
441 return ERR;
442 }
443
444 traverse.entry->raw = flags;
445 traverse.entry->addr = PML_ENSURE_LOWER_HALF(physAddr) >> PML_ADDR_OFFSET_BITS; // Reconstructed (assumed): store the page-aligned physical address as the addr field expects.
446 traverse.entry->lowCallbackId = callbackId & 1;
447 traverse.entry->highCallbackId = callbackId >> 1;
448
449 physAddr = (void*)((uintptr_t)physAddr + PAGE_SIZE);
450 virtAddr = (void*)((uintptr_t)virtAddr + PAGE_SIZE);
451 }
452
453 return 0;
454}
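/*
 * Example (sketch; addresses are illustrative): identity-map four writable pages with
 * no callback attached.
 *
 *     if (page_table_map(&table, (void*)0x200000, (void*)0x200000, 4,
 *             PML_PRESENT | PML_WRITE, PML_CALLBACK_NONE) == ERR)
 *     {
 *         // handle the error
 *     }
 */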
455
469static inline uint64_t page_table_map_pages(page_table_t* table, void* virtAddr, void** pages, uint64_t pageAmount,
470 pml_flags_t flags, pml_callback_id_t callbackId)
471{
472 if (!(flags & PML_PRESENT))
473 {
474 return ERR;
475 }
476
477 page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
478
479 for (uint64_t i = 0; i < pageAmount; i++)
480 {
481 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr, flags) == ERR)
482 {
483 return ERR;
484 }
485
486 if (traverse.entry->present)
487 {
488 return ERR;
489 }
490
491 traverse.entry->raw = flags;
492 traverse.entry->addr = PML_ENSURE_LOWER_HALF(pages[i]) >> PML_ADDR_OFFSET_BITS; // Reconstructed (assumed): store the i-th physical page.
493 traverse.entry->lowCallbackId = callbackId & 1;
494 traverse.entry->highCallbackId = callbackId >> 1;
495
496 virtAddr = (void*)((uintptr_t)virtAddr + PAGE_SIZE);
497 }
498
499 return 0;
500}
501
515static inline void page_table_unmap(page_table_t* table, void* virtAddr, uint64_t pageAmount)
516{
517 page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
518
519 for (uint64_t i = 0; i < pageAmount; i++)
520 {
521 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
522 {
523 continue;
524 }
525
526 if (!traverse.entry->present)
527 {
528 continue;
529 }
530
531 traverse.entry->present = 0;
532 }
533
534 tlb_invalidate(virtAddr, pageAmount);
535}
536
541typedef struct
542{
543 void* pages[PML_PAGE_BUFFER_SIZE];
544 uint64_t pageCount;
545} page_table_page_buffer_t; // Reconstructed from the member list below; layout assumed.
546
556static inline void page_table_page_buffer_push(page_table_t* table, page_table_page_buffer_t* buffer, void* address)
557{
558 buffer->pages[buffer->pageCount] = address;
559 buffer->pageCount++;
560
561 if (buffer->pageCount >= PML_PAGE_BUFFER_SIZE)
562 {
563 table->freePages(buffer->pages, buffer->pageCount);
564 buffer->pageCount = 0;
565 }
566}
567
576static inline void page_table_page_buffer_flush(page_table_t* table, page_table_page_buffer_t* buffer)
577{
578 if (buffer->pageCount > 0)
579 {
580 table->freePages(buffer->pages, buffer->pageCount);
581 buffer->pageCount = 0;
582 }
583}
584
595static inline void page_table_clear_pml1_pml2_pml3(page_table_t* table, page_table_traverse_t* prevTraverse,
596 page_table_traverse_t* traverse, page_table_page_buffer_t* pageBuffer)
597{
598 if (prevTraverse->pml1Valid && prevTraverse->pml1 != traverse->pml1 && pml_is_empty(prevTraverse->pml1))
599 {
600 page_table_page_buffer_push(table, pageBuffer, prevTraverse->pml1);
601 prevTraverse->pml2->entries[prevTraverse->oldIdx1].raw = 0;
602 if (prevTraverse->pml2Valid && prevTraverse->pml2 != traverse->pml2 && pml_is_empty(prevTraverse->pml2))
603 {
604 page_table_page_buffer_push(table, pageBuffer, prevTraverse->pml2);
605 prevTraverse->pml3->entries[prevTraverse->oldIdx2].raw = 0;
606 if (prevTraverse->pml3Valid && prevTraverse->pml3 != traverse->pml3 && pml_is_empty(prevTraverse->pml3))
607 {
608 page_table_page_buffer_push(table, pageBuffer, prevTraverse->pml3);
609 table->pml4->entries[prevTraverse->oldIdx3].raw = 0;
610 }
611 }
612 }
613}
614
629static inline void page_table_clear(page_table_t* table, void* virtAddr, uint64_t pageAmount)
630{
631 page_table_page_buffer_t pageBuffer = {.pageCount = 0};
632
633 page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
634 page_table_traverse_t prevTraverse = PAGE_TABLE_TRAVERSE_CREATE;
635 for (uint64_t i = 0; i < pageAmount; i++)
636 {
637 uintptr_t currentVirtAddr = (uintptr_t)virtAddr + i * PAGE_SIZE;
638
639 page_table_clear_pml1_pml2_pml3(table, &prevTraverse, &traverse, &pageBuffer);
640
641 if (page_table_traverse(table, &traverse, currentVirtAddr, PML_NONE) == ERR)
642 {
643 prevTraverse.pml1Valid = false;
644 prevTraverse.pml2Valid = false;
645 prevTraverse.pml3Valid = false;
646 continue;
647 }
648 prevTraverse = traverse;
649
650 if (!traverse.entry->present) // Skip entries that are not mapped.
651 {
652 continue;
653 }
654
655 if (traverse.entry->owned)
656 {
657 page_table_page_buffer_push(table, &pageBuffer, (void*)pml_accessible_addr(*traverse.entry));
658 }
659
660 traverse.entry->raw = 0;
661 }
662
663 page_table_clear_pml1_pml2_pml3(table, &prevTraverse, &traverse, &pageBuffer);
664 page_table_page_buffer_flush(table, &pageBuffer);
665}
666
675static inline void page_table_collect_callbacks(page_table_t* table, void* virtAddr, uint64_t pageAmount,
676 uint64_t* callbacks)
677{
678 page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
679
680 for (uint64_t i = 0; i < pageAmount; i++)
681 {
682 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
683 {
684 continue;
685 }
686
687 if (!traverse.entry->present)
688 {
689 continue;
690 }
691
692 pml_callback_id_t callbackId = traverse.entry->lowCallbackId | (traverse.entry->highCallbackId << 1);
693 if (callbackId != PML_CALLBACK_NONE)
694 {
695 callbacks[callbackId]++;
696 }
697 }
698}
699
711static inline uint64_t page_table_set_flags(page_table_t* table, void* virtAddr, uint64_t pageAmount, pml_flags_t flags)
712{
713 page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
714
715 for (uint64_t i = 0; i < pageAmount; i++)
716 {
717 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
718 {
719 continue;
720 }
721
722 if (!traverse.entry->present)
723 {
724 return ERR;
725 }
726
727 if (traverse.entry->owned)
728 {
729 flags |= PML_OWNED;
730 }
731
732 // Bit magic to only update the flags while preserving the address and callback ID.
733 traverse.entry->raw = (traverse.entry->raw & ~PML_FLAGS_MASK) | (flags & PML_FLAGS_MASK);
734 }
735
736 tlb_invalidate(virtAddr, pageAmount);
737 return 0;
738}
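/*
 * Example (sketch): downgrade a mapped region to read-only. The address and callback ID
 * bits are preserved by the masking above, and PML_OWNED is re-applied automatically.
 *
 *     page_table_set_flags(&table, virt, 4, PML_PRESENT);
 */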
739
756static inline uint64_t page_table_find_unmapped_region(page_table_t* table, void* startAddr, void* endAddr,
757 uint64_t pageAmount, void** outAddr)
758{
759 uintptr_t currentAddr = ROUND_DOWN((uintptr_t)startAddr, PAGE_SIZE);
760 uintptr_t end = (uintptr_t)endAddr;
761
762 if (pageAmount >= (PML3_SIZE / PAGE_SIZE))
763 {
764 while (currentAddr < end)
765 {
766 pml_index_t idx4 = PML_ADDR_TO_INDEX(currentAddr, PML4);
767 pml_index_t idx3 = PML_ADDR_TO_INDEX(currentAddr, PML3);
768
769 pml_entry_t* entry4 = &table->pml4->entries[idx4];
770 if (!entry4->present)
771 {
772 *outAddr = (void*)currentAddr;
773 return 0;
774 }
775
776 pml_t* pml3 = (pml_t*)pml_accessible_addr(*entry4);
777 pml_entry_t* entry3 = &pml3->entries[idx3];
778
779 if (!entry3->present)
780 {
781 *outAddr = (void*)currentAddr;
782 return 0;
783 }
784
785 currentAddr = ROUND_UP(currentAddr + 1, PML3_SIZE);
786 }
787 return ERR;
788 }
789
790 if (pageAmount >= (PML2_SIZE / PAGE_SIZE))
791 {
792 while (currentAddr < end)
793 {
794 pml_index_t idx4 = PML_ADDR_TO_INDEX(currentAddr, PML4);
795 pml_entry_t* entry4 = &table->pml4->entries[idx4];
796
797 if (!entry4->present)
798 {
799 *outAddr = (void*)currentAddr;
800 return 0;
801 }
802
803 pml_t* pml3 = (pml_t*)pml_accessible_addr(*entry4);
804 pml_index_t idx3 = PML_ADDR_TO_INDEX(currentAddr, PML3);
805 pml_entry_t* entry3 = &pml3->entries[idx3];
806
807 if (!entry3->present)
808 {
809 *outAddr = (void*)currentAddr;
810 return 0;
811 }
812
813 pml_t* pml2 = (pml_t*)pml_accessible_addr(*entry3);
814 pml_index_t idx2 = PML_ADDR_TO_INDEX(currentAddr, PML2);
815 pml_entry_t* entry2 = &pml2->entries[idx2];
816
817 if (!entry2->present)
818 {
819 *outAddr = (void*)currentAddr;
820 return 0;
821 }
822
823 currentAddr = ROUND_UP(currentAddr + 1, PML2_SIZE);
824 }
825 return ERR;
826 }
827
828 uintptr_t regionStart = 0;
829 uint64_t consecutiveUnmapped = 0;
830
831 while (currentAddr < end)
832 {
833 pml_index_t idx4 = PML_ADDR_TO_INDEX(currentAddr, PML4);
834 pml_entry_t* entry4 = &table->pml4->entries[idx4];
835
836 if (!entry4->present)
837 {
838 if (consecutiveUnmapped == 0)
839 {
840 regionStart = currentAddr;
841 }
842
843 uintptr_t skipTo = PML_INDEX_TO_ADDR(idx4 + 1, PML4);
844 uint64_t skippedPages = (MIN(skipTo, end) - currentAddr) / PAGE_SIZE;
845 consecutiveUnmapped += skippedPages;
846
847 if (consecutiveUnmapped >= pageAmount)
848 {
849 *outAddr = (void*)regionStart;
850 return 0;
851 }
852
853 currentAddr = skipTo;
854 continue;
855 }
856
857 pml_t* pml3 = (pml_t*)pml_accessible_addr(*entry4);
858 pml_index_t idx3 = PML_ADDR_TO_INDEX(currentAddr, PML3);
859 pml_entry_t* entry3 = &pml3->entries[idx3];
860
861 if (!entry3->present)
862 {
863 if (consecutiveUnmapped == 0)
864 {
865 regionStart = currentAddr;
866 }
867
868 uint64_t skippedPages = PML3_SIZE / PAGE_SIZE;
869 consecutiveUnmapped += skippedPages;
870
871 if (consecutiveUnmapped >= pageAmount)
872 {
873 *outAddr = (void*)regionStart;
874 return 0;
875 }
876
877 currentAddr = ROUND_UP(currentAddr + 1, PML3_SIZE);
878 continue;
879 }
880
881 pml_t* pml2 = (pml_t*)pml_accessible_addr(*entry3);
882 pml_index_t idx2 = PML_ADDR_TO_INDEX(currentAddr, PML2);
883 pml_entry_t* entry2 = &pml2->entries[idx2];
884
885 if (!entry2->present)
886 {
887 if (consecutiveUnmapped == 0)
888 {
889 regionStart = currentAddr;
890 }
891
892 uint64_t skippedPages = PML2_SIZE / PAGE_SIZE;
893 consecutiveUnmapped += skippedPages;
894
895 if (consecutiveUnmapped >= pageAmount)
896 {
897 *outAddr = (void*)regionStart;
898 return 0;
899 }
900
901 currentAddr = ROUND_UP(currentAddr + 1, PML2_SIZE);
902 continue;
903 }
904
905 pml_t* pml1 = (pml_t*)pml_accessible_addr(*entry2);
906 pml_index_t idx1 = PML_ADDR_TO_INDEX(currentAddr, PML1);
907
908 for (; idx1 < PML_INDEX_AMOUNT && currentAddr < end; idx1++, currentAddr += PAGE_SIZE)
909 {
910 if (!pml1->entries[idx1].present)
911 {
912 if (consecutiveUnmapped == 0)
913 {
914 regionStart = currentAddr;
915 }
916 consecutiveUnmapped++;
917
918 if (consecutiveUnmapped >= pageAmount)
919 {
920 *outAddr = (void*)regionStart;
921 return 0;
922 }
923 }
924 else
925 {
926 consecutiveUnmapped = 0;
927 }
928 }
929 }
930
931 return ERR;
932}
933
942static inline bool page_table_is_pinned(page_table_t* table, const void* virtAddr, uint64_t pageAmount)
943{
944 page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
945
946 for (uint64_t i = 0; i < pageAmount; i++)
947 {
948 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
949 {
950 continue;
951 }
952
953 if (!traverse.entry->present)
954 {
955 continue;
956 }
957
958 if (traverse.entry->pinned)
959 {
960 return true;
961 }
962 }
963
964 return false;
965}
966
static void page_table_clear_pml1_pml2_pml3(page_table_t *table, page_table_traverse_t *prevTraverse, page_table_traverse_t *traverse, page_table_page_buffer_t *pageBuffer)
Clears any empty page table levels any time a pml1, pml2 or pml3 boundary is crossed.
Definition paging.h:595
pml_level_t
Enums for the different page table levels.
#define PML_ADDR_OFFSET_BITS
Number of bits used for the offset within a page.
#define PML_FLAGS_MASK
Mask for all pml flags.
void(* pml_free_pages_t)(void **, uint64_t)
Generic page free function type.
#define PML_ADDR_MASK
Mask for the address in a page table entry.
static uint64_t page_table_get_phys_addr(page_table_t *table, const void *virtAddr, void **outPhysAddr)
Retrieves the physical address mapped to a given virtual address.
Definition paging.h:329
static void tlb_invalidate(void *virtAddr, uint64_t pageCount)
Invalidates a region of pages in the TLB.
Definition paging.h:31
#define PML_INDEX_TO_ADDR(index, level)
Calculates the lowest virtual address that maps to a given index at a specified page table level.
pml_index_t
Indexes into a pml level.
static void page_table_collect_callbacks(page_table_t *table, void *virtAddr, uint64_t pageAmount, uint64_t *callbacks)
Collects the number of pages associated with each callback ID in the specified range.
Definition paging.h:675
#define PML_CALLBACK_NONE
Special callback ID that indicates no callback is associated with the page.
static uint64_t page_table_get_pml(page_table_t *table, pml_t *currentPml, pml_index_t index, pml_flags_t flags, pml_t **outPml)
Retrieves or allocates the next level page table.
Definition paging.h:208
static uintptr_t pml_accessible_addr(pml_entry_t entry)
Retrieves the address from a page table entry and converts it to an accessible address.
Definition paging.h:61
#define PML_ADDR_TO_INDEX(addr, level)
Calculates the index into a page table level for a given virtual address.
#define PML_PAGE_BUFFER_SIZE
Size of the page buffer used to batch page allocations and frees.
static void page_table_unmap(page_table_t *table, void *virtAddr, uint64_t pageAmount)
Unmaps a range of virtual addresses from the page table.
Definition paging.h:515
static void pml_free(page_table_t *table, pml_t *pml, pml_level_t level)
Recursively frees a page table level, all its children and any owned pages.
Definition paging.h:120
uint64_t(* pml_alloc_pages_t)(void **, uint64_t)
Generic page allocation function type.
static uint64_t page_table_map(page_table_t *table, void *virtAddr, void *physAddr, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps a range of virtual addresses to physical addresses in the page table.
Definition paging.h:422
static void page_table_page_buffer_push(page_table_t *table, page_table_page_buffer_t *buffer, void *address)
Pushes a page table level onto the page buffer, freeing the buffer if full.
Definition paging.h:556
static bool pml_is_empty(pml_t *pml)
Checks if a page table level is empty (all entries are 0).
Definition paging.h:78
uint8_t pml_callback_id_t
Callback ID type.
static void page_table_load(page_table_t *table)
Loads the page table into the CR3 register if it is not already loaded.
Definition paging.h:183
#define PML2_SIZE
Size of the region mapped by a single PML2 entry.
#define PAGE_TABLE_TRAVERSE_CREATE
Create a page_table_traverse_t initializer.
Definition paging.h:255
static void page_table_clear(page_table_t *table, void *virtAddr, uint64_t pageAmount)
Clears page table entries in the specified range and frees any owned pages.
Definition paging.h:629
static bool page_table_is_mapped(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if a range of virtual addresses is completely mapped.
Definition paging.h:360
static void page_table_deinit(page_table_t *table)
Deinitializes a page table, freeing all allocated pages.
Definition paging.h:173
static uint64_t pml_new(page_table_t *table, pml_t **outPml)
Allocates and initializes a new page table level.
Definition paging.h:97
#define PML_LOWER_TO_HIGHER(addr)
Converts an address from the lower half to the higher half.
static bool page_table_is_unmapped(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if a range of virtual addresses is completely unmapped.
Definition paging.h:389
static uint64_t page_table_find_unmapped_region(page_table_t *table, void *startAddr, void *endAddr, uint64_t pageAmount, void **outAddr)
Finds the first contiguous unmapped region with the given number of pages within the specified address range.
Definition paging.h:756
static uint64_t page_table_init(page_table_t *table, pml_alloc_pages_t allocPages, pml_free_pages_t freePages)
Initializes a page table.
Definition paging.h:157
static uint64_t page_table_traverse(page_table_t *table, page_table_traverse_t *traverse, uintptr_t virtAddr, pml_flags_t flags)
Allows for fast traversal of the page table by caching previously accessed layers.
Definition paging.h:279
#define PML3_SIZE
Size of the region mapped by a single PML3 entry.
static uint64_t page_table_set_flags(page_table_t *table, void *virtAddr, uint64_t pageAmount, pml_flags_t flags)
Sets the flags for a range of pages in the page table.
Definition paging.h:711
static bool page_table_is_pinned(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if any page in a range is pinned.
Definition paging.h:942
static uint64_t page_table_map_pages(page_table_t *table, void *virtAddr, void **pages, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps an array of physical pages to contiguous virtual addresses in the page table.
Definition paging.h:469
static void page_table_page_buffer_flush(page_table_t *table, page_table_page_buffer_t *buffer)
Flushes the page buffer, freeing any remaining pages.
Definition paging.h:576
#define PML_ENSURE_LOWER_HALF(addr)
Ensures that the given address is in the lower half of the address space.
@ PML3
@ PML1
@ PML4
@ PML2
@ PML_INDEX_AMOUNT
@ PML_USER
@ PML_PRESENT
@ PML_WRITE
@ PML_NONE
@ PML_GLOBAL
@ PML_OWNED
#define MIN(x, y)
Definition math.h:16
#define ROUND_DOWN(number, multiple)
Definition math.h:21
#define ROUND_UP(number, multiple)
Definition math.h:19
#define PAGE_SIZE
Memory page size.
Definition proc.h:140
#define ERR
Integer error value.
Definition ERR.h:17
static void cr3_write(uint64_t value)
Definition regs.h:109
static uint64_t cr3_read()
Definition regs.h:102
__UINT64_TYPE__ uint64_t
Definition stdint.h:17
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:43
_PUBLIC void * memset(void *s, int c, size_t n)
Definition memset.c:4
Buffer of pages used to batch page frees.
Definition paging.h:542
void * pages[PML_PAGE_BUFFER_SIZE]
Definition paging.h:543
A page table structure.
pml_alloc_pages_t allocPages
pml_free_pages_t freePages
Helper structure for fast traversal of the page table.
Definition paging.h:237
pml_index_t oldIdx3
Definition paging.h:244
pml_entry_t * entry
Definition paging.h:247
pml_index_t oldIdx1
Definition paging.h:246
pml_index_t oldIdx2
Definition paging.h:245
uint64_t addr
The address contained in the entry; note that it is stored shifted right by 12 bits.
uint64_t pinned
uint64_t highCallbackId
uint64_t owned
uint64_t raw
uint64_t present
If set the page is present in memory and readable.
uint64_t lowCallbackId
An entry in a page table without a specified address or callback ID.
A page table level.
pml_entry_t entries[PML_INDEX_AMOUNT]
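A minimal end-to-end sketch of the API above, assuming caller-provided allocator callbacks (my_alloc_pages and my_free_pages are hypothetical names matching pml_alloc_pages_t and pml_free_pages_t) and illustrative addresses:

    static page_table_t table;

    void example(void)
    {
        if (page_table_init(&table, my_alloc_pages, my_free_pages) == ERR)
        {
            return;
        }

        void* virt;
        if (page_table_find_unmapped_region(&table, (void*)0x400000, (void*)0x800000, 4, &virt) == 0 &&
            page_table_map(&table, virt, (void*)0x200000, 4, PML_PRESENT | PML_WRITE, PML_CALLBACK_NONE) == 0)
        {
            page_table_load(&table);
            // ... use the mapping, then tear it down ...
            page_table_unmap(&table, virt, 4);
        }

        page_table_deinit(&table); // Frees all table levels and any owned pages.
    }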