static void tlb_invalidate(void *addr, size_t amount)
{
    // One invlpg per page in the range; the "memory" clobber keeps the
    // compiler from reordering accesses around the invalidation.
    for (uint64_t i = 0; i < amount; i++)
        ASM("invlpg (%0)" ::"r"(addr + (i * PAGE_SIZE)) : "memory");
}
#define PAGE_TABLE_TRAVERSE_CREATE \
    .pml3Valid = false, \
    .pml2Valid = false, \
    .pml1Valid = false, \
Fragments of page_table_find_unmapped_region(table, startAddr, endAddr, amount, alignment, outAddr) recovered from the listing: the scan advances currentAddr from startAddr toward end, skipping whole unmapped PML4, PML3, or PML2 regions in one step; the first suitably aligned unmapped address becomes regionStart, the skipped pages accumulate in consecutiveUnmapped, and once consecutiveUnmapped >= amount the function stores regionStart to *outAddr. Hitting a mapped page resets consecutiveUnmapped to 0.
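A condensed sketch of the page-granularity tail of that scan, reconstructed from the fragments above. It is a simplification, not the exact implementation: it probes one page at a time via page_table_is_unmapped, whereas the real code also skips whole unmapped PML4/PML3/PML2 regions at once, and it assumes the function returns 0 on success and ERR when no region fits:

uintptr_t currentAddr = (uintptr_t)startAddr;
uintptr_t end = (uintptr_t)endAddr;
uintptr_t regionStart = 0;
size_t consecutiveUnmapped = 0;
while (currentAddr < end)
{
    if (page_table_is_unmapped(table, (void*)currentAddr, 1))
    {
        if (consecutiveUnmapped == 0)
        {
            // Only start a candidate region on a suitably aligned page.
            if (ROUND_UP(currentAddr, alignment) == currentAddr)
            {
                regionStart = currentAddr;
                consecutiveUnmapped++;
            }
        }
        else
        {
            consecutiveUnmapped++;
        }
        if (consecutiveUnmapped >= amount)
        {
            *outAddr = (void*)regionStart;
            return 0;
        }
    }
    else
    {
        consecutiveUnmapped = 0; // A mapped page breaks the run.
    }
    currentAddr += PAGE_SIZE;
}
return ERR;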
The pml1-level walk recovered from the mapping routines first deducts any skipped pages (amount -= skipPages;), then advances one entry and one page per iteration:

// Walk the current pml1, one entry and one page at a time.
for (; idx1 < PML_INDEX_AMOUNT && amount > 0;
     idx1++, addr = (void*)((uintptr_t)addr + PAGE_SIZE), amount--)
EFI_PHYSICAL_ADDRESS buffer
static uintptr_t address
Mapped virtual address of the HPET registers.
#define PFN_TO_PHYS(_pfn)
Convert a PFN to its physical address.
static void page_table_clear_pml1_pml2_pml3(page_table_t *table, page_table_traverse_t *prevTraverse, page_table_traverse_t *traverse, page_table_page_buffer_t *pageBuffer)
Clears any empty page table levels whenever a pml1, pml2, or pml3 boundary is crossed.
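A sketch of what such a boundary check plausibly looks like, using the pml1 case; the pml1 pointer member on the traverse structure is an assumed name (only the pml1Valid flag is visible in the PAGE_TABLE_TRAVERSE_CREATE initializer above), and clearing the parent entry is omitted:

// On crossing a pml1 boundary, free the previous pml1 if it emptied.
if (prevTraverse->pml1Valid && prevTraverse->pml1 != traverse->pml1 &&
    pml_is_empty(prevTraverse->pml1))
{
    page_table_page_buffer_push(table, pageBuffer, prevTraverse->pml1);
}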
uintptr_t phys_addr_t
Physical address type.
pml_level_t
Enum for the different page table levels.
#define VIRT_TO_PFN(_addr)
Convert an identity-mapped higher-half virtual address to its PFN.
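Taken together with PHYS_TO_PFN and PFN_TO_VIRT further down, the four conversion macros are simple shifts against the higher-half identity mapping. A minimal sketch of how they typically compose; HIGHER_HALF_BASE is a hypothetical name for the kernel's identity-map offset, and the shift by 12 follows from the documented pfn definition (physical address >> 12):

// Hypothetical identity-map base; the real constant lives elsewhere in the kernel.
#define HIGHER_HALF_BASE 0xFFFF800000000000ULL

#define PHYS_TO_PFN(_addr) ((pfn_t)((uintptr_t)(_addr) >> 12))
#define PFN_TO_PHYS(_pfn) (((phys_addr_t)(_pfn)) << 12)
#define VIRT_TO_PFN(_addr) PHYS_TO_PFN((uintptr_t)(_addr) - HIGHER_HALF_BASE)
#define PFN_TO_VIRT(_pfn) ((void*)(PFN_TO_PHYS(_pfn) + HIGHER_HALF_BASE))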
#define PML_FLAGS_MASK
Mask for all pml flags.
static uint64_t page_table_count_pages_with_flags(page_table_t *table, void *addr, size_t amount, pml_flags_t flags)
Counts the number of pages in a range that have all the specified flags set.
static void page_table_clear(page_table_t *table, void *addr, size_t amount)
Clears page table entries in the specified range and frees any owned pages.
static void page_table_collect_callbacks(page_table_t *table, void *addr, size_t amount, uint64_t *callbacks)
Collects the number of pages associated with each callback ID in the specified range.
static uint64_t page_table_traverse(page_table_t *table, page_table_traverse_t *traverse, const void *addr, pml_flags_t flags)
Allows for fast traversal of the page table by caching previously accessed layers.
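A sketch of the intended access pattern, assuming the function returns ERR on failure: reusing one page_table_traverse_t across consecutive pages lets the cached levels short-circuit most of the walk.

page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
for (uint64_t i = 0; i < amount; i++)
{
    void *page = (void*)((uintptr_t)addr + i * PAGE_SIZE);
    if (page_table_traverse(table, &traverse, page, flags) == ERR)
        return ERR; // e.g. allocation failure while creating a level
    // traverse now caches the pml levels covering `page`; adjacent pages
    // usually hit the same pml1 and skip re-walking pml4..pml2.
}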
#define PML_INDEX_TO_ADDR(index, level)
Calculates the lowest virtual address that maps to a given index at a specified page table level.
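For standard x86-64 4-level paging this macro, and its inverse PML_ADDR_TO_INDEX listed further down, reduce to shift-and-mask arithmetic. A sketch, assuming levels are numbered 1 (pml1) through 4 (pml4) and ignoring canonical sign extension of higher-half addresses:

// Each level indexes 9 address bits; pages contribute 12 offset bits.
#define PML_INDEX_TO_ADDR(index, level) \
    ((uintptr_t)(index) << (12 + 9 * ((level) - 1)))
#define PML_ADDR_TO_INDEX(addr, level) \
    ((pml_index_t)(((uintptr_t)(addr) >> (12 + 9 * ((level) - 1))) & 0x1FF))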
pml_index_t
Indexes into a pml level.
static void page_table_unmap(page_table_t *table, void *addr, size_t amount)
Unmaps a range of virtual addresses from the page table.
#define PML4_SIZE
Size of the region mapped by a single PML4 entry.
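The region sizes follow directly from the 512-entry fan-out of each level. A sketch of the likely definitions, assuming 4 KiB pages:

#define PML2_SIZE (PAGE_SIZE * 512ULL)  // 2 MiB mapped per PML2 entry
#define PML3_SIZE (PML2_SIZE * 512ULL)  // 1 GiB mapped per PML3 entry
#define PML4_SIZE (PML3_SIZE * 512ULL)  // 512 GiB mapped per PML4 entry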
static uint64_t page_table_get_phys_addr(page_table_t *table, void *addr, phys_addr_t *out)
Retrieves the physical address mapped to a given virtual address.
#define PML_CALLBACK_NONE
Special callback ID that indicates no callback is associated with the page.
static bool page_table_is_unmapped(page_table_t *table, void *addr, size_t amount)
Checks if a range of virtual addresses is completely unmapped.
#define PML_ADDR_TO_INDEX(addr, level)
Calculates the index into a page table level for a given virtual address.
#define PML_PAGE_BUFFER_SIZE
Size of the page buffer used to batch page allocations and frees.
static void pml_free(page_table_t *table, pml_t *pml, pml_level_t level)
Recursively frees a page table level, all its children and any owned pages.
static void page_table_page_buffer_push(page_table_t *table, page_table_page_buffer_t *buffer, void *address)
Pushes a page table level onto the page buffer, freeing the buffer if full.
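A sketch of the batching pattern; the count field is an assumption, since only the pfns array is visible in the structure documented further down:

static void page_table_page_buffer_push(page_table_t *table, page_table_page_buffer_t *buffer, void *address)
{
    // `count` is an assumed bookkeeping field; PML_PAGE_BUFFER_SIZE caps the batch.
    buffer->pfns[buffer->count++] = VIRT_TO_PFN(address);
    if (buffer->count == PML_PAGE_BUFFER_SIZE)
        page_table_page_buffer_flush(table, buffer); // frees the batch via freePages
}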
static uint64_t page_table_map(page_table_t *table, void *addr, phys_addr_t phys, size_t amount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps a range of virtual addresses to physical addresses in the page table.
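A hedged usage sketch for the mapping calls; map_and_resolve is a hypothetical demo function, and the uint64_t-returning calls are assumed to yield 0 on success and ERR on failure:

static uint64_t map_and_resolve(page_table_t *table, void *virt, phys_addr_t phys, size_t amount, pml_flags_t flags)
{
    // Map `amount` pages of contiguous physical memory at `virt`, no callback.
    if (page_table_map(table, virt, phys, amount, flags, PML_CALLBACK_NONE) == ERR)
        return ERR;

    phys_addr_t resolved;
    if (page_table_get_phys_addr(table, virt, &resolved) == ERR)
        return ERR;
    // resolved now holds the physical address backing `virt`.

    page_table_unmap(table, virt, amount); // remove the mappings again
    return 0;
}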
static bool pml_is_empty(pml_t *pml)
Checks if a page table level is empty (all entries are 0).
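A sketch matching the brief, assuming pml_entry_t can be viewed as a raw uint64_t (the raw member name is an assumption):

static bool pml_is_empty(pml_t *pml)
{
    for (size_t i = 0; i < PML_INDEX_AMOUNT; i++)
    {
        if (pml->entries[i].raw != 0) // any non-zero entry means the level is in use
            return false;
    }
    return true;
}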
uint8_t pml_callback_id_t
Callback ID type.
void(* pml_free_pages_t)(pfn_t *, size_t)
Generic page free function type.
static uint64_t page_table_get_pml(page_table_t *table, pml_t *current, pml_index_t index, pml_flags_t flags, pml_t **out)
Retrieves or allocates the next level page table.
static void page_table_load(page_table_t *table)
Loads the page table into the CR3 register if it is not already loaded.
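A sketch of the CR3 reload guard, assuming the table stores its top level in a pml4 member (an assumed name) and that PML_ENSURE_LOWER_HALF yields the physical, lower-half address CR3 expects:

static void page_table_load(page_table_t *table)
{
    uint64_t phys = (uint64_t)PML_ENSURE_LOWER_HALF(table->pml4);
    if (cr3_read() != phys)
        cr3_write(phys); // skip the reload (and TLB flush) if already active
}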
#define PML2_SIZE
Size of the region mapped by a single PML2 entry.
#define PAGE_TABLE_TRAVERSE_CREATE
Create a page_table_traverse_t initializer.
static void tlb_invalidate(void *addr, size_t amount)
Invalidates a region of pages in the TLB.
static bool page_table_is_pinned(page_table_t *table, void *addr, size_t amount)
Checks if any page in a range is pinned.
static uint64_t page_table_map_pages(page_table_t *table, void *addr, const pfn_t *pfns, size_t amount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps an array of physical pages to contiguous virtual addresses in the page table.
static uint64_t page_table_set_flags(page_table_t *table, void *addr, size_t amount, pml_flags_t flags)
Sets the flags for a range of pages in the page table.
static void page_table_deinit(page_table_t *table)
Deinitializes a page table, freeing all allocated pages.
#define PHYS_TO_PFN(_addr)
Convert a physical address to its PFN.
static uint64_t pml_new(page_table_t *table, pml_t **outPml)
Allocates and initializes a new page table level.
uint64_t(* pml_alloc_pages_t)(pfn_t *, size_t)
Generic page allocation function type.
#define PFN_TO_VIRT(_pfn)
Convert a PFN to its identity mapped higher half virtual address.
size_t pfn_t
Page Frame Number type.
static uint64_t page_table_init(page_table_t *table, pml_alloc_pages_t allocPages, pml_free_pages_t freePages)
Initializes a page table.
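A sketch of wiring the table to a physical memory manager through the two function-pointer types; my_alloc_pages, my_free_pages, pmm_alloc, and pmm_free are hypothetical stand-ins, and page_table_init is assumed to return ERR on failure:

// Hypothetical PMM hooks matching pml_alloc_pages_t / pml_free_pages_t.
static uint64_t my_alloc_pages(pfn_t *pfns, size_t count)
{
    for (size_t i = 0; i < count; i++)
    {
        void *page = pmm_alloc(); // hypothetical allocator
        if (page == NULL)
            return ERR;
        pfns[i] = VIRT_TO_PFN(page);
    }
    return 0;
}

static void my_free_pages(pfn_t *pfns, size_t count)
{
    for (size_t i = 0; i < count; i++)
        pmm_free(PFN_TO_VIRT(pfns[i])); // hypothetical
}

static uint64_t paging_setup(page_table_t *table)
{
    return page_table_init(table, my_alloc_pages, my_free_pages);
}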
static bool page_table_is_mapped(page_table_t *table, const void *addr, size_t amount)
Checks if a range of virtual addresses is completely mapped.
static uint64_t page_table_find_unmapped_region(page_table_t *table, void *startAddr, void *endAddr, size_t amount, size_t alignment, void **outAddr)
Finds the first contiguous unmapped region with the given number of pages within the specified address range.
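A sketch pairing the search with a mapping call; map_anywhere is a hypothetical demo function, and 0/ERR return semantics are assumed:

static uint64_t map_anywhere(page_table_t *table, void *start, void *end, phys_addr_t phys, pml_flags_t flags)
{
    void *addr = NULL;
    // Look for a page-aligned 16-page hole within [start, end).
    if (page_table_find_unmapped_region(table, start, end, 16, PAGE_SIZE, &addr) == ERR)
        return ERR;
    return page_table_map(table, addr, phys, 16, flags, PML_CALLBACK_NONE);
}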
#define PML3_SIZE
Size of the region mapped by a single PML3 entry.
static void page_table_page_buffer_flush(page_table_t *table, page_table_page_buffer_t *buffer)
Flushes the page buffer, freeing any remaining pages.
#define PML_ENSURE_LOWER_HALF(addr)
Ensures that the given address is in the lower half of the address space.
#define ASM(...)
Inline assembly macro.
#define ROUND_DOWN(number, multiple)
#define ROUND_UP(number, multiple)
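The usual integer forms of these helpers, shown as a sketch (the project's exact definitions may differ, e.g. power-of-two masking):

#define ROUND_DOWN(number, multiple) (((number) / (multiple)) * (multiple))
#define ROUND_UP(number, multiple) ((((number) + (multiple) - 1) / (multiple)) * (multiple))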
#define ERR
Integer error value.
#define PAGE_SIZE
The size of a memory page in bytes.
static const path_flag_t flags[]
static void cr3_write(uint64_t value)
static uint64_t cr3_read(void)
__UINTPTR_TYPE__ uintptr_t
_PUBLIC void * memset(void *s, int c, size_t n)
Buffer of pages used to batch page frees.
pfn_t pfns[PML_PAGE_BUFFER_SIZE]
pml_alloc_pages_t allocPages
pml_free_pages_t freePages
Helper structure for fast traversal of the page table.
pfn_t pfn
The physical frame number (physical address >> 12).
uint64_t present
If set, the page is present in memory and readable.
An entry in a page table without a specified address or callback ID.
pml_entry_t entries[PML_INDEX_AMOUNT]
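A sketch of how the entry and level types plausibly fit together on x86-64; the raw view and every bitfield other than present and pfn are assumptions:

typedef union
{
    uint64_t raw; // assumed raw view used for zero checks
    struct
    {
        uint64_t present : 1; // if set, the page is present and readable
        uint64_t low : 11;    // placeholder for the remaining low flag bits
        uint64_t pfn : 40;    // physical frame number (physical address >> 12)
        uint64_t high : 12;   // placeholder for the high flag/callback bits
    };
} pml_entry_t;

typedef struct
{
    pml_entry_t entries[PML_INDEX_AMOUNT]; // 512 entries per level on x86-64
} pml_t;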