44 for (uint64_t i = 0; i < pageCount; i++)
46 asm volatile("invlpg (%0)" :: "r"(virtAddr + i * PAGE_SIZE) : "memory");
139 else if (entry->owned)
254 #define PAGE_TABLE_TRAVERSE_CREATE \
256 .pml3Valid = false, \
257 .pml2Valid = false, \
258 .pml1Valid = false, \
577 if (buffer->pageCount > 0)
647 prevTraverse = traverse;
694 callbacks[callbackId]++;
763 while (currentAddr < end)
771 *outAddr = (void*)currentAddr;
780 *outAddr = (void*)currentAddr;
791 while (currentAddr < end)
798 *outAddr = (void*)currentAddr;
808 *outAddr = (void*)currentAddr;
818 *outAddr = (void*)currentAddr;
830 while (currentAddr < end)
837 if (consecutiveUnmapped == 0)
839 regionStart = currentAddr;
844 consecutiveUnmapped += skippedPages;
848 *outAddr = (void*)regionStart;
852 currentAddr = skipTo;
862 if (consecutiveUnmapped == 0)
864 regionStart = currentAddr;
868 consecutiveUnmapped += skippedPages;
872 *outAddr = (void*)regionStart;
886 if (consecutiveUnmapped == 0)
888 regionStart = currentAddr;
892 consecutiveUnmapped += skippedPages;
896 *outAddr = (void*)regionStart;
911 if (consecutiveUnmapped == 0)
913 regionStart = currentAddr;
915 consecutiveUnmapped++;
919 *outAddr = (void*)regionStart;
925 consecutiveUnmapped = 0;
1021 for (; idx1 < PML_INDEX_AMOUNT && pageAmount > 0;
static void page_table_clear_pml1_pml2_pml3(page_table_t *table, page_table_traverse_t *prevTraverse, page_table_traverse_t *traverse, page_table_page_buffer_t *pageBuffer)
Clears any empty page table levels whenever a pml1, pml2 or pml3 boundary is crossed.
pml_level_t
Enumeration of the different page table levels.
#define PML_ADDR_OFFSET_BITS
Number of bits used for the offset within a page.
#define PML_FLAGS_MASK
Mask for all pml flags.
void(* pml_free_pages_t)(void **, uint64_t)
Generic page free function type.
#define PML_ADDR_MASK
Mask for the address in a page table entry.
static uint64_t page_table_get_phys_addr(page_table_t *table, const void *virtAddr, void **outPhysAddr)
Retrieves the physical address mapped to a given virtual address.
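This is the classic four-level walk: fail as soon as a level is not present, otherwise combine the final frame address with the offset inside the page. A minimal sketch assuming the standard x86-64 layout (9-bit indices, 12-bit offset, identity-accessible tables, and an assumed pml4 field); the real function presumably reuses page_table_traverse() and pml_accessible_addr() instead of the raw masks shown here.

#include <stdint.h>

#define PAGE_SIZE 0x1000                    /* assumed 4 KiB pages */
#define ERR ((uint64_t)-1)                  /* assumed error value */
#define PML_PRESENT 0x1ULL                  /* assumed: bit 0 is the present flag */
#define PML_ADDR_MASK 0x000FFFFFFFFFF000ULL /* assumed: bits 12-51 hold the frame address */

typedef struct
{
    uint64_t *pml4; /* assumed field: top-level table, identity-accessible */
} page_table_t;

/* Walk PML4 -> PML3 -> PML2 -> PML1; fail if any level is not present, then
 * combine the frame address with the offset inside the page. */
static uint64_t page_table_get_phys_addr(page_table_t *table, const void *virtAddr, void **outPhysAddr)
{
    uintptr_t addr = (uintptr_t)virtAddr;
    uint64_t *level = table->pml4;
    for (int shift = 39; shift > 12; shift -= 9) /* PML4, PML3, PML2 indices */
    {
        uint64_t entry = level[(addr >> shift) & 0x1FF];
        if (!(entry & PML_PRESENT))
        {
            return ERR;
        }
        level = (uint64_t *)(entry & PML_ADDR_MASK); /* next level, identity map assumed */
    }

    uint64_t page = level[(addr >> 12) & 0x1FF]; /* PML1 entry for the page itself */
    if (!(page & PML_PRESENT))
    {
        return ERR;
    }
    *outPhysAddr = (void *)((page & PML_ADDR_MASK) | (addr & (PAGE_SIZE - 1)));
    return 0;
}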
static void tlb_invalidate(void *virtAddr, uint64_t pageCount)
Invalidates a region of pages in the TLB.
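Lines 44-46 of the fragment above show the body of this function; reassembled as a self-contained sketch (the 4 KiB PAGE_SIZE value is assumed):

#include <stdint.h>

#define PAGE_SIZE 0x1000 /* assumed 4 KiB pages */

/* Issue one invlpg per page of the region; the "memory" clobber keeps the
 * compiler from reordering memory accesses around the invalidation. */
static void tlb_invalidate(void *virtAddr, uint64_t pageCount)
{
    for (uint64_t i = 0; i < pageCount; i++)
    {
        asm volatile("invlpg (%0)" :: "r"((uintptr_t)virtAddr + i * PAGE_SIZE) : "memory");
    }
}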
#define PML_INDEX_TO_ADDR(index, level)
Calculates the lowest virtual address that maps to a given index at a specified page table level.
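The macro body is not reproduced here; a plausible sketch, assuming the usual x86-64 split of a 12-bit page offset plus 9 index bits per level, with levels numbered 1 (PML1) through 4 (PML4):

#include <stdint.h>

#define PML_ADDR_OFFSET_BITS 12 /* assumed: 4 KiB page offset */
#define PML_INDEX_BITS 9        /* assumed: 512 entries per level */

/* Lowest virtual address whose walk passes through `index` at `level`
 * (canonical sign extension of PML4 indices >= 256 is ignored for brevity). */
#define PML_INDEX_TO_ADDR(index, level) \
    ((uintptr_t)(index) << (PML_ADDR_OFFSET_BITS + PML_INDEX_BITS * ((level) - 1)))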
pml_index_t
Indexes into a pml level.
static void page_table_collect_callbacks(page_table_t *table, void *virtAddr, uint64_t pageAmount, uint64_t *callbacks)
Collects the number of pages associated with each callback ID in the specified range.
#define PML_CALLBACK_NONE
Special callback ID that indicates no callback is associated with the page.
static uint64_t page_table_get_pml(page_table_t *table, pml_t *currentPml, pml_index_t index, pml_flags_t flags, pml_t **outPml)
Retrieves or allocates the next level page table.
static uintptr_t pml_accessible_addr(pml_entry_t entry)
Retrieves the address from a page table entry and converts it to an accessible address.
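Given that an entry's addr field stores the physical address shifted right by 12 bits (see pml_entry_t below) and that page tables are accessed through a higher-half mapping, this likely reduces to a shift plus PML_LOWER_TO_HIGHER. A sketch; the direct-map base and the simplified entry layout are assumptions:

#include <stdint.h>

#define PML_ADDR_OFFSET_BITS 12                                               /* assumed page offset width */
#define PML_LOWER_TO_HIGHER(addr) ((uintptr_t)(addr) + 0xFFFF800000000000ULL) /* assumed direct-map base */

typedef struct
{
    uint64_t present : 1;
    uint64_t flags : 11; /* remaining flag bits, collapsed for this sketch */
    uint64_t addr : 40;  /* physical address >> 12, as documented below */
    uint64_t upper : 12;
} pml_entry_t;

/* Shift the stored frame number back into a physical address, then move it
 * into the higher half so the kernel can dereference the next level directly. */
static uintptr_t pml_accessible_addr(pml_entry_t entry)
{
    return PML_LOWER_TO_HIGHER((uintptr_t)entry.addr << PML_ADDR_OFFSET_BITS);
}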
#define PML_ADDR_TO_INDEX(addr, level)
Calculates the index into a page table level for a given virtual address.
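The inverse of PML_INDEX_TO_ADDR; again a sketch under the same 9-bits-per-level assumption:

#include <stdint.h>

#define PML_ADDR_OFFSET_BITS 12 /* assumed: 4 KiB page offset */
#define PML_INDEX_BITS 9        /* assumed: 512 entries per level */
#define PML_INDEX_MASK 0x1FF    /* assumed: low 9 bits of the shifted address */

/* Index into the table at `level` (1 = PML1 ... 4 = PML4) that covers `addr`. */
#define PML_ADDR_TO_INDEX(addr, level) \
    (((uintptr_t)(addr) >> (PML_ADDR_OFFSET_BITS + PML_INDEX_BITS * ((level) - 1))) & PML_INDEX_MASK)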
#define PML_PAGE_BUFFER_SIZE
Size of the page buffer used to batch page allocations and frees.
static void page_table_unmap(page_table_t *table, void *virtAddr, uint64_t pageAmount)
Unmaps a range of virtual addresses from the page table.
static void pml_free(page_table_t *table, pml_t *pml, pml_level_t level)
Recursively frees a page table level, all its children and any owned pages.
uint64_t(* pml_alloc_pages_t)(void **, uint64_t)
Generic page allocation function type.
static uint64_t page_table_map(page_table_t *table, void *virtAddr, void *physAddr, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps a range of virtual addresses to physical addresses in the page table.
static void page_table_page_buffer_push(page_table_t *table, page_table_page_buffer_t *buffer, void *address)
Pushes a page table level onto the page buffer, freeing the buffer if full.
static bool pml_is_empty(pml_t *pml)
Checks if a page table level is empty (all entries are 0).
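A direct sketch of the check described above, assuming PML_INDEX_AMOUNT is 512 and viewing each entry as a raw 64-bit word:

#include <stdbool.h>
#include <stdint.h>

#define PML_INDEX_AMOUNT 512 /* assumed: 512 entries per level */

typedef struct
{
    uint64_t entries[PML_INDEX_AMOUNT]; /* raw view of pml_entry_t for this sketch */
} pml_t;

/* A level may only be freed once every entry has been cleared back to 0. */
static bool pml_is_empty(pml_t *pml)
{
    for (uint64_t i = 0; i < PML_INDEX_AMOUNT; i++)
    {
        if (pml->entries[i] != 0)
        {
            return false;
        }
    }
    return true;
}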
uint8_t pml_callback_id_t
Callback ID type.
static void page_table_load(page_table_t *table)
Loads the page table into the CR3 register if it is not already loaded.
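A sketch of that check, assuming CR3 holds only the physical address of the top-level table (stored here in an assumed pml4Phys field) and using the cr3_read()/cr3_write() helpers listed below; their bodies here are the standard inline-asm forms, not copied from this file:

#include <stdint.h>

static uint64_t cr3_read(void)
{
    uint64_t value;
    asm volatile("mov %%cr3, %0" : "=r"(value));
    return value;
}

static void cr3_write(uint64_t value)
{
    asm volatile("mov %0, %%cr3" :: "r"(value) : "memory");
}

typedef struct
{
    uint64_t pml4Phys; /* assumed field: physical address of the top-level table */
} page_table_t;

/* Writing CR3 flushes the TLB, so only reload it when a different table is
 * currently active. */
static void page_table_load(page_table_t *table)
{
    if (cr3_read() != table->pml4Phys)
    {
        cr3_write(table->pml4Phys);
    }
}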
#define PML2_SIZE
Size of the region mapped by a single PML2 entry.
#define PAGE_TABLE_TRAVERSE_CREATE
Creates a page_table_traverse_t initializer.
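Lines 254-258 of the fragment above show the three flag initializers; a sketch of the full macro, with the layout of page_table_traverse_t assumed (one cached pointer plus a validity flag per level):

#include <stdbool.h>

typedef struct pml pml_t; /* opaque here; the real definition holds the entries */

/* Layout assumed for this sketch: cached pointers into the last-visited
 * PML3/PML2/PML1 so repeated lookups in the same region skip the upper levels. */
typedef struct
{
    pml_t *pml3;
    pml_t *pml2;
    pml_t *pml1;
    bool pml3Valid;
    bool pml2Valid;
    bool pml1Valid;
} page_table_traverse_t;

/* Every cached level starts out invalid; the first traversal fills them in. */
#define PAGE_TABLE_TRAVERSE_CREATE \
    {                              \
        .pml3Valid = false,        \
        .pml2Valid = false,        \
        .pml1Valid = false,        \
    }

A caller would then write page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE; before the first call to page_table_traverse().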
static void page_table_clear(page_table_t *table, void *virtAddr, uint64_t pageAmount)
Clears page table entries in the specified range and frees any owned pages.
static bool page_table_is_mapped(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if a range of virtual addresses is completely mapped.
static void page_table_deinit(page_table_t *table)
Deinitializes a page table, freeing all allocated pages.
static uint64_t pml_new(page_table_t *table, pml_t **outPml)
Allocates and initializes a new page table level.
#define PML_LOWER_TO_HIGHER(addr)
Converts an address from the lower half to the higher half.
static bool page_table_is_unmapped(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if a range of virtual addresses is completely unmapped.
static uint64_t page_table_find_unmapped_region(page_table_t *table, void *startAddr, void *endAddr, uint64_t pageAmount, void **outAddr)
Finds the first contiguous unmapped region with the given number of pages within the specified address range.
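The fragments above (lines 830-925) show the scanning pattern: count consecutive unmapped pages, remember where the current run started, and reset the counter whenever a mapped page is hit. A condensed sketch of that loop; page_table_unmapped_run() is a hypothetical stand-in for the logic that skips whole absent PML2/PML3 regions at once (the skipTo/skippedPages handling in the fragments):

#include <stdint.h>

#define PAGE_SIZE 0x1000   /* assumed 4 KiB pages */
#define ERR ((uint64_t)-1) /* assumed error value */

typedef struct page_table page_table_t; /* opaque for this sketch */

/* Hypothetical helper, not in this file: returns the number of unmapped pages
 * starting at `addr` (0 if the page is mapped), so the scan can jump over an
 * absent PML2/PML3 entry in one step instead of page by page. */
uint64_t page_table_unmapped_run(page_table_t *table, uintptr_t addr);

static uint64_t page_table_find_unmapped_region(page_table_t *table, void *startAddr, void *endAddr,
    uint64_t pageAmount, void **outAddr)
{
    uintptr_t currentAddr = (uintptr_t)startAddr;
    uintptr_t end = (uintptr_t)endAddr;
    uintptr_t regionStart = 0;
    uint64_t consecutiveUnmapped = 0;

    while (currentAddr < end)
    {
        uint64_t skippedPages = page_table_unmapped_run(table, currentAddr);
        if (skippedPages == 0)
        {
            consecutiveUnmapped = 0; /* a mapped page breaks the run */
            currentAddr += PAGE_SIZE;
            continue;
        }

        if (consecutiveUnmapped == 0)
        {
            regionStart = currentAddr; /* first unmapped page of a new candidate region */
        }
        consecutiveUnmapped += skippedPages;

        if (consecutiveUnmapped >= pageAmount)
        {
            *outAddr = (void *)regionStart;
            return 0;
        }
        currentAddr += skippedPages * PAGE_SIZE;
    }
    return ERR;
}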
static uint64_t page_table_init(page_table_t *table, pml_alloc_pages_t allocPages, pml_free_pages_t freePages)
Initializes a page table.
static uint64_t page_table_traverse(page_table_t *table, page_table_traverse_t *traverse, uintptr_t virtAddr, pml_flags_t flags)
Allows for fast traversal of the page table by caching previously accessed layers.
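A usage sketch, assuming the traverse structure from the PAGE_TABLE_TRAVERSE_CREATE sketch above; only the signature of page_table_traverse() itself is taken from this file (it is static there, shown as a plain prototype here so the sketch stands alone):

#include <stdint.h>

#define PAGE_SIZE 0x1000   /* assumed 4 KiB pages */
#define ERR ((uint64_t)-1) /* assumed error value */

typedef struct page_table page_table_t;                   /* opaque for this sketch */
typedef struct page_table_traverse page_table_traverse_t; /* opaque for this sketch */
typedef uint64_t pml_flags_t;                             /* assumed representation */

uint64_t page_table_traverse(page_table_t *table, page_table_traverse_t *traverse, uintptr_t virtAddr,
    pml_flags_t flags);

/* Walk a range page by page; because consecutive addresses usually share the
 * same PML3/PML2/PML1, the cached pointers make each step after the first a
 * single array lookup instead of a full four-level walk. */
static uint64_t walk_range(page_table_t *table, page_table_traverse_t *traverse, void *virtAddr,
    uint64_t pageAmount, pml_flags_t flags)
{
    for (uint64_t i = 0; i < pageAmount; i++)
    {
        if (page_table_traverse(table, traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, flags) == ERR)
        {
            return ERR;
        }
        /* ... operate on the entry that `traverse` now points at ... */
    }
    return 0;
}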
#define PML3_SIZE
Size of the region mapped by a single PML3 entry.
static uint64_t page_table_set_flags(page_table_t *table, void *virtAddr, uint64_t pageAmount, pml_flags_t flags)
Sets the flags for a range of pages in the page table.
static bool page_table_is_pinned(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if any page in a range is pinned.
static uint64_t page_table_map_pages(page_table_t *table, void *virtAddr, void **pages, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps an array of physical pages to contiguous virtual addresses in the page table.
static void page_table_page_buffer_flush(page_table_t *table, page_table_page_buffer_t *buffer)
Flushes the page buffer, freeing any remaining pages.
#define PML_ENSURE_LOWER_HALF(addr)
Ensures that the given address is in the lower half of the address space.
static uint64_t page_table_count_pages_with_flags(page_table_t *table, void *virtAddr, uint64_t pageAmount, pml_flags_t flags)
Counts the number of pages in a range that have all the specified flags set.
#define ROUND_DOWN(number, multiple)
#define ROUND_UP(number, multiple)
#define PAGE_SIZE
The size of a memory page in bytes.
#define ERR
Integer error value.
static uintptr_t address
Mapped virtual address of the HPET registers.
EFI_PHYSICAL_ADDRESS buffer
static const path_flag_t flags[]
static uint64_t pageAmount
static void cr3_write(uint64_t value)
static uint64_t cr3_read()
__UINTPTR_TYPE__ uintptr_t
_PUBLIC void * memset(void *s, int c, size_t n)
page_table_page_buffer_t
Buffer of pages used to batch page frees.
void * pages[PML_PAGE_BUFFER_SIZE]
pml_alloc_pages_t allocPages
pml_free_pages_t freePages
page_table_traverse_t
Helper structure for fast traversal of the page table.
uint64_t addr
The address contained in the entry; note that it is shifted right by 12 bits.
uint64_t present
If set the page is present in memory and readable.
An entry in a page table without a specified address or callback ID.
pml_entry_t entries[PML_INDEX_AMOUNT]