|
PatchworkOS
19e446b
A non-POSIX operating system.
|
Paging. More...
Paging.
Paging is used to map virtual memory to physical memory. Meaning that when some address is accessed by the CPU, the address might actually point to a different location in physical memory. This is done using a page table.
Additionally, Patchwork uses page tables to store metadata about memory pages which is then used by the VMM to avoid the need for a separate data structure that keeps track of memory.
For this implementation it was decided to try to derive every value from first principles, meaning that no values are hardcoded; for example, the lower and higher half boundaries are derived from the number of bits that can be used for the address in a page table entry. This makes the code more complex, but means we rely on fewer potentially incorrect sources.
Note that most if not all of the paging functions will not check ahead of time if the operation will succeed; for example, page_table_map() will not check that the target range is unmapped, and if it's not then the function will fail partway through and make a mess. It's simply best for performance and flexibility to have the caller ensure that the operation will succeed.
Data Structures | |
| struct | page_table_traverse_t |
| Helper structure for fast traversal of the page table. More... | |
| struct | page_table_page_buffer_t |
| Buffer of pages used to batch page frees. More... | |
| struct | pml_entry_t |
| struct | pml_t |
| A page table level. More... | |
| struct | page_table_t |
| A page table structure. More... | |
| struct | pml_flags_t |
| An entry in a page table without a specified address or callback ID. More... | |
Macros | |
| #define | PAGE_TABLE_TRAVERSE_CREATE |
Create a page_table_traverse_t initializer. | |
| #define | PHYS_ADDR_INVALID ((phys_addr_t)(UINTPTR_MAX)) |
| Invalid physical address. | |
| #define | PFN_TO_PHYS(_pfn) ((phys_addr_t)(_pfn) * PAGE_SIZE) |
| Convert a PFN to its physical address. | |
| #define | PHYS_TO_PFN(_addr) ((pfn_t)(_addr) / PAGE_SIZE) |
| Convert a physical address to its PFN. | |
| #define | PFN_TO_VIRT(_pfn) ((void*)PML_LOWER_TO_HIGHER((uintptr_t)(_pfn) * PAGE_SIZE)) |
| Convert a PFN to its identity mapped higher half virtual address. | |
| #define | VIRT_TO_PFN(_addr) ((pfn_t)(PML_HIGHER_TO_LOWER(_addr) / PAGE_SIZE)) |
| Convert an identity mapped higher half virtual address to its PFN. | |
| #define | PML_ADDR_OFFSET_BITS 12 |
| Number of bits used for the offset within a page. | |
| #define | PML_ADDR_MASK 0x000FFFFFFFFFF000ULL |
| Mask for the address in a page table entry. | |
| #define | PML_FLAGS_MASK |
| Mask for all pml flags. | |
| #define | PML_INDEX_BITS 9 |
| Number of bits used to index into a page table level. | |
| #define | PML_INDEX_TO_ADDR_NO_WRAP(index, level) ((uintptr_t)(index) << (((level) - 1) * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS)) |
| Calculates the lowest virtual address that maps to a given index at a specified page table level, without wrapping. | |
| #define | PML_VIRT_ADDR_BITS (PML_INDEX_BITS * PML_LEVEL_AMOUNT + PML_ADDR_OFFSET_BITS) |
| Total number of bits used for virtual addresses. | |
| #define | PML_LOWER_HALF_START (0) |
| The start of the lower half of the address space. | |
| #define | PML_LOWER_HALF_END ((1ULL << (PML_VIRT_ADDR_BITS - 1)) - PAGE_SIZE) |
| The end of the lower half of the address space. | |
| #define | PML_HIGHER_HALF_START (~((1ULL << (PML_VIRT_ADDR_BITS - 1)) - 1)) |
| The start of the higher half of the address space. | |
| #define | PML_HIGHER_HALF_END (~0ULL & ~(PAGE_SIZE - 1)) |
| The end of the higher half of the address space. | |
| #define | PML_HIGHER_TO_LOWER(addr) ((uintptr_t)(addr) - PML_HIGHER_HALF_START) |
| Converts an address from the higher half to the lower half. | |
| #define | PML_LOWER_TO_HIGHER(addr) ((uintptr_t)(addr) + PML_HIGHER_HALF_START) |
| Converts an address from the lower half to the higher half. | |
| #define | PML_ENSURE_LOWER_HALF(addr) ((phys_addr_t)(addr) >= PML_HIGHER_HALF_START ? PML_HIGHER_TO_LOWER(addr) : (phys_addr_t)(addr)) |
| Ensures that the given address is in the lower half of the address space. | |
| #define | PML_ENSURE_HIGHER_HALF(addr) ((uintptr_t)(addr) < PML_HIGHER_HALF_START ? PML_LOWER_TO_HIGHER(addr) : (uintptr_t)(addr)) |
| Ensures that the given address is in the higher half of the address space. | |
| #define | PML_ADDR_TO_INDEX(addr, level) |
| Calculates the index into a page table level for a given virtual address. | |
| #define | PML_INDEX_TO_ADDR(index, level) |
| Calculates the lowest virtual address that maps to a given index at a specified page table level. | |
| #define | PML2_SIZE (1ULL << (PML_INDEX_BITS + PML_ADDR_OFFSET_BITS)) |
| Size of the region mapped by a single PML2 entry. | |
| #define | PML3_SIZE (1ULL << (2 * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS)) |
| Size of the region mapped by a single PML3 entry. | |
| #define | PML4_SIZE (1ULL << (3 * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS)) |
| Size of the region mapped by a single PML4 entry. | |
| #define | PML_MAX_CALLBACK ((1 << 8) - 1) |
| Maximum number of callbacks that can be registered for a page table. | |
| #define | PML_CALLBACK_NONE ((1 << 8) - 1) |
| Special callback ID that indicates no callback is associated with the page. | |
| #define | PML_PAGE_BUFFER_SIZE 64 |
| Size of the page buffer used to batch page allocations and frees. | |
Typedefs | |
| typedef size_t | pfn_t |
| Page Frame Number type. | |
| typedef uintptr_t | phys_addr_t |
| Physical address type. | |
| typedef uint8_t | pml_callback_id_t |
| Callback ID type. | |
| typedef uint64_t(* | pml_alloc_pages_t) (pfn_t *, size_t) |
| Generic page allocation function type. | |
| typedef void(* | pml_free_pages_t) (pfn_t *, size_t) |
| Generic page free function type. | |
Enumerations | |
| enum | pml_flags_t { PML_NONE = 0 , PML_PRESENT = (1ULL << 0) , PML_WRITE = (1ULL << 1) , PML_USER = (1ULL << 2) , PML_WRITE_THROUGH = (1ULL << 3) , PML_CACHE_DISABLED = (1ULL << 4) , PML_ACCESSED = (1ULL << 5) , PML_DIRTY = (1ULL << 6) , PML_SIZE = (1ULL << 7) , PML_GLOBAL = (1ULL << 8) , PML_OWNED = (1ULL << 9) , PML_NO_EXECUTE = (1ULL << 63) } |
| enum | pml_level_t { PML1 = 1 , PT = 1 , PML2 = 2 , PD = 2 , PML3 = 3 , PDPT = 3 , PML4 = 4 , PML_LEVEL_AMOUNT = 4 } |
| Enums for the different page table levels. More... | |
| enum | pml_index_t { PML_INDEX_LOWER_HALF_MIN = 0 , PML_INDEX_LOWER_HALF_MAX = 255 , PML_INDEX_HIGHER_HALF_MIN = 256 , PML_INDEX_HIGHER_HALF_MAX = 511 , PML_INDEX_AMOUNT = 512 } |
| Indexes into a pml level. More... | |
Functions | |
| static void | tlb_invalidate (void *addr, size_t amount) |
| Invalidates a region of pages in the TLB. | |
| static bool | pml_is_empty (pml_t *pml) |
| Checks if a page table level is empty (all entries are 0). | |
| static uint64_t | pml_new (page_table_t *table, pml_t **outPml) |
| Allocates and initializes a new page table level. | |
| static void | pml_free (page_table_t *table, pml_t *pml, pml_level_t level) |
| Recursively frees a page table level, all its children and any owned pages. | |
| static uint64_t | page_table_init (page_table_t *table, pml_alloc_pages_t allocPages, pml_free_pages_t freePages) |
| Initializes a page table. | |
| static void | page_table_deinit (page_table_t *table) |
| Deinitializes a page table, freeing all allocated pages. | |
| static void | page_table_load (page_table_t *table) |
| Loads the page table into the CR3 register if it is not already loaded. | |
| static uint64_t | page_table_get_pml (page_table_t *table, pml_t *current, pml_index_t index, pml_flags_t flags, pml_t **out) |
| Retrieves or allocates the next level page table. | |
| static uint64_t | page_table_traverse (page_table_t *table, page_table_traverse_t *traverse, const void *addr, pml_flags_t flags) |
| Allows for fast traversal of the page table by caching previously accessed layers. | |
| static uint64_t | page_table_get_phys_addr (page_table_t *table, void *addr, phys_addr_t *out) |
| Retrieves the physical address mapped to a given virtual address. | |
| static bool | page_table_is_mapped (page_table_t *table, const void *addr, size_t amount) |
| Checks if a range of virtual addresses is completely mapped. | |
| static bool | page_table_is_unmapped (page_table_t *table, void *addr, size_t amount) |
| Checks if a range of virtual addresses is completely unmapped. | |
| static uint64_t | page_table_map (page_table_t *table, void *addr, phys_addr_t phys, size_t amount, pml_flags_t flags, pml_callback_id_t callbackId) |
| Maps a range of virtual addresses to physical addresses in the page table. | |
| static uint64_t | page_table_map_pages (page_table_t *table, void *addr, const pfn_t *pfns, size_t amount, pml_flags_t flags, pml_callback_id_t callbackId) |
| Maps an array of physical pages to contiguous virtual addresses in the page table. | |
| static void | page_table_unmap (page_table_t *table, void *addr, size_t amount) |
| Unmaps a range of virtual addresses from the page table. | |
| static void | page_table_page_buffer_push (page_table_t *table, page_table_page_buffer_t *buffer, void *address) |
| Pushes a page table level onto the page buffer, freeing the buffer if full. | |
| static void | page_table_page_buffer_flush (page_table_t *table, page_table_page_buffer_t *buffer) |
| Flushes the page buffer, freeing any remaining pages. | |
| static void | page_table_clear_pml1_pml2_pml3 (page_table_t *table, page_table_traverse_t *prevTraverse, page_table_traverse_t *traverse, page_table_page_buffer_t *pageBuffer) |
| Clears any empty page table levels any time a pml1, pml2 or pml3 boundary is crossed. | |
| static void | page_table_clear (page_table_t *table, void *addr, size_t amount) |
| Clears page table entries in the specified range and frees any owned pages. | |
| static void | page_table_collect_callbacks (page_table_t *table, void *addr, size_t amount, uint64_t *callbacks) |
| Collects the number of pages associated with each callback ID in the specified range. | |
| static uint64_t | page_table_set_flags (page_table_t *table, void *addr, size_t amount, pml_flags_t flags) |
| Sets the flags for a range of pages in the page table. | |
| static uint64_t | page_table_find_unmapped_region (page_table_t *table, void *startAddr, void *endAddr, size_t amount, size_t alignment, void **outAddr) |
| Finds the first contiguous unmapped region with the given number of pages within the specified address range. | |
| static bool | page_table_is_pinned (page_table_t *table, void *addr, size_t amount) |
| Checks if any page in a range is pinned. | |
| static uint64_t | page_table_count_pages_with_flags (page_table_t *table, void *addr, size_t amount, pml_flags_t flags) |
| Counts the number of pages in a range that have all the specified flags set. | |
| #define PAGE_TABLE_TRAVERSE_CREATE |
Create a page_table_traverse_t initializer.
page_table_traverse_t initializer. | #define PHYS_ADDR_INVALID ((phys_addr_t)(UINTPTR_MAX)) |
Invalid physical address.
Definition at line 47 of file paging_types.h.
| #define PFN_TO_PHYS | ( | _pfn | ) | ((phys_addr_t)(_pfn) * PAGE_SIZE) |
Convert a PFN to its physical address.
| _pfn | The Page Frame Number. |
Definition at line 55 of file paging_types.h.
Convert a physical address to its PFN.
| _addr | The physical address. |
Definition at line 63 of file paging_types.h.
| #define PFN_TO_VIRT | ( | _pfn | ) | ((void*)PML_LOWER_TO_HIGHER((uintptr_t)(_pfn) * PAGE_SIZE)) |
Convert a PFN to its identity mapped higher half virtual address.
| _pfn | The Page Frame Number. |
Definition at line 72 of file paging_types.h.
| #define VIRT_TO_PFN | ( | _addr | ) | ((pfn_t)(PML_HIGHER_TO_LOWER(_addr) / PAGE_SIZE)) |
Convert an identity mapped higher half virtual address to its PFN.
| _addr | The higher half virtual address. |
Definition at line 84 of file paging_types.h.
| #define PML_ADDR_OFFSET_BITS 12 |
Number of bits used for the offset within a page.
Each page is 4KB (2^12 bytes).
Definition at line 155 of file paging_types.h.
| #define PML_ADDR_MASK 0x000FFFFFFFFFF000ULL |
Mask for the address in a page table entry.
The address is stored in bits 12-51 of the entry.
Definition at line 162 of file paging_types.h.
| #define PML_FLAGS_MASK |
Mask for all pml flags.
Definition at line 189 of file paging_types.h.
| #define PML_INDEX_BITS 9 |
Number of bits used to index into a page table level.
Each page table level has 512 entries (2^9 = 512).
Definition at line 244 of file paging_types.h.
| #define PML_INDEX_TO_ADDR_NO_WRAP | ( | index, | |
| level | |||
| ) | ((uintptr_t)(index) << (((level) - 1) * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS)) |
Calculates the lowest virtual address that maps to a given index at a specified page table level, without wrapping.
This macro does not handle the wrapping behavior for the higher half of the address space, use PML_INDEX_TO_ADDR() instead.
| index | The index within the page table level. |
| level | The page table level. |
Definition at line 257 of file paging_types.h.
| #define PML_VIRT_ADDR_BITS (PML_INDEX_BITS * PML_LEVEL_AMOUNT + PML_ADDR_OFFSET_BITS) |
Total number of bits used for virtual addresses.
The x86_64 architecture only uses 48 bits for virtual addresses. However, with 5 level paging (not used here) it would be possible to use up to 57 bits.
PatchworkOS currently uses 4 level paging, so we have:
Total: 48 bits.
Definition at line 275 of file paging_types.h.
| #define PML_LOWER_HALF_START (0) |
The start of the lower half of the address space.
The lower half starts at address 0. Obviously.
Definition at line 282 of file paging_types.h.
| #define PML_LOWER_HALF_END ((1ULL << (PML_VIRT_ADDR_BITS - 1)) - PAGE_SIZE) |
The end of the lower half of the address space.
Can be thought of as the last page-aligned address before bit PML_VIRT_ADDR_BITS - 1 is set.
Definition at line 289 of file paging_types.h.
| #define PML_HIGHER_HALF_START (~((1ULL << (PML_VIRT_ADDR_BITS - 1)) - 1)) |
The start of the higher half of the address space.
Note that the "gap" between the lower half and higher half is non-canonical and thus invalid to access.
Calculated by sign-extending the bit PML_VIRT_ADDR_BITS - 1.
Definition at line 298 of file paging_types.h.
| #define PML_HIGHER_HALF_END (~0ULL & ~(PAGE_SIZE - 1)) |
The end of the higher half of the address space.
Calculated by taking the maximum possible address (all bits set) and aligning it down to the nearest page boundary.
Definition at line 305 of file paging_types.h.
| #define PML_HIGHER_TO_LOWER | ( | addr | ) | ((uintptr_t)(addr) - PML_HIGHER_HALF_START) |
Converts an address from the higher half to the lower half.
| addr | The address to convert. |
Definition at line 313 of file paging_types.h.
| #define PML_LOWER_TO_HIGHER | ( | addr | ) | ((uintptr_t)(addr) + PML_HIGHER_HALF_START) |
Converts an address from the lower half to the higher half.
| addr | The address to convert. |
Definition at line 321 of file paging_types.h.
| #define PML_ENSURE_LOWER_HALF | ( | addr | ) | ((phys_addr_t)(addr) >= PML_HIGHER_HALF_START ? PML_HIGHER_TO_LOWER(addr) : (phys_addr_t)(addr)) |
Ensures that the given address is in the lower half of the address space.
| addr | The address. |
Definition at line 329 of file paging_types.h.
| #define PML_ENSURE_HIGHER_HALF | ( | addr | ) | ((uintptr_t)(addr) < PML_HIGHER_HALF_START ? PML_LOWER_TO_HIGHER(addr) : (uintptr_t)(addr)) |
Ensures that the given address is in the higher half of the address space.
| addr | The address. |
Definition at line 338 of file paging_types.h.
| #define PML_ADDR_TO_INDEX | ( | addr, | |
| level | |||
| ) |
Calculates the index into a page table level for a given virtual address.
| addr | The virtual address. |
| level | The page table level. |
Definition at line 348 of file paging_types.h.
| #define PML_INDEX_TO_ADDR | ( | index, | |
| level | |||
| ) |
Calculates the lowest virtual address that maps to a given index at a specified page table level.
| index | The index within the page table level. |
| level | The page table level. |
Definition at line 359 of file paging_types.h.
| #define PML2_SIZE (1ULL << (PML_INDEX_BITS + PML_ADDR_OFFSET_BITS)) |
Size of the region mapped by a single PML2 entry.
Definition at line 367 of file paging_types.h.
| #define PML3_SIZE (1ULL << (2 * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS)) |
Size of the region mapped by a single PML3 entry.
Definition at line 372 of file paging_types.h.
| #define PML4_SIZE (1ULL << (3 * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS)) |
Size of the region mapped by a single PML4 entry.
Definition at line 377 of file paging_types.h.
| #define PML_MAX_CALLBACK ((1 << 8) - 1) |
Maximum number of callbacks that can be registered for a page table.
This is limited by the number of bits available in the page table entry for storing the callback ID, one low bit and 7 high bits. We reserve the maximum value to indicate no callback is associated with the page.
Definition at line 385 of file paging_types.h.
| #define PML_CALLBACK_NONE ((1 << 8) - 1) |
Special callback ID that indicates no callback is associated with the page.
Definition at line 390 of file paging_types.h.
| #define PML_PAGE_BUFFER_SIZE 64 |
Size of the page buffer used to batch page allocations and frees.
Definition at line 423 of file paging_types.h.
Page Frame Number type.
Definition at line 37 of file paging_types.h.
| typedef uintptr_t phys_addr_t |
Physical address type.
Definition at line 42 of file paging_types.h.
| typedef uint8_t pml_callback_id_t |
Callback ID type.
Definition at line 395 of file paging_types.h.
Generic page allocation function type.
Used to allow both the kernel and bootloader to provide their own page allocation functions.
Definition at line 411 of file paging_types.h.
Generic page free function type.
Used to allow both the kernel and bootloader to provide their own page free functions.
Definition at line 418 of file paging_types.h.
| enum pml_flags_t |
| Enumerator | |
|---|---|
| PML_NONE | |
| PML_PRESENT | |
| PML_WRITE | |
| PML_USER | |
| PML_WRITE_THROUGH | |
| PML_CACHE_DISABLED | |
| PML_ACCESSED | |
| PML_DIRTY | |
| PML_SIZE | |
| PML_GLOBAL | |
| PML_OWNED | |
| PML_NO_EXECUTE | |
Definition at line 170 of file paging_types.h.
| enum pml_level_t |
Enums for the different page table levels.
A page table is a tree like structure with 4 levels, each level has 512 entries. The levels are named PML1 (or PT), PML2 (or PD), PML3 (or PDPT) and PML4.
The PML4 is the root of the tree and each entry in the PML4 points to a PML3, each entry in a PML3 points to a PML2, and each entry in a PML2 points to a PML1. Each entry in a PML1 points to a 4KB page in memory.
The tree is traversed such that given some input virtual address, we calculate the index into each level using PML_ADDR_TO_INDEX(), until we reach the PML1 level, which then, as mentioned, points to the actual page in memory. So, in short, input virtual memory, traverse down the tree to find the physical memory and its flags.
| Enumerator | |
|---|---|
| PML1 | |
| PT | Page Table. |
| PML2 | |
| PD | Page Directory. |
| PML3 | |
| PDPT | Page Directory Pointer Table. |
| PML4 | |
| PML_LEVEL_AMOUNT | Total number of levels in the page table. |
Definition at line 207 of file paging_types.h.
| enum pml_index_t |
Indexes into a pml level.
In each pml entry there are 512 entries, the first 256 entries map the lower half of the address space, the last 256 entries map the higher half of the address space.
For each half of the entries the address mapped by it increases by a set amount depending on the level in the tree structure, but for the higher half the addresses wrap around and instead the address is or'd with 0xFFFF800000000000.
| Enumerator | |
|---|---|
| PML_INDEX_LOWER_HALF_MIN | |
| PML_INDEX_LOWER_HALF_MAX | |
| PML_INDEX_HIGHER_HALF_MIN | |
| PML_INDEX_HIGHER_HALF_MAX | |
| PML_INDEX_AMOUNT | |
Definition at line 230 of file paging_types.h.
|
inlinestatic |
Invalidates a region of pages in the TLB.
Even if a page table entry is modified, the CPU might still use a cached version of the entry in the TLB. To ensure our changes are detected we must invalidate this cache using invlpg or if many pages are changed, a full TLB flush by reloading CR3.
| addr | The starting virtual address of the region. |
| amount | The number of pages to invalidate. |
Definition at line 27 of file paging.h.
Checks if a page table level is empty (all entries are 0).
Used as a helper for page_table_clear().
| pml | The page table level to check. |
Definition at line 55 of file paging.h.
|
inlinestatic |
Allocates and initializes a new page table level.
| table | The page table. |
| outPml | Will be filled with the newly allocated page table level. |
0. On failure, ERR. Definition at line 74 of file paging.h.
|
inlinestatic |
Recursively frees a page table level, all its children and any owned pages.
| table | The page table. |
| pml | The current page table level to free. |
| level | The current level of the page table. |
Definition at line 94 of file paging.h.
|
inlinestatic |
Initializes a page table.
| table | The page table to initialize. |
| allocPages | The function to use for allocating pages. |
| freePages | The function to use for freeing pages. |
0. On failure, ERR. Definition at line 132 of file paging.h.
|
inlinestatic |
|
inlinestatic |
|
inlinestatic |
Retrieves or allocates the next level page table.
If the entry at the specified index is present, it retrieves the corresponding page table level. If the entry is not present and the PML_PRESENT flag is set in flags, it allocates a new page table level, and initializes it with the provided flags and callback ID. If the entry is not present and the PML_PRESENT flag is not set, it returns ERR.
| table | The page table. |
| current | The current page table level. |
| index | The index within the current page table level. |
| flags | The flags to assign to a newly allocated page table level, if applicable. |
| out | Will be filled with the retrieved or newly allocated page table level. |
0. On failure, ERR. Definition at line 182 of file paging.h.
|
inlinestatic |
Allows for fast traversal of the page table by caching previously accessed layers.
If the present flag is not set in flags then no new levels will be allocated and if non present pages are encountered the function will return false.
Note that higher level flags are or'd with PML_WRITE | PML_USER since only the permissions of a higher level will apply to lower levels, meaning that the lowest level should be the one with the actual desired permissions. Additionally, the PML_GLOBAL flag is not allowed on the PML3 level.
| table | The page table. |
| traverse | The helper structure used to cache each layer. |
| addr | The target virtual address. |
| flags | The flags to assign to newly allocated levels; if the present flag is not set then don't allocate new levels. |
0. On failure, ERR. Definition at line 255 of file paging.h.
|
inlinestatic |
Retrieves the physical address mapped to a given virtual address.
| table | The page table. |
| addr | The virtual address to look up. |
| out | Will be filled with the corresponding physical address on success. |
0. On failure, ERR. Definition at line 303 of file paging.h.
|
inlinestatic |
Checks if a range of virtual addresses is completely mapped.
If any page in the range is not mapped, the function returns false.
| table | The page table. |
| addr | The starting virtual address. |
| amount | The number of pages to check. |
true if the entire range is mapped, false otherwise. Definition at line 334 of file paging.h.
|
inlinestatic |
Checks if a range of virtual addresses is completely unmapped.
If any page in the range is mapped, the function returns false.
| table | The page table. |
| addr | The starting virtual address. |
| amount | The number of pages to check. |
true if the entire range is unmapped, false otherwise. Definition at line 363 of file paging.h.
|
inlinestatic |
Maps a range of virtual addresses to physical addresses in the page table.
If any page in the range is already mapped, the function will fail and return ERR.
| table | The page table. |
| addr | The starting virtual address. |
| phys | The starting physical address. |
| amount | The number of pages to map. |
| flags | The flags to set for the mapped pages. Must include PML_PRESENT. |
| callbackId | The callback ID to associate with the mapped pages or PML_CALLBACK_NONE. |
0. On failure, ERR. Definition at line 396 of file paging.h.
|
inlinestatic |
Maps an array of physical pages to contiguous virtual addresses in the page table.
If any page in the range is already mapped, the function will fail and return ERR.
| table | The page table. |
| addr | The starting virtual address. |
| pfns | Array of page frame numbers to map. |
| amount | The number of pages in the array to map. |
| flags | The flags to set for the mapped pages. Must include PML_PRESENT. |
| callbackId | The callback ID to associate with the mapped pages or PML_CALLBACK_NONE. |
0. On failure, ERR. Definition at line 443 of file paging.h.
|
inlinestatic |
Unmaps a range of virtual addresses from the page table.
If a page is not currently mapped, it is skipped.
Will NOT free owned pages, instead it only sets the present flag to 0. This is to help with TLB shootdowns where we must unmap, wait for all CPUs to acknowledge the unmap, and only then free the pages. Use page_table_clear() to free owned pages separately.
| table | The page table. |
| addr | The starting virtual address. |
| amount | The number of pages to unmap. |
Definition at line 489 of file paging.h.
|
inlinestatic |
Pushes a page table level onto the page buffer, freeing the buffer if full.
Used as a helper for page_table_clear().
| table | The page table. |
| buffer | The page buffer. |
| address | The address to push. |
Definition at line 530 of file paging.h.
|
inlinestatic |
Flushes the page buffer, freeing any remaining pages.
Used as a helper for page_table_clear().
| table | The page table. |
| buffer | The page buffer. |
Definition at line 550 of file paging.h.
|
inlinestatic |
Clears any empty page table levels any time a pml1, pml2 or pml3 boundary is crossed.
Used as a helper for page_table_clear().
| table | The page table. |
| prevTraverse | The previous traverse state. |
| traverse | The current traverse state. |
| pageBuffer | The page buffer. |
Definition at line 569 of file paging.h.
|
inlinestatic |
Clears page table entries in the specified range and frees any owned pages.
Intended to be used in conjunction with page_table_unmap() to first unmap pages and then free any owned pages after TLB shootdown is complete.
Any still present or pinned entries will be skipped.
All unskipped entries will be fully cleared (set to 0).
| table | The page table. |
| addr | The starting virtual address. |
| amount | The number of pages to clear. |
Definition at line 603 of file paging.h.
|
inlinestatic |
Collects the number of pages associated with each callback ID in the specified range.
| table | The page table. |
| addr | The starting virtual address. |
| amount | The number of pages to check. |
| callbacks | An array of size PML_MAX_CALLBACK that will be filled with the occurrences of each callback ID. |
Definition at line 647 of file paging.h.
|
inlinestatic |
Sets the flags for a range of pages in the page table.
If a page is not currently mapped, it is skipped.
| table | The page table. |
| addr | The starting virtual address. |
| amount | The number of pages to update. |
| flags | The new flags to set. The PML_OWNED flag is preserved. |
0. On failure, ERR. Definition at line 682 of file paging.h.
|
inlinestatic |
Finds the first contiguous unmapped region with the given number of pages within the specified address range.
Good luck with this function; I'm like 99% sure it works.
This function should be O(r) in the worst case, where r is the number of pages in the address range; note how the number of pages needed does not affect the complexity. This has the fun effect that the more memory is allocated the faster this function will run on average.
| table | The page table. |
| startAddr | The start address to begin searching (inclusive). |
| endAddr | The end address of the search range (exclusive). |
| amount | The number of consecutive unmapped pages needed. |
| alignment | The required alignment for the region in bytes. |
| outAddr | Will be filled with the start address of the unmapped region if found. |
0. If no suitable region is found, ERR. Definition at line 728 of file paging.h.
|
inlinestatic |
Checks if any page in a range is pinned.
| table | The page table. |
| addr | The starting virtual address. |
| amount | The number of pages to check. |
true if any page in the range is pinned, false otherwise. Definition at line 976 of file paging.h.
|
inlinestatic |
Counts the number of pages in a range that have all the specified flags set.
Can be used to, for example, check the total amount of pages allocated to a process by counting the pages with the PML_PRESENT | PML_USER | PML_OWNED flags set.
| table | The page table. |
| addr | The starting virtual address. |
| amount | The number of pages to check. |
| flags | The flags to check for. |
Definition at line 1012 of file paging.h.