PatchworkOS
Loading...
Searching...
No Matches
Paging

Paging. More...

Data Structures

struct  page_table_traverse_t
 Helper structure for fast traversal of the page table. More...
 
struct  page_table_page_buffer_t
 Buffer of pages used to batch page frees. More...
 
struct  pml_entry_t
 
struct  pml_t
 A page table level. More...
 
struct  page_table_t
 A page table structure. More...
 
struct  pml_flags_t
 An entry in a page table without a specified address or callback ID. More...
 

Macros

#define PAGE_TABLE_TRAVERSE_CREATE
 Create a page_table_traverse_t initializer.
 
#define PML_PIN_DEPTH_MAX   3
 Maximum pin depth for a page.
 
#define PML_ADDR_OFFSET_BITS   12
 Number of bits used for the offset within a page.
 
#define PML_ADDR_MASK   0x000FFFFFFFFFF000ULL
 Mask for the address in a page table entry.
 
#define PML_FLAGS_MASK
 Mask for all pml flags.
 
#define PML_INDEX_BITS   9
 Number of bits used to index into a page table level.
 
#define PML_INDEX_TO_ADDR_NO_WRAP(index, level)    ((uintptr_t)(index) << (((level) - 1) * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS))
 Calculates the lowest virtual address that maps to a given index at a specified page table level, without wrapping.
 
#define PML_VIRT_ADDR_BITS   (PML_INDEX_BITS * PML_LEVEL_AMOUNT + PML_ADDR_OFFSET_BITS)
 Total number of bits used for virtual addresses.
 
#define PML_LOWER_HALF_START   (0)
 The start of the lower half of the address space.
 
#define PML_LOWER_HALF_END   ((1ULL << (PML_VIRT_ADDR_BITS - 1)) - PAGE_SIZE)
 The end of the lower half of the address space.
 
#define PML_HIGHER_HALF_START   (~((1ULL << (PML_VIRT_ADDR_BITS - 1)) - 1))
 The start of the higher half of the address space.
 
#define PML_HIGHER_HALF_END   (~0ULL & ~(PAGE_SIZE - 1))
 The end of the higher half of the address space.
 
#define PML_HIGHER_TO_LOWER(addr)   ((uintptr_t)(addr) - PML_HIGHER_HALF_START)
 Converts an address from the higher half to the lower half.
 
#define PML_LOWER_TO_HIGHER(addr)   ((uintptr_t)(addr) + PML_HIGHER_HALF_START)
 Converts an address from the lower half to the higher half.
 
#define PML_ENSURE_LOWER_HALF(addr)    ((uintptr_t)(addr) >= PML_HIGHER_HALF_START ? PML_HIGHER_TO_LOWER(addr) : (uintptr_t)(addr))
 Ensures that the given address is in the lower half of the address space.
 
#define PML_ADDR_TO_INDEX(addr, level)
 Calculates the index into a page table level for a given virtual address.
 
#define PML_INDEX_TO_ADDR(index, level)
 Calculates the lowest virtual address that maps to a given index at a specified page table level.
 
#define PML2_SIZE   (1ULL << (PML_INDEX_BITS + PML_ADDR_OFFSET_BITS))
 Size of the region mapped by a single PML2 entry.
 
#define PML3_SIZE   (1ULL << (2 * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS))
 Size of the region mapped by a single PML3 entry.
 
#define PML4_SIZE   (1ULL << (3 * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS))
 Size of the region mapped by a single PML4 entry.
 
#define PML_MAX_CALLBACK   ((1 << 8) - 1)
 Maximum number of callbacks that can be registered for a page table.
 
#define PML_CALLBACK_NONE   ((1 << 8) - 1)
 Special callback ID that indicates no callback is associated with the page.
 
#define PML_PAGE_BUFFER_SIZE   64
 Size of the page buffer used to batch page allocations and frees.
 

Typedefs

typedef uint8_t pml_callback_id_t
 Callback ID type.
 
typedef uint64_t(* pml_alloc_pages_t) (void **, uint64_t)
 Generic page allocation function type.
 
typedef void(* pml_free_pages_t) (void **, uint64_t)
 Generic page free function type.
 

Enumerations

enum  pml_flags_t {
  PML_NONE = 0 ,
  PML_PRESENT = (1ULL << 0) ,
  PML_WRITE = (1ULL << 1) ,
  PML_USER = (1ULL << 2) ,
  PML_WRITE_THROUGH = (1ULL << 3) ,
  PML_CACHE_DISABLED = (1ULL << 4) ,
  PML_ACCESSED = (1ULL << 5) ,
  PML_DIRTY = (1ULL << 6) ,
  PML_SIZE = (1ULL << 7) ,
  PML_GLOBAL = (1ULL << 8) ,
  PML_OWNED = (1ULL << 9) ,
  PML_NO_EXECUTE = (1ULL << 63)
}
 
enum  pml_level_t {
  PML1 = 1 ,
  PT = 1 ,
  PML2 = 2 ,
  PD = 2 ,
  PML3 = 3 ,
  PDPT = 3 ,
  PML4 = 4 ,
  PML_LEVEL_AMOUNT = 4
}
 Enums for the different page table levels. More...
 
enum  pml_index_t {
  PML_INDEX_LOWER_HALF_MIN = 0 ,
  PML_INDEX_LOWER_HALF_MAX = 255 ,
  PML_INDEX_HIGHER_HALF_MIN = 256 ,
  PML_INDEX_HIGHER_HALF_MAX = 511 ,
  PML_INDEX_AMOUNT = 512
}
 Indexes into a pml level. More...
 

Functions

static void tlb_invalidate (void *virtAddr, uint64_t pageCount)
 Invalidates a region of pages in the TLB.
 
static uintptr_t pml_accessible_addr (pml_entry_t entry)
 Retrieves the address from a page table entry and converts it to an accessible address.
 
static bool pml_is_empty (pml_t *pml)
 Checks if a page table level is empty (all entries are 0).
 
static uint64_t pml_new (page_table_t *table, pml_t **outPml)
 Allocates and initializes a new page table level.
 
static void pml_free (page_table_t *table, pml_t *pml, pml_level_t level)
 Recursively frees a page table level, all its children and any owned pages.
 
static uint64_t page_table_init (page_table_t *table, pml_alloc_pages_t allocPages, pml_free_pages_t freePages)
 Initializes a page table.
 
static void page_table_deinit (page_table_t *table)
 Deinitializes a page table, freeing all allocated pages.
 
static void page_table_load (page_table_t *table)
 Loads the page table into the CR3 register if it is not already loaded.
 
static uint64_t page_table_get_pml (page_table_t *table, pml_t *currentPml, pml_index_t index, pml_flags_t flags, pml_t **outPml)
 Retrieves or allocates the next level page table.
 
static uint64_t page_table_traverse (page_table_t *table, page_table_traverse_t *traverse, uintptr_t virtAddr, pml_flags_t flags)
 Allows for fast traversal of the page table by caching previously accessed layers.
 
static uint64_t page_table_get_phys_addr (page_table_t *table, const void *virtAddr, void **outPhysAddr)
 Retrieves the physical address mapped to a given virtual address.
 
static bool page_table_is_mapped (page_table_t *table, const void *virtAddr, uint64_t pageAmount)
 Checks if a range of virtual addresses is completely mapped.
 
static bool page_table_is_unmapped (page_table_t *table, const void *virtAddr, uint64_t pageAmount)
 Checks if a range of virtual addresses is completely unmapped.
 
static uint64_t page_table_map (page_table_t *table, void *virtAddr, void *physAddr, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
 Maps a range of virtual addresses to physical addresses in the page table.
 
static uint64_t page_table_map_pages (page_table_t *table, void *virtAddr, void **pages, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
 Maps an array of physical pages to contiguous virtual addresses in the page table.
 
static void page_table_unmap (page_table_t *table, void *virtAddr, uint64_t pageAmount)
 Unmaps a range of virtual addresses from the page table.
 
static void page_table_page_buffer_push (page_table_t *table, page_table_page_buffer_t *buffer, void *address)
 Pushes a page table level onto the page buffer, freeing the buffer if full.
 
static void page_table_page_buffer_flush (page_table_t *table, page_table_page_buffer_t *buffer)
 Flushes the page buffer, freeing any remaining pages.
 
static void page_table_clear_pml1_pml2_pml3 (page_table_t *table, page_table_traverse_t *prevTraverse, page_table_traverse_t *traverse, page_table_page_buffer_t *pageBuffer)
 Clears any empty page table levels any time a pml1, pml2 or pml3 boundary is crossed.
 
static void page_table_clear (page_table_t *table, void *virtAddr, uint64_t pageAmount)
 Clears page table entries in the specified range and frees any owned pages.
 
static void page_table_collect_callbacks (page_table_t *table, void *virtAddr, uint64_t pageAmount, uint64_t *callbacks)
 Collects the number of pages associated with each callback ID in the specified range.
 
static uint64_t page_table_set_flags (page_table_t *table, void *virtAddr, uint64_t pageAmount, pml_flags_t flags)
 Sets the flags for a range of pages in the page table.
 
static uint64_t page_table_find_unmapped_region (page_table_t *table, void *startAddr, void *endAddr, uint64_t pageAmount, void **outAddr)
 Finds the first contiguous unmapped region with the given number of pages within the specified address range.
 
static bool page_table_is_pinned (page_table_t *table, const void *virtAddr, uint64_t pageAmount)
 Checks if any page in a range is pinned.
 

Detailed Description

Paging.

Paging is used to map virtual memory to physical memory, meaning that when some address is accessed by the CPU, the address might actually point to a different location in physical memory. This is done using a page table.

Additionally, Patchwork uses page tables to store metadata about memory pages which is then used by the vmm to avoid the need for a separate data structure that keeps track of memory.

For this implementation it was decided to try to derive every value from first principles, meaning that no values are hardcoded. For example, the lower and higher half boundaries are derived from the number of bits that can be used for the address in a page table entry. This makes the code more complex, but means we rely on fewer potentially incorrect sources.

Note that most if not all of the paging functions will not check ahead of time if the operation will succeed. For example, page_table_map() will not check that the target range is unmapped; if it's not, then the function will fail partway through and make a mess. It's simply best for performance and flexibility to have the caller ensure that the operation will succeed.

See also
OSDev Paging

Macro Definition Documentation

◆ PAGE_TABLE_TRAVERSE_CREATE

#define PAGE_TABLE_TRAVERSE_CREATE
Value:
{ \
.pml3Valid = false, \
.pml2Valid = false, \
.pml1Valid = false, \
}

Create a page_table_traverse_t initializer.

Returns
A page_table_traverse_t initializer.

Definition at line 255 of file paging.h.

◆ PML2_SIZE

#define PML2_SIZE   (1ULL << (PML_INDEX_BITS + PML_ADDR_OFFSET_BITS))

Size of the region mapped by a single PML2 entry.

Definition at line 312 of file paging_types.h.

◆ PML3_SIZE

#define PML3_SIZE   (1ULL << (2 * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS))

Size of the region mapped by a single PML3 entry.

Definition at line 317 of file paging_types.h.

◆ PML4_SIZE

#define PML4_SIZE   (1ULL << (3 * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS))

Size of the region mapped by a single PML4 entry.

Definition at line 322 of file paging_types.h.

◆ PML_ADDR_MASK

#define PML_ADDR_MASK   0x000FFFFFFFFFF000ULL

Mask for the address in a page table entry.

The address is stored in bits 12-51 of the entry.

Definition at line 113 of file paging_types.h.

◆ PML_ADDR_OFFSET_BITS

#define PML_ADDR_OFFSET_BITS   12

Number of bits used for the offset within a page.

Each page is 4KB (2^12 bytes).

Definition at line 106 of file paging_types.h.

◆ PML_ADDR_TO_INDEX

#define PML_ADDR_TO_INDEX (   addr,
  level 
)
Value:
((pml_index_t)(((uintptr_t)(addr) >> (((level) - 1) * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS)) & \
#define PML_ADDR_OFFSET_BITS
Number of bits used for the offset within a page.
pml_index_t
Indexes into a pml level.
#define PML_INDEX_BITS
Number of bits used to index into a page table level.
@ PML_INDEX_AMOUNT
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:43

Calculates the index into a page table level for a given virtual address.

Parameters
addrThe virtual address.
levelThe page table level.
Returns
The index within the page table level.

Definition at line 293 of file paging_types.h.

◆ PML_CALLBACK_NONE

#define PML_CALLBACK_NONE   ((1 << 8) - 1)

Special callback ID that indicates no callback is associated with the page.

Definition at line 335 of file paging_types.h.

◆ PML_ENSURE_LOWER_HALF

#define PML_ENSURE_LOWER_HALF (   addr)     ((uintptr_t)(addr) >= PML_HIGHER_HALF_START ? PML_HIGHER_TO_LOWER(addr) : (uintptr_t)(addr))

Ensures that the given address is in the lower half of the address space.

If the address is in the higher half, it is converted to the lower half. If the address is already in the lower half, it is returned unchanged.

Parameters
addrThe address to ensure is in the lower half.
Returns
The address in the lower half of the address space.

Definition at line 283 of file paging_types.h.

◆ PML_FLAGS_MASK

#define PML_FLAGS_MASK
Value:
@ PML_ACCESSED
@ PML_WRITE_THROUGH
@ PML_DIRTY
@ PML_CACHE_DISABLED
@ PML_USER
@ PML_PRESENT
@ PML_SIZE
@ PML_WRITE
@ PML_GLOBAL
@ PML_OWNED
@ PML_NO_EXECUTE

Mask for all pml flags.

Definition at line 140 of file paging_types.h.

◆ PML_HIGHER_HALF_END

#define PML_HIGHER_HALF_END   (~0ULL & ~(PAGE_SIZE - 1))

The end of the higher half of the address space.

Calculated by taking the maximum possible address (all bits set) and aligning it down to the nearest page boundary.

Definition at line 256 of file paging_types.h.

◆ PML_HIGHER_HALF_START

#define PML_HIGHER_HALF_START   (~((1ULL << (PML_VIRT_ADDR_BITS - 1)) - 1))

The start of the higher half of the address space.

Note that the "gap" between the lower half and higher half is non-canonical and thus invalid to access.

Calculated by sign-extending the bit PML_VIRT_ADDR_BITS - 1.

Definition at line 249 of file paging_types.h.

◆ PML_HIGHER_TO_LOWER

#define PML_HIGHER_TO_LOWER (   addr)    ((uintptr_t)(addr) - PML_HIGHER_HALF_START)

Converts an address from the higher half to the lower half.

Parameters
addrThe address to convert.
Returns
The converted address.

Definition at line 264 of file paging_types.h.

◆ PML_INDEX_BITS

#define PML_INDEX_BITS   9

Number of bits used to index into a page table level.

Each page table level has 512 entries (2^9 = 512).

Definition at line 195 of file paging_types.h.

◆ PML_INDEX_TO_ADDR

#define PML_INDEX_TO_ADDR (   index,
  level 
)
Value:
? PML_INDEX_TO_ADDR_NO_WRAP(index, level) \
#define PML_INDEX_TO_ADDR_NO_WRAP(index, level)
Calculates the lowest virtual address that maps to a given index at a specified page table level,...
#define PML_HIGHER_HALF_START
The start of the higher half of the address space.
@ PML_INDEX_HIGHER_HALF_MIN

Calculates the lowest virtual address that maps to a given index at a specified page table level.

Parameters
indexThe index within the page table level.
levelThe page table level.
Returns
The lowest virtual addr that maps to the given index at the specified level

Definition at line 304 of file paging_types.h.

◆ PML_INDEX_TO_ADDR_NO_WRAP

#define PML_INDEX_TO_ADDR_NO_WRAP (   index,
  level 
)     ((uintptr_t)(index) << (((level) - 1) * PML_INDEX_BITS + PML_ADDR_OFFSET_BITS))

Calculates the lowest virtual address that maps to a given index at a specified page table level, without wrapping.

This macro does not handle the wrapping behavior for the higher half of the address space, use PML_INDEX_TO_ADDR() instead.

Parameters
indexThe index within the page table level.
levelThe page table level.
Returns
The lowest virtual addr that maps to the given index at the specified level

Definition at line 208 of file paging_types.h.

◆ PML_LOWER_HALF_END

#define PML_LOWER_HALF_END   ((1ULL << (PML_VIRT_ADDR_BITS - 1)) - PAGE_SIZE)

The end of the lower half of the address space.

Can be thought of as the last page-aligned address before bit PML_VIRT_ADDR_BITS - 1 is set.

Definition at line 240 of file paging_types.h.

◆ PML_LOWER_HALF_START

#define PML_LOWER_HALF_START   (0)

The start of the lower half of the address space.

The lower half starts at address 0. Obviously.

Definition at line 233 of file paging_types.h.

◆ PML_LOWER_TO_HIGHER

#define PML_LOWER_TO_HIGHER (   addr)    ((uintptr_t)(addr) + PML_HIGHER_HALF_START)

Converts an address from the lower half to the higher half.

Parameters
addrThe address to convert.
Returns
The converted address.

Definition at line 272 of file paging_types.h.

◆ PML_MAX_CALLBACK

#define PML_MAX_CALLBACK   ((1 << 8) - 1)

Maximum number of callbacks that can be registered for a page table.

This is limited by the number of bits available in the page table entry for storing the callback ID, one low bit and 7 high bits. We reserve the maximum value to indicate no callback is associated with the page.

Definition at line 330 of file paging_types.h.

◆ PML_PAGE_BUFFER_SIZE

#define PML_PAGE_BUFFER_SIZE   64

Size of the page buffer used to batch page allocations and frees.

Definition at line 368 of file paging_types.h.

◆ PML_PIN_DEPTH_MAX

#define PML_PIN_DEPTH_MAX   3

Maximum pin depth for a page.

See pml_entry_t::pinDepth for more information.

Definition at line 99 of file paging_types.h.

◆ PML_VIRT_ADDR_BITS

#define PML_VIRT_ADDR_BITS   (PML_INDEX_BITS * PML_LEVEL_AMOUNT + PML_ADDR_OFFSET_BITS)

Total number of bits used for virtual addresses.

The x86_64 architecture only uses 48 bits for virtual addresses. However, with 5 level paging (not used here) it would be possible to use up to 57 bits.

PatchworkOS currently uses 4 level paging, so we have:

  • 9 bits for PML4
  • 9 bits for PML3
  • 9 bits for PML2
  • 9 bits for PML1
  • 12 bits for page offset

Total: 48 bits.

Definition at line 226 of file paging_types.h.

Typedef Documentation

◆ pml_alloc_pages_t

typedef uint64_t(* pml_alloc_pages_t) (void **, uint64_t)

Generic page allocation function type.

Used to allow both the kernel and bootloader to provide their own page allocation functions.

Definition at line 356 of file paging_types.h.

◆ pml_callback_id_t

Callback ID type.

Definition at line 340 of file paging_types.h.

◆ pml_free_pages_t

typedef void(* pml_free_pages_t) (void **, uint64_t)

Generic page free function type.

Used to allow both the kernel and bootloader to provide their own page free functions.

Definition at line 363 of file paging_types.h.

Enumeration Type Documentation

◆ pml_flags_t

Enumerator
PML_NONE 
PML_PRESENT 
PML_WRITE 
PML_USER 
PML_WRITE_THROUGH 
PML_CACHE_DISABLED 
PML_ACCESSED 
PML_DIRTY 
PML_SIZE 
PML_GLOBAL 
PML_OWNED 
PML_NO_EXECUTE 

Definition at line 121 of file paging_types.h.

◆ pml_index_t

Indexes into a pml level.

In each pml entry there are 512 entries, the first 256 entries map the lower half of the address space, the last 256 entries map the higher half of the address space.

For each half of the entries the address mapped by it increases by a set amount depending on the level in the tree structure, but for the higher half the addresses wrap around and instead the address is or'd with 0xFFFF800000000000.

See also
PML_INDEX_TO_ADDR() for the wrapping behavior.
Enumerator
PML_INDEX_LOWER_HALF_MIN 
PML_INDEX_LOWER_HALF_MAX 
PML_INDEX_HIGHER_HALF_MIN 
PML_INDEX_HIGHER_HALF_MAX 
PML_INDEX_AMOUNT 

Definition at line 181 of file paging_types.h.

◆ pml_level_t

Enums for the different page table levels.

A page table is a tree like structure with 4 levels, each level has 512 entries. The levels are named PML1 (or PT), PML2 (or PD), PML3 (or PDPT) and PML4.

The PML4 is the root of the tree and each entry in the PML4 points to a PML3, each entry in a PML3 points to a PML2, and each entry in a PML2 points to a PML1. Each entry in a PML1 points to a 4KB page in memory.

The tree is traversed such that given some input virtual address, we calculate the index into each level using PML_ADDR_TO_INDEX(), until we reach the PML1 level, which then, as mentioned, points to the actual page in memory. So, in short, input virtual memory, traverse down the tree to find the physical memory and its flags.

Enumerator
PML1 
PT 

Page Table.

PML2 
PD 

Page Directory.

PML3 
PDPT 

Page Directory Pointer Table.

PML4 
PML_LEVEL_AMOUNT 

Total number of levels in the page table.

Definition at line 158 of file paging_types.h.

Function Documentation

◆ page_table_clear()

static void page_table_clear ( page_table_t table,
void *  virtAddr,
uint64_t  pageAmount 
)
inlinestatic

Clears page table entries in the specified range and frees any owned pages.

Intended to be used in conjunction with page_table_unmap() to first unmap pages and then free any owned pages after TLB shootdown is complete.

Any still present or pinned entries will be skipped.

All unskipped entries will be fully cleared (set to 0).

Parameters
tableThe page table.
virtAddrThe starting virtual address.
pageAmountThe number of pages to clear.

Definition at line 629 of file paging.h.

References page_table_traverse_t::entry, ERR, pml_entry_t::owned, PAGE_SIZE, page_table_clear_pml1_pml2_pml3(), page_table_page_buffer_flush(), page_table_page_buffer_push(), page_table_traverse(), PAGE_TABLE_TRAVERSE_CREATE, pageAmount, page_table_page_buffer_t::pageCount, page_table_traverse_t::pml1Valid, page_table_traverse_t::pml2Valid, page_table_traverse_t::pml3Valid, pml_accessible_addr(), PML_NONE, pml_entry_t::present, and pml_entry_t::raw.

Referenced by vmm_page_table_unmap_with_shootdown().

◆ page_table_clear_pml1_pml2_pml3()

static void page_table_clear_pml1_pml2_pml3 ( page_table_t table,
page_table_traverse_t prevTraverse,
page_table_traverse_t traverse,
page_table_page_buffer_t pageBuffer 
)
inlinestatic

Clears any empty page table levels any time a pml1, pml2 or pml3 boundary is crossed.

Used as a helper for page_table_clear().

Parameters
tableThe page table.
prevTraverseThe previous traverse state.
traverseThe current traverse state.
pageBufferThe page buffer.

Definition at line 595 of file paging.h.

References pml_t::entries, page_table_traverse_t::oldIdx1, page_table_traverse_t::oldIdx2, page_table_traverse_t::oldIdx3, page_table_page_buffer_push(), page_table_traverse_t::pml1, page_table_traverse_t::pml1Valid, page_table_traverse_t::pml2, page_table_traverse_t::pml2Valid, page_table_traverse_t::pml3, page_table_traverse_t::pml3Valid, page_table_t::pml4, pml_is_empty(), and pml_entry_t::raw.

Referenced by page_table_clear().

◆ page_table_collect_callbacks()

static void page_table_collect_callbacks ( page_table_t table,
void *  virtAddr,
uint64_t  pageAmount,
uint64_t callbacks 
)
inlinestatic

Collects the number of pages associated with each callback ID in the specified range.

Parameters
tableThe page table.
virtAddrThe starting virtual address.
pageAmountThe number of pages to check.
callbacksAn array of size PML_MAX_CALLBACK that will be filled with the occurrences of each callback ID.

Definition at line 675 of file paging.h.

References page_table_traverse_t::entry, ERR, pml_entry_t::highCallbackId, pml_entry_t::lowCallbackId, PAGE_SIZE, page_table_traverse(), PAGE_TABLE_TRAVERSE_CREATE, pageAmount, PML_CALLBACK_NONE, PML_NONE, and pml_entry_t::present.

Referenced by vmm_unmap().

◆ page_table_deinit()

static void page_table_deinit ( page_table_t table)
inlinestatic

Deinitializes a page table, freeing all allocated pages.

Parameters
tableThe page table to deinitialize.

Definition at line 173 of file paging.h.

References PML4, page_table_t::pml4, and pml_free().

Referenced by space_deinit().

◆ page_table_find_unmapped_region()

static uint64_t page_table_find_unmapped_region ( page_table_t table,
void *  startAddr,
void *  endAddr,
uint64_t  pageAmount,
void **  outAddr 
)
inlinestatic

Finds the first contiguous unmapped region with the given number of pages within the specified address range.

Good luck with this function, I'm like 99% sure it works.

This function should be O(r) in the worst case where r is the number of pages in the address range; note how the number of pages needed does not affect the complexity. This has the fun effect that the more memory is allocated, the faster this function will run on average.

Parameters
tableThe page table.
startAddrThe start address to begin searching (inclusive).
endAddrThe end address of the search range (exclusive).
pageAmountThe number of consecutive unmapped pages needed.
outAddrWill be filled with the start address of the unmapped region if found.
Returns
On success, 0. If no suitable region is found, ERR.

Definition at line 756 of file paging.h.

References pml_t::entries, ERR, MIN, PAGE_SIZE, pageAmount, PML1, PML2, PML2_SIZE, PML3, PML3_SIZE, PML4, page_table_t::pml4, pml_accessible_addr(), PML_ADDR_TO_INDEX, PML_INDEX_AMOUNT, PML_INDEX_TO_ADDR, pml_entry_t::present, ROUND_DOWN, and ROUND_UP.

Referenced by space_find_free_region().

◆ page_table_get_phys_addr()

static uint64_t page_table_get_phys_addr ( page_table_t table,
const void *  virtAddr,
void **  outPhysAddr 
)
inlinestatic

Retrieves the physical address mapped to a given virtual address.

If the virtual address is not mapped, the function returns ERR.

Parameters
tableThe page table.
virtAddrThe virtual address to look up.
outPhysAddrWill be filled with the corresponding physical address on success.
Returns
On success, 0. On failure, ERR.

Definition at line 329 of file paging.h.

References pml_entry_t::addr, page_table_traverse_t::entry, ERR, PAGE_SIZE, page_table_traverse(), PAGE_TABLE_TRAVERSE_CREATE, PML_ADDR_OFFSET_BITS, PML_NONE, pml_entry_t::present, and ROUND_DOWN.

◆ page_table_get_pml()

static uint64_t page_table_get_pml ( page_table_t table,
pml_t currentPml,
pml_index_t  index,
pml_flags_t  flags,
pml_t **  outPml 
)
inlinestatic

Retrieves or allocates the next level page table.

If the entry at the specified index is present, it retrieves the corresponding page table level. If the entry is not present and the PML_PRESENT flag is set in flags, it allocates a new page table level, and initializes it with the provided flags and callback ID. If the entry is not present and the PML_PRESENT flag is not set, it returns ERR.

Parameters
tableThe page table.
currentPmlThe current page table level.
indexThe index within the current page table level.
flagsThe flags to assign to a newly allocated page table level, if applicable.
callbackIdThe callback ID to assign to a newly allocated page table level, if applicable.
outPmlWill be filled with the retrieved or newly allocated page table level.
Returns
On success, 0. On failure, ERR.

Definition at line 208 of file paging.h.

References pml_t::entries, ERR, pml_accessible_addr(), PML_ADDR_MASK, PML_ENSURE_LOWER_HALF, PML_FLAGS_MASK, pml_new(), PML_PRESENT, pml_entry_t::present, and pml_entry_t::raw.

Referenced by page_table_traverse().

◆ page_table_init()

static uint64_t page_table_init ( page_table_t table,
pml_alloc_pages_t  allocPages,
pml_free_pages_t  freePages 
)
inlinestatic

Initializes a page table.

Parameters
tableThe page table to initialize.
allocPagesThe function to use for allocating pages.
freePagesThe function to use for freeing pages.
Returns
On success, 0. On failure, ERR.

Definition at line 157 of file paging.h.

References page_table_t::allocPages, ERR, page_table_t::freePages, page_table_t::pml4, and pml_new().

Referenced by mem_page_table_init(), and space_init().

◆ page_table_is_mapped()

static bool page_table_is_mapped ( page_table_t table,
const void *  virtAddr,
uint64_t  pageAmount 
)
inlinestatic

Checks if a range of virtual addresses is completely mapped.

If any page in the range is not mapped, the function returns false.

Parameters
tableThe page table.
virtAddrThe starting virtual address.
pageAmountThe number of pages to check.
Returns
true if the entire range is mapped, false otherwise.

Definition at line 360 of file paging.h.

References page_table_traverse_t::entry, ERR, PAGE_SIZE, page_table_traverse(), PAGE_TABLE_TRAVERSE_CREATE, pageAmount, PML_NONE, and pml_entry_t::present.

Referenced by space_is_mapped(), space_pin(), space_pin_terminated(), and space_populate_user_region().

◆ page_table_is_pinned()

static bool page_table_is_pinned ( page_table_t table,
const void *  virtAddr,
uint64_t  pageAmount 
)
inlinestatic

Checks if any page in a range is pinned.

Parameters
tableThe page table.
virtAddrThe starting virtual address.
pageAmountThe number of pages to check.
Returns
true if any page in the range is pinned, false otherwise.

Definition at line 942 of file paging.h.

References page_table_traverse_t::entry, ERR, PAGE_SIZE, page_table_traverse(), PAGE_TABLE_TRAVERSE_CREATE, pageAmount, pml_entry_t::pinned, PML_NONE, and pml_entry_t::present.

Referenced by vmm_alloc(), vmm_map(), vmm_map_pages(), vmm_protect(), and vmm_unmap().

◆ page_table_is_unmapped()

static bool page_table_is_unmapped ( page_table_t table,
const void *  virtAddr,
uint64_t  pageAmount 
)
inlinestatic

Checks if a range of virtual addresses is completely unmapped.

If any page in the range is mapped, the function returns false.

Parameters
tableThe page table.
virtAddrThe starting virtual address.
pageAmountThe number of pages to check.
Returns
true if the entire range is unmapped, false otherwise.

Definition at line 389 of file paging.h.

References page_table_traverse_t::entry, ERR, PAGE_SIZE, page_table_traverse(), PAGE_TABLE_TRAVERSE_CREATE, pageAmount, PML_NONE, and pml_entry_t::present.

Referenced by space_find_free_region(), vmm_alloc(), vmm_map(), vmm_map_pages(), and vmm_protect().

◆ page_table_load()

static void page_table_load ( page_table_t table)
inlinestatic

Loads the page table into the CR3 register if it is not already loaded.

Parameters
tableThe page table to load.

Definition at line 183 of file paging.h.

References cr3_read(), cr3_write(), page_table_t::pml4, and PML_ENSURE_LOWER_HALF.

Referenced by efi_main(), and space_load().

◆ page_table_map()

static uint64_t page_table_map ( page_table_t table,
void *  virtAddr,
void *  physAddr,
uint64_t  pageAmount,
pml_flags_t  flags,
pml_callback_id_t  callbackId 
)
inlinestatic

Maps a range of virtual addresses to physical addresses in the page table.

If any page in the range is already mapped, the function will fail and return ERR.

Parameters
tableThe page table.
virtAddrThe starting virtual address.
physAddrThe starting physical address.
pageAmountThe number of pages to map.
flagsThe flags to set for the mapped pages. Must include PML_PRESENT.
callbackIdThe callback ID to associate with the mapped pages or PML_CALLBACK_NONE.
Returns
On success, 0. On failure, ERR.

Definition at line 422 of file paging.h.

References pml_entry_t::addr, page_table_traverse_t::entry, ERR, pml_entry_t::highCallbackId, pml_entry_t::lowCallbackId, PAGE_SIZE, page_table_traverse(), PAGE_TABLE_TRAVERSE_CREATE, pageAmount, PML_ADDR_OFFSET_BITS, PML_ENSURE_LOWER_HALF, PML_PRESENT, pml_entry_t::present, and pml_entry_t::raw.

Referenced by mem_page_table_init(), space_populate_user_region(), vmm_init(), and vmm_map().

◆ page_table_map_pages()

static uint64_t page_table_map_pages ( page_table_t table,
void *  virtAddr,
void **  pages,
uint64_t  pageAmount,
pml_flags_t  flags,
pml_callback_id_t  callbackId 
)
inlinestatic

Maps an array of physical pages to contiguous virtual addresses in the page table.

If any page in the range is already mapped, the function will fail and return ERR.

Parameters
tableThe page table.
virtAddrThe starting virtual address.
pagesArray of physical page addresses to map.
pageAmountThe number of pages in the array to map.
flagsThe flags to set for the mapped pages. Must include PML_PRESENT.
callbackIdThe callback ID to associate with the mapped pages or PML_CALLBACK_NONE.
Returns
On success, 0. On failure, ERR.

Definition at line 469 of file paging.h.

References pml_entry_t::addr, page_table_traverse_t::entry, ERR, pml_entry_t::highCallbackId, pml_entry_t::lowCallbackId, PAGE_SIZE, page_table_traverse(), PAGE_TABLE_TRAVERSE_CREATE, pageAmount, PML_ADDR_OFFSET_BITS, PML_ENSURE_LOWER_HALF, PML_PRESENT, pml_entry_t::present, and pml_entry_t::raw.

Referenced by vmm_alloc(), and vmm_map_pages().

◆ page_table_page_buffer_flush()

static void page_table_page_buffer_flush ( page_table_t table,
page_table_page_buffer_t buffer 
)
inlinestatic

Flushes the page buffer, freeing any remaining pages.

Used as a helper for page_table_clear().

Parameters
tableThe page table.
bufferThe page buffer.

Definition at line 576 of file paging.h.

References buffer, and page_table_t::freePages.

Referenced by page_table_clear().

◆ page_table_page_buffer_push()

static void page_table_page_buffer_push ( page_table_t table,
page_table_page_buffer_t buffer,
void *  address 
)
inlinestatic

Pushes a page table level onto the page buffer, freeing the buffer if full.

Used as a helper for page_table_clear().

Parameters
table — The page table.
buffer — The page buffer.
address — The address to push.

Definition at line 556 of file paging.h.

References address, buffer, page_table_t::freePages, page_table_page_buffer_t::pages, and PML_PAGE_BUFFER_SIZE.

Referenced by page_table_clear(), and page_table_clear_pml1_pml2_pml3().

◆ page_table_set_flags()

static uint64_t page_table_set_flags ( page_table_t table,
void *  virtAddr,
uint64_t  pageAmount,
pml_flags_t  flags 
)
inlinestatic

Sets the flags for a range of pages in the page table.

If a page is not currently mapped, it is skipped.

Parameters
table — The page table.
virtAddr — The starting virtual address.
pageAmount — The number of pages to update.
flags — The new flags to set. The PML_OWNED flag is preserved.
Returns
On success, 0. On failure, ERR.

Definition at line 711 of file paging.h.

References page_table_traverse_t::entry, ERR, pml_entry_t::owned, PAGE_SIZE, page_table_traverse(), PAGE_TABLE_TRAVERSE_CREATE, pageAmount, PML_FLAGS_MASK, PML_NONE, PML_OWNED, pml_entry_t::present, pml_entry_t::raw, and tlb_invalidate().

Referenced by vmm_protect().

◆ page_table_traverse()

static uint64_t page_table_traverse ( page_table_t table,
page_table_traverse_t traverse,
uintptr_t  virtAddr,
pml_flags_t  flags 
)
inlinestatic

Allows for fast traversal of the page table by caching previously accessed layers.

If the present flag is not set in flags then no new levels will be allocated, and if non-present pages are encountered the function will return ERR.

Note that higher level flags are or'd with PML_WRITE | PML_USER since only the permissions of a higher level will apply to lower levels, meaning that the lowest level should be the one with the actual desired permissions. Additionally, the PML_GLOBAL flag is not allowed on the PML3 level.

Parameters
table — The page table.
traverse — The helper structure used to cache each layer.
virtAddr — The target virtual address.
flags — The flags to assign to newly allocated levels; if the present flag is not set then don't allocate new levels.
Returns
On success, 0. On failure, ERR.

Definition at line 279 of file paging.h.

References pml_t::entries, page_table_traverse_t::entry, ERR, page_table_traverse_t::oldIdx1, page_table_traverse_t::oldIdx2, page_table_traverse_t::oldIdx3, page_table_get_pml(), page_table_traverse_t::pml1, PML1, page_table_traverse_t::pml1Valid, page_table_traverse_t::pml2, PML2, page_table_traverse_t::pml2Valid, page_table_traverse_t::pml3, PML3, page_table_traverse_t::pml3Valid, PML4, page_table_t::pml4, PML_ADDR_TO_INDEX, PML_GLOBAL, PML_USER, and PML_WRITE.

Referenced by page_table_clear(), page_table_collect_callbacks(), page_table_get_phys_addr(), page_table_is_mapped(), page_table_is_pinned(), page_table_is_unmapped(), page_table_map(), page_table_map_pages(), page_table_set_flags(), page_table_unmap(), space_pin_depth_dec(), and space_pin_depth_inc().

◆ page_table_unmap()

static void page_table_unmap ( page_table_t table,
void *  virtAddr,
uint64_t  pageAmount 
)
inlinestatic

Unmaps a range of virtual addresses from the page table.

If a page is not currently mapped, it is skipped.

Will NOT free owned pages, instead it only sets the present flag to 0. This is to help with TLB shootdowns where we must unmap, wait for all CPUs to acknowledge the unmap, and only then free the pages. Use page_table_clear() to free owned pages separately.

Parameters
table — The page table.
virtAddr — The starting virtual address.
pageAmount — The number of pages to unmap.

Definition at line 515 of file paging.h.

References page_table_traverse_t::entry, ERR, PAGE_SIZE, page_table_traverse(), PAGE_TABLE_TRAVERSE_CREATE, pageAmount, PML_NONE, pml_entry_t::present, and tlb_invalidate().

Referenced by vmm_page_table_unmap_with_shootdown().

◆ pml_accessible_addr()

static uintptr_t pml_accessible_addr ( pml_entry_t  entry)
inlinestatic

Retrieves the address from a page table entry and converts it to an accessible address.

The accessible address depends on if we are in the kernel or the bootloader as the bootloader has physical memory identity mapped to the higher half of the address space, while the kernel does not and instead has the higher half mapped to the lower half of the address space.

Parameters
entry — The page table entry.
Returns
The accessible address contained in the entry.

Definition at line 61 of file paging.h.

References pml_entry_t::addr, PML_ADDR_OFFSET_BITS, and PML_LOWER_TO_HIGHER.

Referenced by page_table_clear(), page_table_find_unmapped_region(), page_table_get_pml(), and pml_free().

◆ pml_free()

static void pml_free ( page_table_t table,
pml_t pml,
pml_level_t  level 
)
inlinestatic

Recursively frees a page table level, all its children and any owned pages.

Parameters
table — The page table.
pml — The current page table level to free.
level — The current level of the page table.

Definition at line 120 of file paging.h.

References pml_t::entries, page_table_t::freePages, pml_entry_t::owned, PML1, pml_accessible_addr(), pml_free(), PML_INDEX_AMOUNT, and pml_entry_t::present.

Referenced by page_table_deinit(), and pml_free().

◆ pml_is_empty()

static bool pml_is_empty ( pml_t pml)
inlinestatic

Checks if a page table level is empty (all entries are 0).

Used as a helper for page_table_clear().

Parameters
pml — The page table level to check.
Returns
true if all entries are raw 0, false otherwise.

Definition at line 78 of file paging.h.

References pml_t::entries, PML_INDEX_AMOUNT, and pml_entry_t::raw.

Referenced by page_table_clear_pml1_pml2_pml3().

◆ pml_new()

static uint64_t pml_new ( page_table_t table,
pml_t **  outPml 
)
inlinestatic

Allocates and initializes a new page table level.

Parameters
table — The page table.
outPml — Will be filled with the newly allocated page table level.
Returns
On success, 0. On failure, ERR.

Definition at line 97 of file paging.h.

References page_table_t::allocPages, ERR, memset(), and PAGE_SIZE.

Referenced by page_table_get_pml(), and page_table_init().

◆ tlb_invalidate()

static void tlb_invalidate ( void *  virtAddr,
uint64_t  pageCount 
)
inlinestatic

Invalidates a region of pages in the TLB.

Even if a page table entry is modified, the CPU might still use a cached version of the entry in the TLB. To ensure our changes are detected we must invalidate this cache using invlpg or if many pages are changed, a full TLB flush by reloading CR3.

Parameters
virtAddr — The starting virtual address of the region to invalidate.
pageCount — The number of pages to invalidate.

Definition at line 31 of file paging.h.

References cr3_read(), cr3_write(), and PAGE_SIZE.

Referenced by page_table_set_flags(), page_table_unmap(), and vmm_shootdown_handler().