131 panic(NULL, "Attempted to free address space still in use by CPUs");
195 panic(NULL, "CPU not found in old space's CPU list");
314 if (newPinnedPage == NULL)
382 if (space == NULL || address == NULL || terminator == NULL || objectSize == 0 || maxCount == 0)
388 uint64_t terminatorMatchedBytes = 0;
400 while (current < end)
426 for (uintptr_t scanAddr = current; scanAddr < scanEnd; scanAddr++)
429 if (*((uint8_t*)scanAddr) == ((uint8_t*)terminator)[terminatorMatchedBytes])
431 terminatorMatchedBytes++;
432 if (terminatorMatchedBytes == objectSize)
439 scanAddr += objectSize - terminatorMatchedBytes - 1;
440 terminatorMatchedBytes = 0;
480 if (space == NULL || (addr == NULL && length != 0))
531 if (space == NULL || mapping == NULL || length == 0)
553 if (virtAddr != NULL &&
566 if (virtAddr == NULL)
570 if (virtAddr == NULL)
584 if (physAddr != NULL)
614 if (newCallbacks == NULL)
633 callback->func = func;
697 panic(NULL, "CPU %d shootdown buffer overflow", cpu->id);
701 shootdown->space = space;
708 panic(NULL, "Failed to send TLB shootdown IPI to CPU %d", cpu->id);
718 panic(NULL, "TLB shootdown timeout in space %p for region %p - %p", space, virtAddr,
722 asm volatile("pause");
741 if (space == NULL || mapping == NULL)
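
The excerpts around source lines 426-440 are the terminator match used by space_pin_terminated(): bytes are compared one at a time against the terminator object, and on a mismatch the scan skips straight to the next object boundary. Below is a minimal user-space sketch of the same matching logic, operating on an ordinary buffer instead of pinned user memory; find_terminated_length() and its return convention are illustrative, not kernel API.

#include <stdint.h>
#include <stdio.h>

// Returns the number of objects up to and including the terminator,
// or 0 if no terminator is found within maxCount objects.
static uint64_t find_terminated_length(const void *buffer, const void *terminator,
    uint8_t objectSize, uint64_t maxCount)
{
    const uint8_t *bytes = buffer;
    const uint8_t *term = terminator;
    uint64_t matched = 0;

    for (uint64_t i = 0; i < (uint64_t)objectSize * maxCount; i++)
    {
        if (bytes[i] == term[matched])
        {
            matched++;
            if (matched == objectSize) // full terminator object matched
            {
                return i / objectSize + 1;
            }
        }
        else
        {
            // Mismatch: skip to the start of the next object, mirroring
            // "scanAddr += objectSize - terminatorMatchedBytes - 1" plus the
            // loop increment in the kernel code, then start matching over.
            i += objectSize - matched - 1;
            matched = 0;
        }
    }
    return 0;
}

int main(void)
{
    const char *argv[] = {"usr", "bin", "ls", NULL}; // NULL-terminated pointer array
    const char *terminator = NULL;
    uint64_t count = find_terminated_length(argv, &terminator, sizeof(const char *), 16);
    printf("objects including terminator: %llu\n", (unsigned long long)count);
    return 0;
}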
#define assert(expression)
uint64_t ipi_send(cpu_t *cpu, ipi_flags_t flags, ipi_func_t func, void *private)
Send an IPI to one or more CPUs.
@ IPI_SINGLE
Send the IPI to the specified CPU.
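
The TLB shootdown path in the listing above (source lines 697-722) notifies each CPU on the space's list with one of these IPIs. The following is a hedged sketch of the calling pattern built only from the signatures shown on this page; the handler body is elided because the layout of ipi_func_data_t is not documented here, and ERR as the failure value of ipi_send() is an assumption.

// Handler shape taken from space_tlb_shootdown_ipi_handler() below.
static void example_ipi_handler(ipi_func_data_t *data)
{
    (void)data;
    // ... invalidate the requested range with tlb_invalidate() and acknowledge ...
}

static void example_notify_cpu(cpu_t *cpu, space_t *space)
{
    // IPI_SINGLE targets exactly the CPU passed in, as in the shootdown path.
    if (ipi_send(cpu, IPI_SINGLE, example_ipi_handler, space) == ERR)
    {
        panic(NULL, "Failed to send TLB shootdown IPI to CPU %d", cpu->id);
    }
}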
bool stack_pointer_is_in_stack(stack_pointer_t *stack, uintptr_t addr, uint64_t length)
Check if a region is within the stack.
void stack_pointer_poke(uint64_t offset)
Poke the stack to ensure that a page fault will occur at the given offset.
static cpu_t * cpu_get_unsafe(void)
Gets the current CPU structure without disabling interrupts.
static uint16_t cpu_amount(void)
Gets the number of identified CPUs.
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
static void tlb_invalidate(void *virtAddr, uint64_t pageCount)
Invalidates a region of pages in the TLB.
pml_index_t
Indexes into a pml level.
#define PML_CALLBACK_NONE
Special callback ID that indicates no callback is associated with the page.
#define PML_ADDR_TO_INDEX(addr, level)
Calculates the index into a page table level for a given virtual address.
static uint64_t page_table_map(page_table_t *table, void *virtAddr, void *physAddr, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps a range of virtual addresses to physical addresses in the page table.
uint8_t pml_callback_id_t
Callback ID type.
static void page_table_load(page_table_t *table)
Loads the page table into the CR3 register if it is not already loaded.
#define PAGE_TABLE_TRAVERSE_CREATE
Create a page_table_traverse_t initializer.
static bool page_table_is_mapped(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if a range of virtual addresses is completely mapped.
#define PML_MAX_CALLBACK
Maximum number of callbacks that can be registered for a page table.
static void page_table_deinit(page_table_t *table)
Deinitializes a page table, freeing all allocated pages.
static bool page_table_is_unmapped(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if a range of virtual addresses is completely unmapped.
static uint64_t page_table_find_unmapped_region(page_table_t *table, void *startAddr, void *endAddr, uint64_t pageAmount, void **outAddr)
Finds the first contiguous unmapped region with the given number of pages within the specified address range.
static uint64_t page_table_init(page_table_t *table, pml_alloc_pages_t allocPages, pml_free_pages_t freePages)
Initializes a page table.
static uint64_t page_table_traverse(page_table_t *table, page_table_traverse_t *traverse, uintptr_t virtAddr, pml_flags_t flags)
Allows for fast traversal of the page table by caching previously accessed layers.
#define PML_ENSURE_LOWER_HALF(addr)
Ensures that the given address is in the lower half of the address space.
static uint64_t page_table_count_pages_with_flags(page_table_t *table, void *virtAddr, uint64_t pageAmount, pml_flags_t flags)
Counts the number of pages in a range that have all the specified flags set.
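
Taken together, the page_table_* helpers above follow an init / map / query / deinit life cycle. The sketch below is rough and kernel-internal; the allocator hooks, the flags value, and the use of ERR as the failure return are assumptions carried over from the signatures rather than verified behaviour.

static uint64_t example_page_table_usage(pml_alloc_pages_t allocPages, pml_free_pages_t freePages,
    void *virtAddr, void *physAddr, uint64_t pageAmount, pml_flags_t flags)
{
    page_table_t table;
    if (page_table_init(&table, allocPages, freePages) == ERR)
    {
        return ERR;
    }

    // No per-page callback is attached, so PML_CALLBACK_NONE is passed.
    if (page_table_map(&table, virtAddr, physAddr, pageAmount, flags, PML_CALLBACK_NONE) == ERR)
    {
        page_table_deinit(&table);
        return ERR;
    }

    assert(page_table_is_mapped(&table, virtAddr, pageAmount));

    page_table_deinit(&table); // frees every page the table allocated
    return 0;
}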
uint64_t pmm_alloc_pages(void **addresses, uint64_t count)
Allocates multiple physical pages.
void pmm_free(void *address)
Frees a single physical page.
void pmm_free_pages(void **addresses, uint64_t count)
Frees multiple physical pages.
void * pmm_alloc_bitmap(uint64_t count, uintptr_t maxAddr, uint64_t alignment)
Allocates a contiguous region of physical pages managed by the bitmap.
void * pmm_alloc(void)
Allocates a single physical page.
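
The PMM entry points above come in single-page and batched forms. A short sketch of both follows; treating NULL and ERR as the respective failure values follows the macros listed further down and is otherwise an assumption.

static uint64_t example_pmm_usage(void)
{
    // Single page: pmm_alloc() hands back one physical page, pmm_free() returns it.
    void *page = pmm_alloc();
    if (page == NULL)
    {
        return ERR;
    }
    pmm_free(page);

    // Batched form: fill an array of page addresses in one call, free them together.
    void *pages[8];
    if (pmm_alloc_pages(pages, 8) == ERR)
    {
        return ERR;
    }
    pmm_free_pages(pages, 8);
    return 0;
}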
void space_unpin(space_t *space, const void *address, uint64_t length)
Unpins pages in a region previously pinned with space_pin() or space_pin_string().
bool space_is_mapped(space_t *space, const void *virtAddr, uint64_t length)
Checks if a virtual memory region is fully mapped.
void * space_mapping_end(space_t *space, space_mapping_t *mapping, errno_t err)
Performs cleanup after changes to the address space mappings.
pml_callback_id_t space_alloc_callback(space_t *space, uint64_t pageAmount, space_callback_func_t func, void *private)
Allocate a callback.
uint64_t space_check_access(space_t *space, const void *addr, uint64_t length)
Checks if a virtual memory region is within the allowed address range of the space.
void space_free_callback(space_t *space, pml_callback_id_t callbackId)
Free a callback.
space_flags_t
Flags for space initialization.
uint64_t space_pin_terminated(space_t *space, const void *address, const void *terminator, uint8_t objectSize, uint64_t maxCount, stack_pointer_t *userStack)
Pins a region of memory terminated by a terminator value.
uint64_t space_user_page_count(space_t *space)
Get the number of user pages allocated in the address space.
void(* space_callback_func_t)(void *private)
Space callback function.
uint64_t space_pin(space_t *space, const void *buffer, uint64_t length, stack_pointer_t *userStack)
Pins pages within a region of the address space.
void space_load(space_t *space)
Loads a virtual address space.
void space_tlb_shootdown(space_t *space, void *virtAddr, uint64_t pageAmount)
Performs a TLB shootdown for a region of the address space and waits for acknowledgements.
void space_deinit(space_t *space)
Deinitializes a virtual address space.
uint64_t space_init(space_t *space, uintptr_t startAddress, uintptr_t endAddress, space_flags_t flags)
Initializes a virtual address space.
#define SPACE_TLB_SHOOTDOWN_TIMEOUT
The maximum time to wait for acknowledgements from other CPUs before panicking.
uint64_t space_mapping_start(space_t *space, space_mapping_t *mapping, void *virtAddr, void *physAddr, uint64_t length, pml_flags_t flags)
Prepare for changes to the address space mappings.
@ SPACE_MAP_KERNEL_HEAP
Map the kernel heap into the address space.
@ SPACE_MAP_IDENTITY
Map the identity mapped physical memory into the address space.
@ SPACE_MAP_KERNEL_BINARY
Map the kernel binary into the address space.
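
For the space_* API, the usual shape is: initialize a space over a user address range with the SPACE_MAP_* flags, pin any user memory before touching it, and unpin afterwards. The sketch below is hedged; combining the flags with bitwise OR, passing NULL for the user stack, and ERR as the failure value are all assumptions.

static uint64_t example_space_usage(const void *userBuffer, uint64_t length)
{
    space_t space;
    if (space_init(&space, VMM_USER_SPACE_MIN, VMM_USER_SPACE_MAX,
            SPACE_MAP_KERNEL_HEAP | SPACE_MAP_IDENTITY | SPACE_MAP_KERNEL_BINARY) == ERR)
    {
        return ERR;
    }

    // Pin the region so its pages cannot be unmapped or freed while in use.
    if (space_pin(&space, userBuffer, length, NULL) == ERR)
    {
        space_deinit(&space);
        return ERR;
    }

    // ... safely read or write the pinned region here ...

    space_unpin(&space, userBuffer, length);
    space_deinit(&space);
    return 0;
}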
#define VMM_KERNEL_HEAP_MAX
The maximum address for the kernel heap.
#define VMM_KERNEL_BINARY_MAX
The maximum address for the content of the kernel binary.
space_t * vmm_kernel_space_get(void)
Retrieves the kernel's address space.
#define VMM_IDENTITY_MAPPED_MIN
The minimum address for the identity mapped physical memory.
#define VMM_USER_SPACE_MAX
The maximum address for user space.
#define VMM_KERNEL_BINARY_MIN
The minimum address for the content of the kernel binary.
#define VMM_USER_SPACE_MIN
The minimum address for user space.
#define VMM_MAX_SHOOTDOWN_REQUESTS
Maximum number of shootdown requests that can be queued per CPU.
#define VMM_IDENTITY_MAPPED_MAX
The maximum address for the identity mapped physical memory.
#define VMM_KERNEL_HEAP_MIN
The minimum address for the kernel heap.
clock_t clock_uptime(void)
Retrieve the time in nanoseconds since boot.
static void lock_init(lock_t *lock)
Initializes a lock.
#define LOCK_SCOPE(lock)
Acquires a lock for the remainder of the current scope.
static void lock_release(lock_t *lock)
Releases a lock.
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
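
The lock helpers above are used either as an explicit acquire/release pair or through LOCK_SCOPE(), which holds the lock for the remainder of the enclosing scope. A minimal sketch follows; passing the lock to LOCK_SCOPE() by pointer is assumed from the other signatures.

static lock_t exampleLock; // lock_init(&exampleLock) is assumed to run during setup
static uint64_t exampleCounter;

static void example_explicit(void)
{
    lock_acquire(&exampleLock);
    exampleCounter++;
    lock_release(&exampleLock);
}

static void example_scoped(void)
{
    LOCK_SCOPE(&exampleLock); // released automatically when the scope ends
    exampleCounter++;
}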
void map_init(map_t *map)
Initialize a map.
void map_entry_init(map_entry_t *entry)
Initialize a map entry.
static map_key_t map_key_uint64(uint64_t uint64)
Create a map key from a uint64_t.
uint64_t map_insert(map_t *map, const map_key_t *key, map_entry_t *value)
Insert a key-value pair into the map.
void map_remove(map_t *map, map_entry_t *entry)
Remove an entry from the map.
map_entry_t * map_get(map_t *map, const map_key_t *key)
Get a value from the map by key.
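
The map above appears to be intrusive: callers embed a map_entry_t in their own structure (as the space does for its pinned pages) and recover the container with CONTAINER_OF(). Below is a sketch of that pattern with a hypothetical record type; map_init() is assumed to have already run on the map, and ERR as the insert failure value is an assumption.

typedef struct
{
    map_entry_t entry;
    uint64_t pinCount;
} example_pinned_page_t;

static uint64_t example_map_usage(map_t *map, uintptr_t pageAddr, example_pinned_page_t *page)
{
    map_key_t key = map_key_uint64(pageAddr);

    map_entry_init(&page->entry);
    if (map_insert(map, &key, &page->entry) == ERR)
    {
        return ERR;
    }

    map_entry_t *found = map_get(map, &key);
    if (found != NULL)
    {
        // Recover the containing record from the embedded entry.
        CONTAINER_OF(found, example_pinned_page_t, entry)->pinCount++;
    }

    map_remove(map, &page->entry);
    return 0;
}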
#define EINVAL
Invalid argument.
#define EFAULT
Bad address.
#define ENOMEM
Out of memory.
#define EOVERFLOW
Value too large for defined data type.
#define errno
Error number variable.
void bitmap_init(bitmap_t *map, void *buffer, uint64_t length)
Initialize a bitmap.
void bitmap_clear(bitmap_t *map, uint64_t index)
Clear a bit in the bitmap.
void bitmap_set(bitmap_t *map, uint64_t index)
Set a bit in the bitmap.
uint64_t bitmap_find_first_clear(bitmap_t *map, uint64_t startIdx, uint64_t endIdx)
Find the first clear bit in the bitmap.
#define BITMAP_FOR_EACH_SET(idx, map)
Iterate over each set bit in the bitmap.
#define BITMAP_BITS_TO_BYTES(bits)
Convert number of bits to number of bytes.
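
The bitmap above is what the space uses to hand out callback IDs: find the first clear bit, set it to allocate, clear it to free. The sketch below follows that pattern; sizing the buffer in bytes via BITMAP_BITS_TO_BYTES() and treating ERR as the "nothing free" return of bitmap_find_first_clear() are assumptions.

static uint8_t exampleBuffer[BITMAP_BITS_TO_BYTES(64)];
static bitmap_t exampleBitmap;

static void example_bitmap_setup(void)
{
    bitmap_init(&exampleBitmap, exampleBuffer, BITMAP_BITS_TO_BYTES(64));
}

static uint64_t example_id_alloc(void)
{
    uint64_t id = bitmap_find_first_clear(&exampleBitmap, 0, 64);
    if (id == ERR) // "no clear bit" return value is assumed
    {
        return ERR;
    }
    bitmap_set(&exampleBitmap, id);
    return id;
}

static void example_id_free(uint64_t id)
{
    bitmap_clear(&exampleBitmap, id);
}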
#define LIST_FOR_EACH(elem, list, member)
Iterates over a list.
static void list_push_back(list_t *list, list_entry_t *entry)
Pushes an entry to the end of the list.
static void list_remove(list_t *list, list_entry_t *entry)
Removes a list entry from its current list.
static bool list_is_empty(list_t *list)
Checks if a list is empty.
static void list_init(list_t *list)
Initializes a list.
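
The list above is intrusive as well: a cpu_t embeds a list_entry_t (the entry field listed near the bottom of this page) and the space walks its cpus list with LIST_FOR_EACH(). A small sketch with a hypothetical node type follows; that LIST_FOR_EACH() expects elem to be declared by the caller is assumed from the macro's parameters.

typedef struct
{
    list_entry_t entry;
    uint64_t id;
} example_node_t;

static void example_list_usage(list_t *list, example_node_t *node)
{
    list_init(list);
    list_push_back(list, &node->entry);

    // Visit every node; elem is bound to the containing example_node_t
    // through its entry member, much like iterating a space's cpus list.
    example_node_t *elem;
    LIST_FOR_EACH(elem, list, entry)
    {
        elem->id++;
    }

    list_remove(list, &node->entry);
    assert(list_is_empty(list));
}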
#define ROUND_DOWN(number, multiple)
#define ROUND_UP(number, multiple)
#define PAGE_SIZE
The size of a memory page in bytes.
#define BYTES_TO_PAGES(amount)
Convert a size in bytes to pages.
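
ROUND_DOWN(), ROUND_UP(), and BYTES_TO_PAGES() are the page arithmetic behind the pinning and mapping paths: an arbitrary byte region is widened outward to page boundaries before it can be handed to the page table. A small worked sketch follows; the concrete numbers in the comment assume the conventional 4096-byte PAGE_SIZE.

// e.g. virtAddr = 0x1234, length = 0x10: start rounds down to 0x1000,
// end rounds up to 0x2000, so the region covers 1 page (PAGE_SIZE == 4096 assumed).
static uint64_t example_region_page_count(uintptr_t virtAddr, uint64_t length)
{
    uintptr_t start = ROUND_DOWN(virtAddr, PAGE_SIZE);
    uintptr_t end = ROUND_UP(virtAddr + length, PAGE_SIZE);
    return (end - start) / PAGE_SIZE;
}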
#define NULL
Pointer error value.
#define ERR
Integer error value.
#define CONTAINER_OF(ptr, type, member)
Container of macro.
__UINT64_TYPE__ clock_t
A nanosecond time.
static uintptr_t address
Mapped virtual address of the HPET registers.
EFI_PHYSICAL_ADDRESS buffer
static const path_flag_t flags[]
static uint64_t pageAmount
#define RFLAGS_INTERRUPT_ENABLE
static uint64_t rflags_read()
static void space_pin_depth_dec(space_t *space, const void *address, uint64_t pageAmount)
static uint64_t space_pmm_bitmap_alloc_pages(void **pages, uint64_t pageAmount)
static void space_map_kernel_space_region(space_t *space, uintptr_t start, uintptr_t end)
static void space_update_free_address(space_t *space, uintptr_t virtAddr, uint64_t pageAmount)
static void space_tlb_shootdown_ipi_handler(ipi_func_data_t *data)
static uint64_t space_pin_depth_inc(space_t *space, const void *address, uint64_t pageAmount)
static void space_align_region(void **virtAddr, uint64_t *length)
static void * space_find_free_region(space_t *space, uint64_t pageAmount)
static uint64_t space_populate_user_region(space_t *space, const void *buffer, uint64_t pageAmount)
static void space_unmap_kernel_space_region(space_t *space, uintptr_t start, uintptr_t end)
#define atomic_store(object, desired)
#define atomic_load(object)
#define atomic_fetch_add(object, operand)
#define atomic_init(obj, value)
__UINTPTR_TYPE__ uintptr_t
_PUBLIC void * malloc(size_t size)
_PUBLIC void free(void *ptr)
_PUBLIC void * memcpy(void *_RESTRICT s1, const void *_RESTRICT s2, size_t n)
_PUBLIC void * memset(void *s, int c, size_t n)
IPI function data structure.
pml_alloc_pages_t allocPages
Helper structure for fast traversal of the page table.
uint64_t present
If set the page is present in memory and readable.
An entry in a page table without a specified address or callback ID.
pml_entry_t entries[PML_INDEX_AMOUNT]
space_callback_func_t func
Helper structure for managing address space mappings.
uint64_t pinCount
The number of times this page is pinned, will be unpinned when it reaches 0.
Virtual address space structure.
map_t pinnedPages
Map of pages with a pin depth greater than 1.
uintptr_t startAddress
The start address for allocations in this address space.
uint64_t callbacksLength
Length of the callbacks array.
uintptr_t endAddress
The end address for allocations in this address space.
page_table_t pageTable
The page table associated with the address space.
bitmap_t callbackBitmap
Bitmap to track available callback IDs.
list_t cpus
List of CPUs using this address space.
uintptr_t freeAddress
The next available free virtual address in this address space.
atomic_uint16_t shootdownAcks
space_callback_t * callbacks
uint64_t bitmapBuffer[BITMAP_BITS_TO_QWORDS(PML_MAX_CALLBACK)]
Structure to define a stack in memory.
space_t * currentSpace
Will only be accessed by the owner CPU, so no lock.
list_entry_t entry
Used by a space to know which CPUs are using it, protected by the space lock.
vmm_shootdown_t shootdowns[VMM_MAX_SHOOTDOWN_REQUESTS]
static space_t kernelSpace