50 panic(
NULL,
"Failed to initialize kernel address space");
73 panic(
NULL,
"Memory descriptor %d has invalid virtual address 0x%016lx", i, desc->VirtualStart);
77 panic(
NULL,
"Memory descriptor %d has invalid physical address 0x%016lx", i, desc->PhysicalStart);
83 panic(
NULL,
"Failed to map memory descriptor %d (phys=0x%016lx-0x%016lx virt=0x%016lx)", i,
84 desc->PhysicalStart, desc->PhysicalStart + desc->NumberOfPages *
PAGE_SIZE, desc->VirtualStart);
93 LOG_INFO(
"kernel virt=[0x%016lx-0x%016lx] phys=[0x%016lx-0x%016lx]\n", minVaddr, maxVaddr,
98 panic(
NULL,
"Failed to map kernel memory");
112 LOG_INFO(
"loading kernel space... ");
199 while (remainingPages != 0)
203 void* addresses[maxBatchSize];
204 uint64_t batchSize =
MIN(remainingPages, maxBatchSize);
220 remainingPages -= batchSize;
336 if (virtAddr ==
NULL || length == 0)
373 assert(index < space->callbacksLength);
398 if (space ==
NULL || virtAddr ==
NULL || length == 0)
406 return vmm_unmap(space, virtAddr, length);
#define assert(expression)
#define BOOT_MEMORY_MAP_GET_DESCRIPTOR(map, index)
#define SYSCALL_DEFINE(num, returnType,...)
Macro to define a syscall.
static cpu_t * cpu_get_unsafe(void)
Gets the current CPU structure without disabling interrupts.
#define CPU_ID_BOOTSTRAP
ID of the bootstrap CPU.
boot_info_t * boot_info_get(void)
Gets the boot info structure.
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panics the kernel, printing a message and halting.
#define LOG_INFO(format,...)
#define LOG_DEBUG(format,...)
pml_index_t
Indexes into a pml level.
static void page_table_collect_callbacks(page_table_t *table, void *virtAddr, uint64_t pageAmount, uint64_t *callbacks)
Collects the number of pages associated with each callback ID in the specified range.
#define PML_CALLBACK_NONE
Special callback ID that indicates no callback is associated with the page.
static void page_table_unmap(page_table_t *table, void *virtAddr, uint64_t pageAmount)
Unmaps a range of virtual addresses from the page table.
static uint64_t page_table_map(page_table_t *table, void *virtAddr, void *physAddr, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps a range of virtual addresses to physical addresses in the page table.
uint8_t pml_callback_id_t
Callback ID type.
static void page_table_clear(page_table_t *table, void *virtAddr, uint64_t pageAmount)
Clears page table entries in the specified range and frees any owned pages.
#define PML_MAX_CALLBACK
Maximum number of callbacks that can be registered for a page table.
#define PML_LOWER_HALF_END
The end of the lower half of the address space.
static bool page_table_is_unmapped(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if a range of virtual addresses is completely unmapped.
#define PML_HIGHER_HALF_START
The start of the higher half of the address space.
static uint64_t page_table_set_flags(page_table_t *table, void *virtAddr, uint64_t pageAmount, pml_flags_t flags)
Sets the flags for a range of pages in the page table.
static bool page_table_is_pinned(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if any page in a range is pinned.
static uint64_t page_table_map_pages(page_table_t *table, void *virtAddr, void **pages, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps an array of physical pages to contiguous virtual addresses in the page table.
#define PML_ENSURE_LOWER_HALF(addr)
Ensures that the given address is in the lower half of the address space.
@ PML_INDEX_LOWER_HALF_MIN
@ PML_INDEX_LOWER_HALF_MAX
uint64_t pmm_alloc_pages(void **addresses, uint64_t count)
Allocates multiple physical pages.
void * space_mapping_end(space_t *space, space_mapping_t *mapping, errno_t err)
Performs cleanup after changes to the address space mappings.
pml_callback_id_t space_alloc_callback(space_t *space, uint64_t pageAmount, space_callback_func_t func, void *private)
Allocates a callback.
uint64_t space_check_access(space_t *space, const void *addr, uint64_t length)
Checks if a virtual memory region is within the allowed address range of the space.
void space_free_callback(space_t *space, pml_callback_id_t callbackId)
Frees a callback.
void(* space_callback_func_t)(void *private)
Space callback function.
void space_tlb_shootdown(space_t *space, void *virtAddr, uint64_t pageAmount)
Performs a TLB shootdown for a region of the address space, and waits for acknowledgements.
uint64_t space_init(space_t *space, uintptr_t startAddress, uintptr_t endAddress, space_flags_t flags)
Initializes a virtual address space.
uint64_t space_mapping_start(space_t *space, space_mapping_t *mapping, void *virtAddr, void *physAddr, uint64_t length, pml_flags_t flags)
Prepares for changes to the address space mappings.
#define VMM_KERNEL_HEAP_MAX
The maximum address for the kernel heap.
void vmm_cpu_ctx_init(vmm_cpu_ctx_t *ctx)
Initializes a per-CPU VMM context and performs per-CPU VMM initialization.
#define VMM_KERNEL_STACKS_MAX
The maximum address for kernel stacks.
void vmm_init(void)
Initializes the Virtual Memory Manager.
#define VMM_KERNEL_BINARY_MAX
The maximum address for the content of the kernel binary.
space_t * vmm_kernel_space_get(void)
Retrieves the kernel's address space.
void vmm_kernel_space_load(void)
Loads the kernel's address space into the current CPU.
#define VMM_KERNEL_STACKS_MIN
The minimum address for kernel stacks.
void * vmm_alloc(space_t *space, void *virtAddr, uint64_t length, pml_flags_t pmlFlags, vmm_alloc_flags_t allocFlags)
Allocates and maps virtual memory in a given address space.
#define VMM_IDENTITY_MAPPED_MIN
The minimum address for the identity mapped physical memory.
#define VMM_USER_SPACE_MAX
The maximum address for user space.
#define VMM_KERNEL_BINARY_MIN
The minimum address for the content of the kernel binary.
#define VMM_USER_SPACE_MIN
The minimum address for user space.
void * vmm_map(space_t *space, void *virtAddr, void *physAddr, uint64_t length, pml_flags_t flags, space_callback_func_t func, void *private)
Maps physical memory to virtual memory in a given address space.
void * vmm_protect(space_t *space, void *virtAddr, uint64_t length, pml_flags_t flags)
Changes memory protection flags for a virtual memory region in a given address space.
#define VMM_IDENTITY_MAPPED_MAX
The maximum address for the identity mapped physical memory.
void * vmm_unmap(space_t *space, void *virtAddr, uint64_t length)
Unmaps virtual memory from a given address space.
#define VMM_KERNEL_HEAP_MIN
The minimum address for the kernel heap.
pml_flags_t vmm_prot_to_flags(prot_t prot)
Converts the user space memory protection flags to page table entry flags.
void * vmm_map_pages(space_t *space, void *virtAddr, void **pages, uint64_t pageAmount, pml_flags_t flags, space_callback_func_t func, void *private)
Maps an array of physical pages to virtual memory in a given address space.
vmm_alloc_flags_t
Flags for vmm_alloc().
@ VMM_ALLOC_FAIL_IF_MAPPED
If set and any page is already mapped, fail and set errno to EEXIST.
process_t * sched_process(void)
Retrieves the process of the currently running thread.
static void lock_init(lock_t *lock)
Initializes a lock.
static void lock_release(lock_t *lock)
Releases a lock.
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
#define ENOENT
No such file or directory.
#define ENOSPC
No space left on device.
#define EEXIST
File exists.
#define EINVAL
Invalid argument.
#define EFAULT
Bad address.
#define ENOMEM
Out of memory.
#define EBUSY
Device or resource busy.
#define errno
Error number variable.
#define BITMAP_FOR_EACH_SET(idx, map)
Iterate over each set bit in the bitmap.
void elf64_get_loadable_bounds(const Elf64_File *elf, Elf64_Addr *minAddr, Elf64_Addr *maxAddr)
Get the loadable virtual memory bounds of an ELF file.
uint64_t Elf64_Addr
ELF64 Unsigned program address.
static void list_push_back(list_t *list, list_entry_t *entry)
Pushes an entry to the end of the list.
static void list_entry_init(list_entry_t *entry)
Initializes a list entry.
#define PAGE_SIZE
The size of a memory page in bytes.
#define BYTES_TO_PAGES(amount)
Convert a size in bytes to pages.
prot_t
Memory protection flags.
@ PROT_READ
Readable memory.
@ PROT_WRITE
Writable memory.
@ PROT_NONE
Invalid memory, cannot be accessed.
#define NULL
Pointer error value.
#define ERR
Integer error value.
static uintptr_t address
Mapped virtual address of the HPET registers.
static const path_flag_t flags[]
static uint64_t pageAmount
static void cr3_write(uint64_t value)
#define CR4_PAGE_GLOBAL_ENABLE
static void cr4_write(uint64_t value)
static uint64_t cr4_read()
__UINTPTR_TYPE__ uintptr_t
An entry in a page table without a specified address or callback ID.
pml_entry_t entries[PML_INDEX_AMOUNT]
space_callback_func_t func
Helper structure for managing address space mappings.
Virtual address space structure.
page_table_t pageTable
The page table associated with the address space.
bitmap_t callbackBitmap
Bitmap to track available callback IDs.
list_t cpus
List of CPUs using this address space.
space_callback_t * callbacks
space_t * currentSpace
Will only be accessed by the owner CPU, so no lock.
list_entry_t entry
Used by a space to know which CPUs are using it, protected by the space lock.
static void vmm_page_table_unmap_with_shootdown(space_t *space, void *virtAddr, uint64_t pageAmount)
static space_t kernelSpace
static void vmm_cpu_ctx_init_common(vmm_cpu_ctx_t *ctx)