45 panic(
NULL,
"Failed to initialize kernel address space");
68 panic(
NULL,
"Memory descriptor %d has invalid virtual address 0x%016lx", i, desc->VirtualStart);
72 panic(
NULL,
"Memory descriptor %d has invalid physical address 0x%016lx", i, desc->PhysicalStart);
78 panic(
NULL,
"Failed to map memory descriptor %d (phys=0x%016lx-0x%016lx virt=0x%016lx)", i,
79 desc->PhysicalStart, desc->PhysicalStart + desc->NumberOfPages *
PAGE_SIZE, desc->VirtualStart);
83 LOG_INFO(
"kernel virt=[0x%016lx-0x%016lx] phys=[0x%016lx-0x%016lx]\n", kernel->
virtStart,
88 panic(
NULL,
"Failed to map kernel memory");
99 LOG_INFO(
"loading kernel space... ");
204 while (remainingPages != 0)
208 void* addresses[maxBatchSize];
209 uint64_t batchSize =
MIN(remainingPages, maxBatchSize);
225 remainingPages -= batchSize;
341 if (virtAddr ==
NULL || length == 0)
378 assert(index < space->callbacksLength);
403 if (space ==
NULL || virtAddr ==
NULL || length == 0)
411 return vmm_unmap(space, virtAddr, length);
#define assert(expression)
#define BOOT_MEMORY_MAP_GET_DESCRIPTOR(map, index)
static cpu_t * smp_self_unsafe(void)
Returns a pointer to the cpu_t structure of the current CPU.
#define SYSCALL_DEFINE(num, returnType,...)
Macro to define a syscall.
#define CPU_ID_BOOTSTRAP
ID of the bootstrap CPU.
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
#define LOG_INFO(format,...)
#define LOG_DEBUG(format,...)
static void tlb_invalidate(void *virtAddr, uint64_t pageCount)
Invalidates a region of pages in the TLB.
pml_index_t
Indexes into a pml level.
static void page_table_collect_callbacks(page_table_t *table, void *virtAddr, uint64_t pageAmount, uint64_t *callbacks)
Collects the number of pages associated with each callback ID in the specified range.
#define PML_CALLBACK_NONE
Special callback ID that indicates no callback is associated with the page.
static void page_table_unmap(page_table_t *table, void *virtAddr, uint64_t pageAmount)
Unmaps a range of virtual addresses from the page table.
static uint64_t page_table_map(page_table_t *table, void *virtAddr, void *physAddr, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps a range of virtual addresses to physical addresses in the page table.
uint8_t pml_callback_id_t
Callback ID type.
static void page_table_clear(page_table_t *table, void *virtAddr, uint64_t pageAmount)
Clears page table entries in the specified range and frees any owned pages.
#define PML_MAX_CALLBACK
Maximum number of callbacks that can be registered for a page table.
#define PML_LOWER_HALF_END
The end of the lower half of the address space.
static bool page_table_is_unmapped(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if a range of virtual addresses is completely unmapped.
#define PML_HIGHER_HALF_START
The start of the higher half of the address space.
static uint64_t page_table_set_flags(page_table_t *table, void *virtAddr, uint64_t pageAmount, pml_flags_t flags)
Sets the flags for a range of pages in the page table.
static bool page_table_is_pinned(page_table_t *table, const void *virtAddr, uint64_t pageAmount)
Checks if any page in a range is pinned.
static uint64_t page_table_map_pages(page_table_t *table, void *virtAddr, void **pages, uint64_t pageAmount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps an array of physical pages to contiguous virtual addresses in the page table.
#define PML_ENSURE_LOWER_HALF(addr)
Ensures that the given address is in the lower half of the address space.
@ PML_INDEX_LOWER_HALF_MIN
@ PML_INDEX_LOWER_HALF_MAX
uint64_t pmm_alloc_pages(void **addresses, uint64_t count)
Allocates multiple physical pages.
void * space_mapping_end(space_t *space, space_mapping_t *mapping, errno_t err)
Performs cleanup after changes to the address space mappings.
pml_callback_id_t space_alloc_callback(space_t *space, uint64_t pageAmount, space_callback_func_t func, void *private)
Allocate a callback.
uint64_t space_check_access(space_t *space, const void *addr, uint64_t length)
Checks if a virtual memory region is within the allowed address range of the space.
void space_free_callback(space_t *space, pml_callback_id_t callbackId)
Free a callback.
void(* space_callback_func_t)(void *private)
Space callback function.
void space_tlb_shootdown(space_t *space, void *virtAddr, uint64_t pageAmount)
Performs a TLB shootdown for a region of the address space, and waits for acknowledgements.
uint64_t space_init(space_t *space, uintptr_t startAddress, uintptr_t endAddress, space_flags_t flags)
Initializes a virtual address space.
uint64_t space_mapping_start(space_t *space, space_mapping_t *mapping, void *virtAddr, void *physAddr, uint64_t length, pml_flags_t flags)
Prepare for changes to the address space mappings.
#define VMM_KERNEL_HEAP_MAX
The maximum address for the kernel heap.
void vmm_cpu_ctx_init(vmm_cpu_ctx_t *ctx)
Initializes a per-CPU VMM context and performs per-CPU VMM initialization.
#define VMM_KERNEL_STACKS_MAX
The maximum address for kernel stacks.
void vmm_shootdown_handler(interrupt_frame_t *frame, cpu_t *self)
TLB shootdown interrupt handler.
uint64_t vmm_unmap(space_t *space, void *virtAddr, uint64_t length)
Unmaps virtual memory from a given address space.
#define VMM_KERNEL_BINARY_MAX
The maximum address for the content of the kernel binary.
uint64_t vmm_protect(space_t *space, void *virtAddr, uint64_t length, pml_flags_t flags)
Changes memory protection flags for a virtual memory region in a given address space.
void vmm_init(const boot_memory_t *memory, const boot_gop_t *gop, const boot_kernel_t *kernel)
Initializes the Virtual Memory Manager.
#define VMM_KERNEL_STACKS_MIN
The minimum address for kernel stacks.
void * vmm_alloc(space_t *space, void *virtAddr, uint64_t length, pml_flags_t pmlFlags, vmm_alloc_flags_t allocFlags)
Allocates and maps virtual memory in a given address space.
#define VMM_IDENTITY_MAPPED_MIN
The minimum address for the identity mapped physical memory.
#define VMM_USER_SPACE_MAX
The maximum address for user space.
#define VMM_KERNEL_BINARY_MIN
The minimum address for the content of the kernel binary.
#define VMM_USER_SPACE_MIN
The minimum address for user space.
space_t * vmm_get_kernel_space(void)
Retrieves the kernel's address space.
void * vmm_map(space_t *space, void *virtAddr, void *physAddr, uint64_t length, pml_flags_t flags, space_callback_func_t func, void *private)
Maps physical memory to virtual memory in a given address space.
void vmm_unmap_bootloader_lower_half(thread_t *bootThread)
Unmaps the lower half of the address space after kernel initialization.
#define VMM_IDENTITY_MAPPED_MAX
The maximum address for the identity mapped physical memory.
void vmm_map_bootloader_lower_half(thread_t *bootThread)
Maps the lower half of the address space to the boot thread during kernel initialization.
#define VMM_KERNEL_HEAP_MIN
The minimum address for the kernel heap.
pml_flags_t vmm_prot_to_flags(prot_t prot)
Converts the user space memory protection flags to page table entry flags.
void * vmm_map_pages(space_t *space, void *virtAddr, void **pages, uint64_t pageAmount, pml_flags_t flags, space_callback_func_t func, void *private)
Maps an array of physical pages to virtual memory in a given address space.
vmm_alloc_flags_t
Flags for vmm_alloc().
@ VMM_ALLOC_FAIL_IF_MAPPED
If set and any page is already mapped, fail and set errno to EEXIST.
process_t * sched_process(void)
Retrieves the process of the currently running thread.
static void lock_init(lock_t *lock)
Initializes a lock.
static void lock_release(lock_t *lock)
Releases a lock.
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
#define ENOENT
No such file or directory.
#define ENOSPC
No space left on device.
#define EEXIST
File exists.
#define EINVAL
Invalid argument.
#define EFAULT
Bad address.
#define ENOMEM
Out of memory.
#define EBUSY
Device or resource busy.
#define errno
Error number variable.
#define BITMAP_FOR_EACH_SET(idx, map)
Iterate over each set bit in the bitmap.
static void list_push(list_t *list, list_entry_t *entry)
Pushes an entry to the end of the list.
static void list_entry_init(list_entry_t *entry)
Initializes a list entry.
#define PAGE_SIZE
Memory page size.
#define BYTES_TO_PAGES(amount)
Convert bytes to pages.
prot_t
Memory protection flags.
@ PROT_READ
Memory can be read from.
@ PROT_WRITE
Memory can be written to.
#define NULL
Pointer error value.
#define ERR
Integer error value.
static thread_t bootThread
static uint64_t pageAmount
static void cr3_write(uint64_t value)
#define CR4_PAGE_GLOBAL_ENABLE
static uint64_t cr3_read()
static void cr4_write(uint64_t value)
static uint64_t cr4_read()
#define atomic_fetch_add(object, operand)
__UINTPTR_TYPE__ uintptr_t
An entry in a page table without a specified address or callback ID.
pml_entry_t entries[PML_INDEX_AMOUNT]
space_callback_func_t func
Helper structure for managing address space mappings.
Virtual address space structure.
page_table_t pageTable
The page table associated with the address space.
bitmap_t callbackBitmap
Bitmap to track available callback IDs.
list_t cpus
List of CPUs using this address space.
atomic_uint16_t shootdownAcks
space_callback_t * callbacks
Thread of execution structure.
process_t * process
The parent process that the thread executes within.
space_t * currentSpace
Will only be accessed by the owner CPU, so no lock.
list_entry_t entry
Used by a space to know which CPUs are using it, protected by the space lock.
vmm_shootdown_t shootdowns[VMM_MAX_SHOOTDOWN_REQUESTS]
static void vmm_page_table_unmap_with_shootdown(space_t *space, void *virtAddr, uint64_t pageAmount)
static space_t kernelSpace
static void vmm_cpu_ctx_init_common(vmm_cpu_ctx_t *ctx)