PatchworkOS  321f6ec
A non-POSIX operating system.
vmm.c

#include <kernel/mem/vmm.h>

#include <kernel/cpu/cpu.h>
#include <kernel/cpu/regs.h>
#include <kernel/log/log.h>
#include <kernel/log/panic.h>
#include <kernel/mem/paging.h>
#include <kernel/mem/pmm.h>
#include <kernel/mem/space.h>
#include <kernel/proc/process.h>
#include <kernel/sched/sched.h>
#include <kernel/sched/thread.h>
#include <kernel/sync/lock.h>

#include <boot/boot_info.h>

#include <assert.h>
#include <errno.h>
#include <sys/math.h>
#include <sys/proc.h>

static space_t kernelSpace;

// Shared per-CPU VMM context setup used by both the bootstrap and secondary
// CPUs (body elided in this listing).
static void vmm_cpu_ctx_init_common(vmm_cpu_ctx_t* ctx);

void vmm_init(void)
{
    boot_info_t* bootInfo = boot_info_get(); // (reconstructed)
    const boot_memory_t* memory = &bootInfo->memory;
    const boot_gop_t* gop = &bootInfo->gop;
    const boot_kernel_t* kernel = &bootInfo->kernel;

    // Reconstructed call; the exact bounds passed here are elided in this listing.
    if (space_init(&kernelSpace, VMM_KERNEL_HEAP_MIN, VMM_KERNEL_HEAP_MAX, SPACE_USE_PMM_BITMAP) == ERR)
    {
        panic(NULL, "Failed to initialize kernel address space");
    }

    LOG_DEBUG("address space layout:\n");
    LOG_DEBUG(" kernel binary: 0x%016lx-0x%016lx\n", VMM_KERNEL_BINARY_MIN, VMM_KERNEL_BINARY_MAX);
    LOG_DEBUG(" kernel stacks: 0x%016lx-0x%016lx\n", VMM_KERNEL_STACKS_MIN, VMM_KERNEL_STACKS_MAX);
    LOG_DEBUG(" kernel heap: 0x%016lx-0x%016lx\n", VMM_KERNEL_HEAP_MIN, VMM_KERNEL_HEAP_MAX);
    LOG_DEBUG(" identity map: 0x%016lx-0x%016lx\n", VMM_IDENTITY_MAPPED_MIN, VMM_IDENTITY_MAPPED_MAX);
    LOG_DEBUG(" user space: 0x%016lx-0x%016lx\n", VMM_USER_SPACE_MIN, VMM_USER_SPACE_MAX);

    LOG_INFO("kernel pml4 allocated at 0x%lx\n", kernelSpace.pageTable.pml4);

    // Keep using the bootloader's memory mappings during initialization; the loop
    // below is reconstructed and copies the bootloader's lower-half PML4 entries.
    for (uint64_t i = PML_INDEX_LOWER_HALF_MIN; i <= PML_INDEX_LOWER_HALF_MAX; i++)
    {
        kernelSpace.pageTable.pml4->entries[i] = memory->table.pml4->entries[i];
    }

    for (uint64_t i = 0; i < memory->map.length; i++)
    {
        const EFI_MEMORY_DESCRIPTOR* desc = BOOT_MEMORY_MAP_GET_DESCRIPTOR(&memory->map, i);
        if (desc->VirtualStart < PML_HIGHER_HALF_START)
        {
            panic(NULL, "Memory descriptor %lu has invalid virtual address 0x%016lx", i, desc->VirtualStart);
        }
        if (desc->PhysicalStart > PML_LOWER_HALF_END)
        {
            panic(NULL, "Memory descriptor %lu has invalid physical address 0x%016lx", i, desc->PhysicalStart);
        }

        if (page_table_map(&kernelSpace.pageTable, (void*)desc->VirtualStart, (void*)desc->PhysicalStart,
                desc->NumberOfPages, PML_WRITE | PML_GLOBAL | PML_PRESENT, PML_CALLBACK_NONE) == ERR)
        {
            panic(NULL, "Failed to map memory descriptor %lu (phys=0x%016lx-0x%016lx virt=0x%016lx)", i,
                desc->PhysicalStart, desc->PhysicalStart + desc->NumberOfPages * PAGE_SIZE, desc->VirtualStart);
        }
    }

    Elf64_Addr minVaddr = 0;
    Elf64_Addr maxVaddr = 0;
    elf64_get_loadable_bounds(&kernel->elf, &minVaddr, &maxVaddr);
    uint64_t kernelPageAmount = BYTES_TO_PAGES(maxVaddr - minVaddr);

    LOG_INFO("kernel virt=[0x%016lx-0x%016lx] phys=[0x%016lx-0x%016lx]\n", minVaddr, maxVaddr,
        (uintptr_t)kernel->physAddr, (uintptr_t)kernel->physAddr + kernelPageAmount * PAGE_SIZE);
    if (page_table_map(&kernelSpace.pageTable, (void*)minVaddr, kernel->physAddr, kernelPageAmount,
            PML_WRITE | PML_GLOBAL | PML_PRESENT, PML_CALLBACK_NONE) == ERR) // (flags reconstructed)
    {
        panic(NULL, "Failed to map kernel memory");
    }

    LOG_INFO("GOP virt=[0x%016lx-0x%016lx] phys=[0x%016lx-0x%016lx]\n", gop->virtAddr, gop->virtAddr + gop->size,
        (uintptr_t)gop->physAddr, (uintptr_t)gop->physAddr + gop->size); // (continuation reconstructed)
    if (page_table_map(&kernelSpace.pageTable, (void*)gop->virtAddr, (void*)gop->physAddr, BYTES_TO_PAGES(gop->size),
            PML_WRITE | PML_GLOBAL | PML_PRESENT, PML_CALLBACK_NONE) == ERR) // (call reconstructed)
    {
        panic(NULL, "Failed to map GOP memory");
    }
}

void vmm_kernel_space_load(void)
{
    LOG_INFO("loading kernel space... ");

    cpu_t* cpu = cpu_get_unsafe();
    assert(cpu != NULL);
    assert(cpu->id == CPU_ID_BOOTSTRAP);
    // Reconstructed: the elided line presumably switches CR3 to the kernel page table.
    cr3_write((uint64_t)PML_ENSURE_LOWER_HALF(kernelSpace.pageTable.pml4));

    LOG_INFO("done!\n");
}

void vmm_cpu_ctx_init(vmm_cpu_ctx_t* ctx)
{
    cpu_t* cpu = cpu_get_unsafe();
    if (cpu->id == CPU_ID_BOOTSTRAP) // Initialized early in vmm_init.
    {
        return;
    }

    vmm_cpu_ctx_init_common(ctx); // (reconstructed)
}

space_t* vmm_kernel_space_get(void)
{
    return &kernelSpace;
}

pml_flags_t vmm_prot_to_flags(prot_t prot)
{
    switch ((int)prot)
    {
    case PROT_NONE:
        return 0;
    case PROT_READ:
        return PML_PRESENT;
    case PROT_READ | PROT_WRITE:
        return PML_PRESENT | PML_WRITE;
    default:
        // Combinations x86 paging cannot express (e.g. PROT_WRITE alone)
        // translate to no access.
        return 0;
    }
}
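
// Usage sketch (illustrative, not part of the original file): the translation
// the SYS_MPROTECT handler below applies to user mappings, which also add
// PML_USER on top. An unsupported combination such as PROT_WRITE alone yields
// 0, i.e. no PML_PRESENT.
static inline pml_flags_t example_user_flags(void)
{
    return vmm_prot_to_flags(PROT_READ | PROT_WRITE) | PML_USER; // PML_PRESENT | PML_WRITE | PML_USER
}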

// Handles the logic of unmapping with a TLB shootdown; must be called with the
// space's lock acquired. Physical pages owned by the page table may only be
// freed after every CPU has invalidated its TLB, so the entries are unmapped
// first, the shootdown runs, and only then are the entries cleared and any
// owned pages freed.
static inline void vmm_page_table_unmap_with_shootdown(space_t* space, void* virtAddr, uint64_t pageAmount)
{
    page_table_unmap(&space->pageTable, virtAddr, pageAmount);
    space_tlb_shootdown(space, virtAddr, pageAmount);
    page_table_clear(&space->pageTable, virtAddr, pageAmount);
}

void* vmm_alloc(space_t* space, void* virtAddr, uint64_t length, pml_flags_t pmlFlags, vmm_alloc_flags_t allocFlags)
{
    if (length == 0 || !(pmlFlags & PML_PRESENT))
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_kernel_space_get();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, NULL, length, pmlFlags | PML_OWNED) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    if (!page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        if (allocFlags & VMM_ALLOC_FAIL_IF_MAPPED)
        {
            return space_mapping_end(space, &mapping, EEXIST);
        }

        // Reconstructed: replace any existing mappings in the region.
        vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount);
    }

    const uint64_t maxBatchSize = 64;
    uint64_t remainingPages = mapping.pageAmount;
    while (remainingPages != 0)
    {
        uintptr_t currentVirtAddr = (uintptr_t)mapping.virtAddr + (mapping.pageAmount - remainingPages) * PAGE_SIZE;

        void* addresses[maxBatchSize];
        uint64_t batchSize = MIN(remainingPages, maxBatchSize);
        if (pmm_alloc_pages(addresses, batchSize) == ERR)
        {
            // The page table frees the previously mapped pages, as it owns them (PML_OWNED).
            vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount - remainingPages);
            return space_mapping_end(space, &mapping, ENOMEM);
        }

        if (page_table_map_pages(&space->pageTable, (void*)currentVirtAddr, addresses, batchSize, mapping.flags,
                PML_CALLBACK_NONE) == ERR) // (callback argument reconstructed)
        {
            // The page table frees the previously mapped pages, as it owns them (PML_OWNED).
            vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount - remainingPages);
            return space_mapping_end(space, &mapping, ENOMEM);
        }

        remainingPages -= batchSize;
    }

    return space_mapping_end(space, &mapping, EOK);
}
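
// Usage sketch (illustrative, not part of the original file): allocate four
// writable kernel pages and release them. Passing NULL for the space selects
// the kernel space (see above); passing NULL for virtAddr is assumed here to
// let space_mapping_start() pick a free range.
static inline void vmm_alloc_example(void)
{
    void* base = vmm_alloc(NULL, NULL, 4 * PAGE_SIZE, PML_PRESENT | PML_WRITE, VMM_ALLOC_FAIL_IF_MAPPED);
    if (base == NULL)
    {
        LOG_DEBUG("vmm_alloc failed: errno=%d\n", errno);
        return;
    }

    // ... use the pages ...

    vmm_unmap(NULL, base, 4 * PAGE_SIZE); // The page table frees the PML_OWNED pages.
}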

void* vmm_map(space_t* space, void* virtAddr, void* physAddr, uint64_t length, pml_flags_t flags,
    space_callback_func_t func, void* private)
{
    if (physAddr == NULL || length == 0 || !(flags & PML_PRESENT))
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_kernel_space_get();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, physAddr, length, flags) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    pml_callback_id_t callbackId = PML_CALLBACK_NONE; // (reconstructed)
    if (func != NULL)
    {
        callbackId = space_alloc_callback(space, mapping.pageAmount, func, private);
        if (callbackId == PML_MAX_CALLBACK)
        {
            return space_mapping_end(space, &mapping, ENOSPC);
        }
    }

    if (!page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        // Reconstructed: replace any existing mappings in the region.
        vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount);
    }

    if (page_table_map(&space->pageTable, mapping.virtAddr, mapping.physAddr, mapping.pageAmount, flags, callbackId) ==
        ERR)
    {
        if (callbackId != PML_CALLBACK_NONE)
        {
            space_free_callback(space, callbackId);
        }

        return space_mapping_end(space, &mapping, ENOMEM);
    }

    return space_mapping_end(space, &mapping, EOK);
}
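
// Usage sketch (illustrative, not part of the original file): map a
// hypothetical device's registers and register a teardown callback; the
// callback fires in vmm_unmap() once every page carrying its ID is gone.
// The physical address and the helper are invented for this example.
static void example_mmio_release(void* private)
{
    LOG_DEBUG("MMIO mapping released (%s)\n", (const char*)private);
}

static inline void vmm_map_example(void)
{
    void* regs = vmm_map(NULL, NULL, (void*)0xfe000000, PAGE_SIZE, PML_PRESENT | PML_WRITE,
        example_mmio_release, (void*)"example-device");
    if (regs == NULL)
    {
        LOG_DEBUG("vmm_map failed: errno=%d\n", errno);
        return;
    }

    vmm_unmap(NULL, regs, PAGE_SIZE); // Drops the mapping and invokes example_mmio_release().
}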

void* vmm_map_pages(space_t* space, void* virtAddr, void** pages, uint64_t pageAmount, pml_flags_t flags,
    space_callback_func_t func, void* private)
{
    if (pages == NULL || pageAmount == 0 || !(flags & PML_PRESENT))
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_kernel_space_get();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, NULL, pageAmount * PAGE_SIZE, flags) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    pml_callback_id_t callbackId = PML_CALLBACK_NONE; // (reconstructed)
    if (func != NULL)
    {
        callbackId = space_alloc_callback(space, pageAmount, func, private);
        if (callbackId == PML_MAX_CALLBACK)
        {
            return space_mapping_end(space, &mapping, ENOSPC);
        }
    }

    if (!page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        // Reconstructed: replace any existing mappings in the region.
        vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount);
    }

    if (page_table_map_pages(&space->pageTable, mapping.virtAddr, pages, mapping.pageAmount, mapping.flags,
            callbackId) == ERR)
    {
        if (callbackId != PML_CALLBACK_NONE)
        {
            space_free_callback(space, callbackId);
        }

        return space_mapping_end(space, &mapping, ENOMEM);
    }

    return space_mapping_end(space, &mapping, EOK);
}

void* vmm_unmap(space_t* space, void* virtAddr, uint64_t length)
{
    if (virtAddr == NULL || length == 0)
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_kernel_space_get();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, NULL, length, PML_NONE) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    // Stores, per callback ID, how many pages within the region carry it.
    uint64_t callbacks[PML_MAX_CALLBACK] = {0};
    page_table_collect_callbacks(&space->pageTable, mapping.virtAddr, mapping.pageAmount, callbacks);

    // Reconstructed: unmap the region (with shootdown) before firing callbacks.
    vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount);

    uint64_t index;
    BITMAP_FOR_EACH_SET(&index, &space->callbackBitmap)
    {
        space_callback_t* callback = &space->callbacks[index];
        assert(callback->pageAmount >= callbacks[index]);

        // A callback fires once its last page has been unmapped.
        callback->pageAmount -= callbacks[index];
        if (callback->pageAmount == 0)
        {
            assert(index < space->callbacksLength);
            space->callbacks[index].func(space->callbacks[index].private);
            space_free_callback(space, index);
        }
    }

    return space_mapping_end(space, &mapping, EOK);
}

// Signature reconstructed from the syscall table; elided in this listing.
SYSCALL_DEFINE(SYS_MUNMAP, void*, void* address, uint64_t length)
{
    process_t* process = sched_process();
    space_t* space = &process->space;

    if (space_check_access(space, address, length) == ERR)
    {
        errno = EFAULT;
        return NULL;
    }

    return vmm_unmap(space, address, length);
}

void* vmm_protect(space_t* space, void* virtAddr, uint64_t length, pml_flags_t flags)
{
    if (space == NULL || virtAddr == NULL || length == 0)
    {
        errno = EINVAL;
        return NULL;
    }

    // Clearing PML_PRESENT is equivalent to an unmap request.
    if (!(flags & PML_PRESENT))
    {
        return vmm_unmap(space, virtAddr, length);
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, NULL, length, flags) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    if (page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, ENOENT);
    }

    if (page_table_set_flags(&space->pageTable, mapping.virtAddr, mapping.pageAmount, mapping.flags) == ERR)
    {
        return space_mapping_end(space, &mapping, EINVAL);
    }

    space_tlb_shootdown(space, mapping.virtAddr, mapping.pageAmount);

    return space_mapping_end(space, &mapping, EOK);
}
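
// Usage sketch (illustrative, not part of the original file): downgrade a
// previously allocated region to read-only. Note that vmm_protect(), unlike
// the functions above, rejects a NULL space, so the kernel space is passed
// explicitly; `base` stands for an address returned by vmm_alloc().
static inline void vmm_protect_example(void* base)
{
    if (vmm_protect(vmm_kernel_space_get(), base, 4 * PAGE_SIZE, PML_PRESENT) == NULL)
    {
        LOG_DEBUG("vmm_protect failed: errno=%d\n", errno);
    }
}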

// Signature reconstructed from the syscall table; elided in this listing.
SYSCALL_DEFINE(SYS_MPROTECT, void*, void* address, uint64_t length, prot_t prot)
{
    process_t* process = sched_process();
    space_t* space = &process->space;

    if (space_check_access(space, address, length) == ERR)
    {
        errno = EFAULT; // Mirrors the SYS_MUNMAP handler above.
        return NULL;
    }

    return vmm_protect(space, address, length, vmm_prot_to_flags(prot) | PML_USER);
}