vmm.c
#include <kernel/mem/vmm.h>

#include <kernel/cpu/cpu.h>
#include <kernel/cpu/regs.h>
#include <kernel/cpu/smp.h>
// (an include was lost here in extraction; likely the header providing SYSCALL_DEFINE)
#include <kernel/log/log.h>
#include <kernel/log/panic.h>
#include <kernel/mem/paging.h>
#include <kernel/mem/pmm.h>
#include <kernel/mem/space.h>
#include <kernel/proc/process.h>
#include <kernel/sched/sched.h>
#include <kernel/sched/thread.h>
#include <kernel/sync/lock.h>

#include <boot/boot_info.h>

#include <assert.h>
#include <errno.h>
#include <sys/math.h>
#include <sys/proc.h>

static space_t kernelSpace;

// Registers this CPU with the kernel space, enables global pages, and loads the
// kernel page table. The body was lost in extraction; this is a reconstruction
// from the symbols the file references (cr3/cr4 helpers, the space's CPU list)
// and may differ from the original in detail.
static void vmm_cpu_ctx_init_common(vmm_cpu_ctx_t* ctx)
{
    list_entry_init(&ctx->entry);
    lock_init(&ctx->lock);
    ctx->shootdownCount = 0;
    ctx->currentSpace = &kernelSpace;

    cr4_write(cr4_read() | CR4_PAGE_GLOBAL_ENABLE);
    cr3_write((uint64_t)PML_ENSURE_LOWER_HALF(kernelSpace.pageTable.pml4));

    lock_acquire(&kernelSpace.lock);
    list_push(&kernelSpace.cpus, &ctx->entry);
    lock_release(&kernelSpace.lock);
}

void vmm_init(const boot_memory_t* memory, const boot_gop_t* gop, const boot_kernel_t* kernel)
{
    // Condition reconstructed; the exact address-range arguments were lost in extraction.
    if (space_init(&kernelSpace, VMM_KERNEL_HEAP_MIN, VMM_KERNEL_HEAP_MAX, SPACE_USE_PMM_BITMAP) == ERR)
    {
        panic(NULL, "Failed to initialize kernel address space");
    }

    LOG_DEBUG("address space layout:\n");
    LOG_DEBUG(" kernel binary: 0x%016lx-0x%016lx\n", VMM_KERNEL_BINARY_MIN, VMM_KERNEL_BINARY_MAX);
    LOG_DEBUG(" kernel stacks: 0x%016lx-0x%016lx\n", VMM_KERNEL_STACKS_MIN, VMM_KERNEL_STACKS_MAX);
    LOG_DEBUG(" kernel heap: 0x%016lx-0x%016lx\n", VMM_KERNEL_HEAP_MIN, VMM_KERNEL_HEAP_MAX);
    LOG_DEBUG(" identity map: 0x%016lx-0x%016lx\n", VMM_IDENTITY_MAPPED_MIN, VMM_IDENTITY_MAPPED_MAX);
    LOG_DEBUG(" user space: 0x%016lx-0x%016lx\n", VMM_USER_SPACE_MIN, VMM_USER_SPACE_MAX);

    LOG_INFO("kernel pml4 allocated at 0x%lx\n", kernelSpace.pageTable.pml4);

    // Keep using the bootloader's memory mappings during initialization.
    // Loop reconstructed: copy the bootloader's lower-half PML4 entries into the kernel page table.
    for (pml_index_t i = PML_INDEX_LOWER_HALF_MIN; i <= PML_INDEX_LOWER_HALF_MAX; i++)
    {
        kernelSpace.pageTable.pml4->entries[i] = memory->table.pml4->entries[i];
    }

    for (uint64_t i = 0; i < memory->map.length; i++)
    {
        const EFI_MEMORY_DESCRIPTOR* desc = BOOT_MEMORY_MAP_GET_DESCRIPTOR(&memory->map, i);
        if (desc->VirtualStart < PML_HIGHER_HALF_START)
        {
            panic(NULL, "Memory descriptor %d has invalid virtual address 0x%016lx", i, desc->VirtualStart);
        }
        if (desc->PhysicalStart > PML_LOWER_HALF_END)
        {
            panic(NULL, "Memory descriptor %d has invalid physical address 0x%016lx", i, desc->PhysicalStart);
        }

        if (page_table_map(&kernelSpace.pageTable, (void*)desc->VirtualStart, (void*)desc->PhysicalStart,
                desc->NumberOfPages, PML_WRITE | PML_GLOBAL | PML_PRESENT, PML_CALLBACK_NONE) == ERR)
        {
            panic(NULL, "Failed to map memory descriptor %d (phys=0x%016lx-0x%016lx virt=0x%016lx)", i,
                desc->PhysicalStart, desc->PhysicalStart + desc->NumberOfPages * PAGE_SIZE, desc->VirtualStart);
        }
    }

    LOG_INFO("kernel virt=[0x%016lx-0x%016lx] phys=[0x%016lx-0x%016lx]\n", kernel->virtStart,
        kernel->virtStart + kernel->size, kernel->physStart, kernel->physStart + kernel->size);
    if (page_table_map(&kernelSpace.pageTable, (void*)kernel->virtStart, (void*)kernel->physStart,
            BYTES_TO_PAGES(kernel->size), PML_WRITE | PML_GLOBAL | PML_PRESENT, PML_CALLBACK_NONE) == ERR) // continuation reconstructed
    {
        panic(NULL, "Failed to map kernel memory");
    }

    LOG_INFO("GOP virt=[0x%016lx-0x%016lx] phys=[0x%016lx-0x%016lx]\n", gop->virtAddr, gop->virtAddr + gop->size,
        gop->physAddr, gop->physAddr + gop->size); // continuation reconstructed
    if (page_table_map(&kernelSpace.pageTable, (void*)gop->virtAddr, (void*)gop->physAddr, // reconstructed
            BYTES_TO_PAGES(gop->size), PML_WRITE | PML_GLOBAL | PML_PRESENT, PML_CALLBACK_NONE) == ERR)
    {
        panic(NULL, "Failed to map GOP memory");
    }

    LOG_INFO("loading kernel space... ");
    // (a line was lost here in extraction)

    cpu_t* cpu = smp_self_unsafe();
    assert(cpu != NULL);
    assert(cpu->id == CPU_ID_BOOTSTRAP);
    vmm_cpu_ctx_init_common(&cpu->vmm); // reconstructed; the bootstrap CPU is initialized here (see vmm_cpu_ctx_init)

    LOG_INFO("done!\n");
}

void vmm_cpu_ctx_init(vmm_cpu_ctx_t* ctx)
{
    cpu_t* cpu = smp_self_unsafe();
    if (cpu->id == CPU_ID_BOOTSTRAP) // Initialized early in vmm_init.
    {
        return;
    }

    vmm_cpu_ctx_init_common(ctx); // call reconstructed
}

// Maps the lower half of the address space to the boot thread during kernel
// initialization. (Body lost in extraction.)
void vmm_map_bootloader_lower_half(thread_t* bootThread)
{
    // ... body lost in extraction ...
}

// Unmaps the lower half of the address space after kernel initialization.
// (Body lost in extraction.)
void vmm_unmap_bootloader_lower_half(thread_t* bootThread)
{
    // ... body lost in extraction ...
}

space_t* vmm_get_kernel_space(void)
{
    return &kernelSpace;
}

pml_flags_t vmm_prot_to_flags(prot_t prot)
{
    switch ((int)prot)
    {
    case PROT_NONE:
        return 0;
    case PROT_READ:
        return PML_PRESENT;
    case PROT_READ | PROT_WRITE:
        return PML_PRESENT | PML_WRITE;
    default:
        return 0;
    }
}
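
// Illustrative sketch (not part of the original file): the page table flags
// produced for each user-visible protection. Note that PROT_WRITE on its own
// is not a listed case, so it falls through to the default and yields 0.
#if 0
static void vmm_prot_to_flags_example(void)
{
    assert(vmm_prot_to_flags(PROT_NONE) == 0);
    assert(vmm_prot_to_flags(PROT_READ) == PML_PRESENT);
    assert(vmm_prot_to_flags(PROT_READ | PROT_WRITE) == (PML_PRESENT | PML_WRITE));
    assert(vmm_prot_to_flags(PROT_WRITE) == 0); // write-only is not representable
}
#endif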

// Handles the logic of unmapping with a shootdown; should be called with the space's lock acquired.
// We need to make sure that any underlying physical pages owned by the page table are freed only after
// every CPU has invalidated its TLB: if the pages were freed first, a CPU with a stale TLB entry could
// still touch a frame that has already been returned to the PMM and possibly reused.
static inline void vmm_page_table_unmap_with_shootdown(space_t* space, void* virtAddr, uint64_t pageAmount)
{
    page_table_unmap(&space->pageTable, virtAddr, pageAmount);
    space_tlb_shootdown(space, virtAddr, pageAmount);
    page_table_clear(&space->pageTable, virtAddr, pageAmount);
}

void* vmm_alloc(space_t* space, void* virtAddr, uint64_t length, pml_flags_t pmlFlags, vmm_alloc_flags_t allocFlags)
{
    if (length == 0 || !(pmlFlags & PML_PRESENT))
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_get_kernel_space();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, NULL, length, pmlFlags | PML_OWNED) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    if (!page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        if (allocFlags & VMM_ALLOC_FAIL_IF_MAPPED)
        {
            return space_mapping_end(space, &mapping, EEXIST);
        }

        vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount); // line reconstructed
    }

    const uint64_t maxBatchSize = 64;
    uint64_t remainingPages = mapping.pageAmount;
    while (remainingPages != 0)
    {
        uintptr_t currentVirtAddr = (uintptr_t)mapping.virtAddr + (mapping.pageAmount - remainingPages) * PAGE_SIZE;

        void* addresses[maxBatchSize];
        uint64_t batchSize = MIN(remainingPages, maxBatchSize);
        if (pmm_alloc_pages(addresses, batchSize) == ERR)
        {
            // The page table frees the previously allocated pages, as they are owned by the page table.
            vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount - remainingPages);
            return space_mapping_end(space, &mapping, ENOMEM);
        }

        if (page_table_map_pages(&space->pageTable, (void*)currentVirtAddr, addresses, batchSize, mapping.flags,
                PML_CALLBACK_NONE) == ERR) // continuation reconstructed
        {
            // The page table frees the previously allocated pages, as they are owned by the page table.
            vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount - remainingPages);
            return space_mapping_end(space, &mapping, ENOMEM);
        }

        remainingPages -= batchSize;
    }

    return space_mapping_end(space, &mapping, EOK);
}
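
// Illustrative sketch (not part of the original file): backing a short-lived
// kernel buffer with anonymous memory. A NULL space selects the kernel space;
// the NULL virtAddr is assumed to let the space pick a free region, and 0
// means no vmm_alloc_flags_t flags; the size is an arbitrary placeholder.
#if 0
static uint64_t vmm_alloc_example(void)
{
    void* buf = vmm_alloc(NULL, NULL, 4 * PAGE_SIZE, PML_WRITE | PML_PRESENT, 0);
    if (buf == NULL)
    {
        return ERR; // errno was set to EINVAL, EBUSY, EEXIST or ENOMEM above
    }

    // ... use the buffer ...

    // The pages were mapped with PML_OWNED, so unmapping also frees them.
    return vmm_unmap(NULL, buf, 4 * PAGE_SIZE);
}
#endif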

void* vmm_map(space_t* space, void* virtAddr, void* physAddr, uint64_t length, pml_flags_t flags,
    space_callback_func_t func, void* private)
{
    if (physAddr == NULL || length == 0 || !(flags & PML_PRESENT))
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_get_kernel_space();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, physAddr, length, flags) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    pml_callback_id_t callbackId = PML_CALLBACK_NONE; // declaration reconstructed
    if (func != NULL)
    {
        callbackId = space_alloc_callback(space, mapping.pageAmount, func, private);
        if (callbackId == PML_MAX_CALLBACK)
        {
            return space_mapping_end(space, &mapping, ENOSPC);
        }
    }

    if (!page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount); // line reconstructed
    }

    if (page_table_map(&space->pageTable, mapping.virtAddr, mapping.physAddr, mapping.pageAmount, flags, callbackId) ==
        ERR)
    {
        if (callbackId != PML_CALLBACK_NONE)
        {
            space_free_callback(space, callbackId);
        }

        return space_mapping_end(space, &mapping, ENOMEM);
    }

    return space_mapping_end(space, &mapping, EOK);
}
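
// Illustrative sketch (not part of the original file): mapping device memory,
// e.g. a framebuffer, into the kernel space with a release callback. The
// physical address, size, and callback are hypothetical placeholders.
#if 0
static void vmm_map_example_released(void* private)
{
    LOG_INFO("example mapping released\n");
}

static void* vmm_map_example(void)
{
    // The callback runs once the last page of this mapping is unmapped (see vmm_unmap below).
    return vmm_map(NULL, NULL, (void*)0xFD000000, 8 * PAGE_SIZE, PML_WRITE | PML_PRESENT,
        vmm_map_example_released, NULL);
}
#endif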

void* vmm_map_pages(space_t* space, void* virtAddr, void** pages, uint64_t pageAmount, pml_flags_t flags,
    space_callback_func_t func, void* private)
{
    if (pages == NULL || pageAmount == 0 || !(flags & PML_PRESENT))
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_get_kernel_space();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, NULL, pageAmount * PAGE_SIZE, flags) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    pml_callback_id_t callbackId = PML_CALLBACK_NONE; // declaration reconstructed
    if (func != NULL)
    {
        callbackId = space_alloc_callback(space, pageAmount, func, private);
        if (callbackId == PML_MAX_CALLBACK)
        {
            return space_mapping_end(space, &mapping, ENOSPC);
        }
    }

    if (!page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount); // line reconstructed
    }

    if (page_table_map_pages(&space->pageTable, mapping.virtAddr, pages, mapping.pageAmount, mapping.flags,
            callbackId) == ERR)
    {
        if (callbackId != PML_CALLBACK_NONE)
        {
            space_free_callback(space, callbackId);
        }

        return space_mapping_end(space, &mapping, ENOMEM);
    }

    return space_mapping_end(space, &mapping, EOK);
}
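
// Illustrative sketch (not part of the original file): unlike vmm_map(), the
// physical pages passed to vmm_map_pages() need not be contiguous, so frames
// handed out by pmm_alloc_pages() can be stitched into one contiguous virtual
// region. Since the flags below do not include PML_OWNED, the caller
// presumably remains responsible for freeing the frames after unmapping.
#if 0
static void* vmm_map_pages_example(void)
{
    void* pages[8];
    if (pmm_alloc_pages(pages, 8) == ERR)
    {
        return NULL;
    }

    return vmm_map_pages(NULL, NULL, pages, 8, PML_WRITE | PML_PRESENT, NULL, NULL);
}
#endif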

uint64_t vmm_unmap(space_t* space, void* virtAddr, uint64_t length)
{
    if (virtAddr == NULL || length == 0)
    {
        errno = EINVAL;
        return ERR;
    }

    if (space == NULL)
    {
        space = vmm_get_kernel_space();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, NULL, length, PML_NONE) == ERR)
    {
        return ERR;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY) == NULL ? ERR : 0;
    }

    // Stores the number of pages carrying each callback ID within the region.
    uint64_t callbacks[PML_MAX_CALLBACK] = {0};
    page_table_collect_callbacks(&space->pageTable, mapping.virtAddr, mapping.pageAmount, callbacks);

    vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount); // line reconstructed

    uint64_t index;
    BITMAP_FOR_EACH_SET(&index, &space->callbackBitmap)
    {
        space_callback_t* callback = &space->callbacks[index];
        assert(callback->pageAmount >= callbacks[index]);

        callback->pageAmount -= callbacks[index];
        if (callback->pageAmount == 0)
        {
            assert(index < space->callbacksLength);
            space->callbacks[index].func(space->callbacks[index].private);
            space_free_callback(space, index);
        }
    }

    return space_mapping_end(space, &mapping, EOK) == NULL ? ERR : 0;
}
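
// Illustrative sketch (not part of the original file): a callback registered
// via vmm_map() fires only once the number of still-mapped pages carrying its
// callback ID drops to zero, so a piecewise unmap triggers it on the final
// call. Assumes a two-page region mapped with a callback; names are
// hypothetical.
#if 0
static void vmm_unmap_example(space_t* space, void* region)
{
    vmm_unmap(space, region, PAGE_SIZE); // callback not fired yet
    vmm_unmap(space, (void*)((uintptr_t)region + PAGE_SIZE), PAGE_SIZE); // last page gone: callback runs
}
#endif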

SYSCALL_DEFINE(SYS_MUNMAP, uint64_t, void* address, uint64_t length) // signature reconstructed
{
    process_t* process = sched_process();
    space_t* space = &process->space;

    if (space_check_access(space, address, length) == ERR)
    {
        errno = EFAULT;
        return ERR;
    }

    return vmm_unmap(space, address, length);
}

uint64_t vmm_protect(space_t* space, void* virtAddr, uint64_t length, pml_flags_t flags)
{
    if (space == NULL || virtAddr == NULL || length == 0)
    {
        errno = EINVAL;
        return ERR;
    }

    if (!(flags & PML_PRESENT))
    {
        return vmm_unmap(space, virtAddr, length);
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, NULL, length, flags) == ERR)
    {
        return ERR;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY) == NULL ? ERR : 0;
    }

    if (page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, ENOENT) == NULL ? ERR : 0;
    }

    if (page_table_set_flags(&space->pageTable, mapping.virtAddr, mapping.pageAmount, mapping.flags) == ERR)
    {
        return space_mapping_end(space, &mapping, EINVAL) == NULL ? ERR : 0;
    }

    space_tlb_shootdown(space, mapping.virtAddr, mapping.pageAmount);

    return space_mapping_end(space, &mapping, EOK) == NULL ? ERR : 0;
}
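
// Illustrative sketch (not part of the original file): dropping write access
// from a mapped region. Passing flags without PML_PRESENT degenerates to
// vmm_unmap(), as implemented above.
#if 0
static uint64_t vmm_protect_example(space_t* space, void* region, uint64_t length)
{
    return vmm_protect(space, region, length, PML_PRESENT); // region is now read-only
}
#endif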

SYSCALL_DEFINE(SYS_MPROTECT, uint64_t, void* address, uint64_t length, prot_t prot) // signature reconstructed
{
    process_t* process = sched_process();
    space_t* space = &process->space;

    if (space_check_access(space, address, length) == ERR)
    {
        return ERR;
    }

    return vmm_protect(space, address, length, vmm_prot_to_flags(prot) | PML_USER);
}

void vmm_shootdown_handler(interrupt_frame_t* frame, cpu_t* self)
{
    (void)frame;

    vmm_cpu_ctx_t* ctx = &self->vmm;
    while (true)
    {
        lock_acquire(&ctx->lock);
        if (ctx->shootdownCount == 0)
        {
            lock_release(&ctx->lock);
            break;
        }

        vmm_shootdown_t shootdown = ctx->shootdowns[ctx->shootdownCount - 1];
        ctx->shootdownCount--;
        lock_release(&ctx->lock);

        assert(shootdown.space != NULL);
        assert(shootdown.pageAmount != 0);
        assert(shootdown.virtAddr != NULL);

        tlb_invalidate(shootdown.virtAddr, shootdown.pageAmount);
        atomic_fetch_add(&shootdown.space->shootdownAcks, 1);
    }
}
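
// The initiating side of this protocol lives in space_tlb_shootdown()
// (space.c): it presumably queues a vmm_shootdown_t on each CPU currently in
// space->cpus, interrupts those CPUs, and then waits for acknowledgements;
// each shootdownAcks increment above releases the initiator once every
// targeted CPU has invalidated its TLB.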