PatchworkOS 19e446b
A non-POSIX operating system.

vmm.c
#include <kernel/mem/vmm.h>

#include <kernel/cpu/cpu.h>
#include <kernel/cpu/ipi.h>
#include <kernel/cpu/regs.h>
#include <kernel/log/log.h>
#include <kernel/log/panic.h>
#include <kernel/mem/paging.h>
#include <kernel/mem/pmm.h>
#include <kernel/mem/space.h>
#include <kernel/proc/process.h>
#include <kernel/sched/clock.h>
#include <kernel/sched/sched.h>
#include <kernel/sched/thread.h>
#include <kernel/sync/lock.h>

#include <boot/boot_info.h>

#include <assert.h>
#include <errno.h>
#include <sys/math.h>
#include <sys/proc.h>

static space_t kernelSpace;

static void vmm_cpu_init(vmm_cpu_t* ctx);

PERCPU_DEFINE_CTOR(vmm_cpu_t, pcpu_vmm)
{
    vmm_cpu_init(SELF_PTR(pcpu_vmm));
}

void vmm_init(void)
{
    boot_info_t* bootInfo = boot_info_get();
    const boot_memory_t* memory = &bootInfo->memory;
    const boot_gop_t* gop = &bootInfo->gop;
    const boot_kernel_t* kernel = &bootInfo->kernel;

    if (space_init(&kernelSpace, VMM_KERNEL_HEAP_MIN, VMM_KERNEL_HEAP_MAX, SPACE_USE_PMM_BITMAP) == ERR)
    {
        panic(NULL, "Failed to initialize kernel address space");
    }

    LOG_DEBUG("address space layout:\n");
    LOG_DEBUG(" kernel binary: %p-%p\n", VMM_KERNEL_BINARY_MIN, VMM_KERNEL_BINARY_MAX);
    LOG_DEBUG(" kernel stacks: %p-%p\n", VMM_KERNEL_STACKS_MIN, VMM_KERNEL_STACKS_MAX);
    LOG_DEBUG(" kernel heap: %p-%p\n", VMM_KERNEL_HEAP_MIN, VMM_KERNEL_HEAP_MAX);
    LOG_DEBUG(" identity mapped: %p-%p\n", VMM_IDENTITY_MAPPED_MIN, VMM_IDENTITY_MAPPED_MAX);
    LOG_DEBUG(" user space: %p-%p\n", VMM_USER_SPACE_MIN, VMM_USER_SPACE_MAX);

    LOG_INFO("kernel pml4 allocated at 0x%lx\n", kernelSpace.pageTable.pml4);

    // Keep using the bootloader's memory mappings during initialization.
    for (pml_index_t i = PML_INDEX_LOWER_HALF_MIN; i <= PML_INDEX_LOWER_HALF_MAX; i++)
    {
        kernelSpace.pageTable.pml4->entries[i] = memory->table.pml4->entries[i];
    }

    for (size_t i = 0; i < memory->map.length; i++)
    {
        const EFI_MEMORY_DESCRIPTOR* desc = BOOT_MEMORY_MAP_GET_DESCRIPTOR(&memory->map, i);
        if (desc->VirtualStart < PML_HIGHER_HALF_START)
        {
            panic(NULL, "Memory descriptor %d has invalid virtual address %p", i, desc->VirtualStart);
        }
        if (desc->PhysicalStart > PML_LOWER_HALF_END)
        {
            panic(NULL, "Memory descriptor %d has invalid physical address %p", i, desc->PhysicalStart);
        }

        if (page_table_map(&kernelSpace.pageTable, (void*)desc->VirtualStart, desc->PhysicalStart, desc->NumberOfPages,
                PML_PRESENT | PML_WRITE | PML_GLOBAL, PML_CALLBACK_NONE) == ERR)
        {
            panic(NULL, "Failed to map memory descriptor %d (phys=%p-%p virt=%p)", i, desc->PhysicalStart,
                desc->PhysicalStart + desc->NumberOfPages * PAGE_SIZE, desc->VirtualStart);
        }
    }

    Elf64_Addr minVaddr = 0;
    Elf64_Addr maxVaddr = 0;
    elf64_get_loadable_bounds(&kernel->elf, &minVaddr, &maxVaddr);
    uint64_t kernelPageAmount = BYTES_TO_PAGES(maxVaddr - minVaddr);

    LOG_INFO("kernel virt=[%p-%p] phys=[%p-%p]\n", minVaddr, maxVaddr, (uintptr_t)kernel->physAddr,
        (uintptr_t)kernel->physAddr + kernelPageAmount * PAGE_SIZE);
    if (page_table_map(&kernelSpace.pageTable, (void*)minVaddr, kernel->physAddr, kernelPageAmount,
            PML_PRESENT | PML_WRITE | PML_GLOBAL, PML_CALLBACK_NONE) == ERR)
    {
        panic(NULL, "Failed to map kernel memory");
    }

    LOG_INFO("GOP virt=[%p-%p] phys=[%p-%p]\n", gop->virtAddr, gop->virtAddr + gop->size, gop->physAddr,
        gop->physAddr + gop->size);
    if (page_table_map(&kernelSpace.pageTable, gop->virtAddr, gop->physAddr, BYTES_TO_PAGES(gop->size),
            PML_PRESENT | PML_WRITE | PML_GLOBAL, PML_CALLBACK_NONE) == ERR)
    {
        panic(NULL, "Failed to map GOP memory");
    }
}

void vmm_kernel_space_load(void)
{
    LOG_INFO("loading kernel space... ");
    vmm_cpu_init(SELF_PTR(pcpu_vmm));
    LOG_INFO("done!\n");
}

space_t* vmm_kernel_space_get(void)
{
    return &kernelSpace;
}

pml_flags_t vmm_prot_to_flags(prot_t prot)
{
    switch ((int)prot)
    {
    case PROT_NONE:
        return 0;
    case PROT_READ:
        return PML_PRESENT;
    case PROT_READ | PROT_WRITE:
        return PML_PRESENT | PML_WRITE;
    default:
        return 0;
    }
}
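
// Illustrative sketch, not part of vmm.c: the intended use of vmm_prot_to_flags().
// The syscall layer converts the user's prot_t and ORs in PML_USER itself, exactly
// as SYS_MPROTECT does below; the helper name example_user_flags is hypothetical.
static pml_flags_t example_user_flags(prot_t prot)
{
    // PROT_WRITE without PROT_READ falls through to the default case and yields 0.
    return vmm_prot_to_flags(prot) | PML_USER;
}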

// Handles the logic of unmapping with a TLB shootdown; must be called with the space's lock held.
// Any underlying physical pages owned by the page table may only be freed after every CPU has
// invalidated its TLB, otherwise a stale translation could still reach a reused frame.
static inline void vmm_page_table_unmap_with_shootdown(space_t* space, void* virtAddr, uint64_t pageAmount)
{
    page_table_unmap(&space->pageTable, virtAddr, pageAmount);
    vmm_tlb_shootdown(space, virtAddr, pageAmount);
    page_table_clear(&space->pageTable, virtAddr, pageAmount);
}

void* vmm_alloc(space_t* space, void* virtAddr, size_t length, size_t alignment, pml_flags_t pmlFlags,
    vmm_alloc_flags_t allocFlags)
{
    if (length == 0 || !(pmlFlags & PML_PRESENT))
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_kernel_space_get();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, PHYS_ADDR_INVALID, length, alignment, pmlFlags | PML_OWNED) ==
        ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    if (!page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        if (allocFlags & VMM_ALLOC_FAIL_IF_MAPPED)
        {
            return space_mapping_end(space, &mapping, EEXIST);
        }

        vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount);
    }

    const uint64_t maxBatchSize = 64;
    uint64_t remainingPages = mapping.pageAmount;
    while (remainingPages != 0)
    {
        void* currentVirtAddr = mapping.virtAddr + (mapping.pageAmount - remainingPages) * PAGE_SIZE;

        pfn_t pages[maxBatchSize];
        uint64_t batchSize = MIN(remainingPages, maxBatchSize);
        if (pmm_alloc_pages(pages, batchSize) == ERR)
        {
            // The page table will free the previously allocated pages, as it owns them.
            vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount - remainingPages);
            return space_mapping_end(space, &mapping, ENOMEM);
        }

        if (allocFlags & VMM_ALLOC_ZERO)
        {
            for (uint64_t i = 0; i < batchSize; i++)
            {
                memset(PFN_TO_VIRT(pages[i]), 0, PAGE_SIZE);
            }
        }

        if (page_table_map_pages(&space->pageTable, currentVirtAddr, pages, batchSize, mapping.flags,
                PML_CALLBACK_NONE) == ERR)
        {
            // The page table will free the previously allocated pages, as it owns them.
            vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount - remainingPages);
            return space_mapping_end(space, &mapping, ENOMEM);
        }

        remainingPages -= batchSize;
    }

    return space_mapping_end(space, &mapping, EOK);
}
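
// Usage sketch, not part of vmm.c: allocate four zeroed, writable kernel pages.
// Passing NULL for the space selects the kernel space (see above); passing NULL for
// virtAddr assumes space_mapping_start() picks a free address, and the alignment is
// assumed to be expressed in bytes. example_alloc_scratch is a hypothetical name.
static void* example_alloc_scratch(void)
{
    void* scratch = vmm_alloc(NULL, NULL, 4 * PAGE_SIZE, PAGE_SIZE, PML_PRESENT | PML_WRITE, VMM_ALLOC_ZERO);
    if (scratch == NULL)
    {
        return NULL; // errno holds EINVAL, EBUSY, EEXIST or ENOMEM, as set above.
    }
    return scratch;
}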

void* vmm_map(space_t* space, void* virtAddr, phys_addr_t physAddr, size_t length, pml_flags_t flags,
    space_callback_func_t func, void* data)
{
    if (physAddr == PHYS_ADDR_INVALID || length == 0 || !(flags & PML_PRESENT))
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_kernel_space_get();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, physAddr, length, 1, flags) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    pml_callback_id_t callbackId = PML_CALLBACK_NONE;
    if (func != NULL)
    {
        callbackId = space_alloc_callback(space, mapping.pageAmount, func, data);
        if (callbackId == PML_MAX_CALLBACK)
        {
            return space_mapping_end(space, &mapping, ENOSPC);
        }
    }

    if (!page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount);
    }

    if (page_table_map(&space->pageTable, mapping.virtAddr, mapping.physAddr, mapping.pageAmount, flags, callbackId) ==
        ERR)
    {
        if (callbackId != PML_CALLBACK_NONE)
        {
            space_free_callback(space, callbackId);
        }

        return space_mapping_end(space, &mapping, ENOMEM);
    }

    return space_mapping_end(space, &mapping, EOK);
}
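
// Usage sketch, not part of vmm.c: map a device's MMIO window into the kernel space
// with a teardown callback. vmm_unmap() below invokes the callback once every page of
// the mapping has been released. The names example_mmio_released and example_map_mmio
// are hypothetical, as is letting the VMM pick the virtual address with virtAddr == NULL.
static void example_mmio_released(void* data)
{
    LOG_INFO("MMIO mapping released (data=%p)\n", data);
}

static void* example_map_mmio(phys_addr_t mmioBase, size_t mmioSize)
{
    return vmm_map(NULL, NULL, mmioBase, mmioSize, PML_PRESENT | PML_WRITE, example_mmio_released, NULL);
}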

void* vmm_map_pages(space_t* space, void* virtAddr, pfn_t* pfns, size_t amount, pml_flags_t flags,
    space_callback_func_t func, void* data)
{
    if (pfns == NULL || amount == 0 || !(flags & PML_PRESENT))
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_kernel_space_get();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, PHYS_ADDR_INVALID, amount * PAGE_SIZE, 1, flags) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    pml_callback_id_t callbackId = PML_CALLBACK_NONE;
    if (func != NULL)
    {
        callbackId = space_alloc_callback(space, amount, func, data);
        if (callbackId == PML_MAX_CALLBACK)
        {
            return space_mapping_end(space, &mapping, ENOSPC);
        }
    }

    if (!page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount);
    }

    if (page_table_map_pages(&space->pageTable, mapping.virtAddr, pfns, mapping.pageAmount, mapping.flags,
            callbackId) == ERR)
    {
        if (callbackId != PML_CALLBACK_NONE)
        {
            space_free_callback(space, callbackId);
        }

        return space_mapping_end(space, &mapping, ENOMEM);
    }

    return space_mapping_end(space, &mapping, EOK);
}

void* vmm_unmap(space_t* space, void* virtAddr, size_t length)
{
    if (virtAddr == NULL || length == 0)
    {
        errno = EINVAL;
        return NULL;
    }

    if (space == NULL)
    {
        space = vmm_kernel_space_get();
    }

    space_mapping_t mapping;
    if (space_mapping_start(space, &mapping, virtAddr, PHYS_ADDR_INVALID, length, 1, PML_NONE) == ERR)
    {
        return NULL;
    }

    if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
    {
        return space_mapping_end(space, &mapping, EBUSY);
    }

    // Counts, for each callback ID, how many pages within the region reference it.
    uint64_t callbacks[PML_MAX_CALLBACK] = {0};
    page_table_collect_callbacks(&space->pageTable, mapping.virtAddr, mapping.pageAmount, callbacks);

    vmm_page_table_unmap_with_shootdown(space, mapping.virtAddr, mapping.pageAmount);

    uint64_t index;
    BITMAP_FOR_EACH_SET(&index, &space->callbackBitmap)
    {
        space_callback_t* callback = &space->callbacks[index];
        assert(callback->pageAmount >= callbacks[index]);

        callback->pageAmount -= callbacks[index];
        if (callback->pageAmount == 0)
        {
            assert(index < space->callbacksLength);
            space->callbacks[index].func(space->callbacks[index].data);
            space_free_callback(space, index);
        }
    }

    return space_mapping_end(space, &mapping, EOK);
}
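
// Continuing the hypothetical MMIO sketch from vmm_map() above: unmapping the whole
// window drops the page count of its callback to zero, so the accounting loop in
// vmm_unmap() fires example_mmio_released() exactly once.
static void example_unmap_mmio(void* window, size_t mmioSize)
{
    if (vmm_unmap(NULL, window, mmioSize) == NULL)
    {
        LOG_INFO("failed to unmap MMIO window %p\n", window);
    }
}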

SYSCALL_DEFINE(SYS_MUNMAP, void*, void* address, size_t length)
{
    process_t* process = process_current();
    space_t* space = &process->space;

    if (space_check_access(space, address, length) == ERR)
    {
        errno = EFAULT;
        return NULL;
    }

    return vmm_unmap(space, address, length);
}
397
398void* vmm_protect(space_t* space, void* virtAddr, size_t length, pml_flags_t flags)
399{
400 if (space == NULL || virtAddr == NULL || length == 0)
401 {
402 errno = EINVAL;
403 return NULL;
404 }
405
406 if (!(flags & PML_PRESENT))
407 {
408 return vmm_unmap(space, virtAddr, length);
409 }
410
411 if (space == NULL)
412 {
413 space = vmm_kernel_space_get();
414 }
415
416 space_mapping_t mapping;
417 if (space_mapping_start(space, &mapping, virtAddr, PHYS_ADDR_INVALID, length, 1, flags) == ERR)
418 {
419 return NULL;
420 }
421
422 if (page_table_is_pinned(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
423 {
424 return space_mapping_end(space, &mapping, EBUSY);
425 }
426
427 if (page_table_is_unmapped(&space->pageTable, mapping.virtAddr, mapping.pageAmount))
428 {
429 return space_mapping_end(space, &mapping, ENOENT);
430 }
431
432 if (page_table_set_flags(&space->pageTable, mapping.virtAddr, mapping.pageAmount, mapping.flags) == ERR)
433 {
434 return space_mapping_end(space, &mapping, EINVAL);
435 }
436
437 vmm_tlb_shootdown(space, mapping.virtAddr, mapping.pageAmount);
438
439 return space_mapping_end(space, &mapping, EOK);
440}
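
// Usage sketch, not part of vmm.c: revoke write access to a user region. Note that
// passing flags without PML_PRESENT degenerates to vmm_unmap(), per the check at the
// top of vmm_protect(). example_make_readonly is a hypothetical helper.
static void* example_make_readonly(space_t* space, void* addr, size_t length)
{
    return vmm_protect(space, addr, length, vmm_prot_to_flags(PROT_READ) | PML_USER);
}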

void vmm_load(space_t* space)
{
    if (space == NULL)
    {
        return;
    }

    assert(pcpu_vmm->space != NULL);
    if (space == pcpu_vmm->space)
    {
        return;
    }

    space_t* oldSpace = pcpu_vmm->space;
    pcpu_vmm->space = NULL;

    lock_acquire(&oldSpace->lock);
    bitmap_clear(&oldSpace->cpus, SELF->id);
    lock_release(&oldSpace->lock);

    lock_acquire(&space->lock);
    bitmap_set(&space->cpus, SELF->id);
    lock_release(&space->lock);
    pcpu_vmm->space = space;

    page_table_load(&space->pageTable);
}
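
// Usage sketch, not part of vmm.c: a context switch would load the incoming process's
// address space roughly like this. vmm_load() returns early when the space is already
// active on this CPU, so calling it on every switch is cheap. example_switch_space is
// a hypothetical stand-in for the scheduler's actual switch path.
static void example_switch_space(process_t* next)
{
    vmm_load(&next->space);
}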

static void vmm_tlb_shootdown_ipi(ipi_func_data_t* data)
{
    UNUSED(data);

    vmm_cpu_t* vmm = SELF_PTR(pcpu_vmm);
    while (true)
    {
        lock_acquire(&vmm->lock);
        if (vmm->shootdownCount == 0)
        {
            lock_release(&vmm->lock);
            break;
        }

        vmm_shootdown_t shootdown = vmm->shootdowns[vmm->shootdownCount - 1];
        vmm->shootdownCount--;
        lock_release(&vmm->lock);

        assert(shootdown.space != NULL);
        assert(shootdown.pageAmount != 0);
        assert(shootdown.virtAddr != NULL);

        tlb_invalidate(shootdown.virtAddr, shootdown.pageAmount);
        atomic_fetch_add(&shootdown.space->shootdownAcks, 1);
    }
}

void vmm_tlb_shootdown(space_t* space, void* virtAddr, size_t pageAmount)
{
    if (space == NULL)
    {
        return;
    }

    if (cpu_amount() <= 1)
    {
        return;
    }

    uint16_t expectedAcks = 0;
    atomic_store(&space->shootdownAcks, 0);

    cpu_id_t id;
    BITMAP_FOR_EACH_SET(&id, &space->cpus)
    {
        if (id == SELF->id)
        {
            continue;
        }

        vmm_cpu_t* cpu = CPU_PTR(id, pcpu_vmm);

        lock_acquire(&cpu->lock);
        if (cpu->shootdownCount >= VMM_MAX_SHOOTDOWN_REQUESTS)
        {
            lock_release(&cpu->lock);
            panic(NULL, "CPU %d shootdown buffer overflow", id);
        }

        vmm_shootdown_t* shootdown = &cpu->shootdowns[cpu->shootdownCount++];
        shootdown->space = space;
        shootdown->virtAddr = virtAddr;
        shootdown->pageAmount = pageAmount;
        lock_release(&cpu->lock);

        if (ipi_send(cpu_get_by_id(id), IPI_SINGLE, vmm_tlb_shootdown_ipi, NULL) == ERR)
        {
            panic(NULL, "Failed to send TLB shootdown IPI to CPU %d", id);
        }
        expectedAcks++;
    }

    clock_t startTime = clock_uptime();
    while (atomic_load(&space->shootdownAcks) < expectedAcks)
    {
        if (clock_uptime() - startTime > SPACE_TLB_SHOOTDOWN_TIMEOUT)
        {
            panic(NULL, "TLB shootdown timeout in space %p for region %p - %p", space, virtAddr,
                (void*)((uintptr_t)virtAddr + pageAmount * PAGE_SIZE));
        }

        ASM("pause");
    }
}

SYSCALL_DEFINE(SYS_MPROTECT, void*, void* address, size_t length, prot_t prot)
{
    process_t* process = process_current();
    space_t* space = &process->space;

    if (space_check_access(space, address, length) == ERR)
    {
        errno = EFAULT;
        return NULL;
    }

    return vmm_protect(space, address, length, vmm_prot_to_flags(prot) | PML_USER);
}