PatchworkOS  19e446b
A non-POSIX operating system.
Loading...
Searching...
No Matches
space.c
Go to the documentation of this file.
1#include <kernel/cpu/ipi.h>
2#include <kernel/mem/space.h>
3
4#include <kernel/cpu/cpu.h>
5#include <kernel/log/panic.h>
6#include <kernel/mem/paging.h>
7#include <kernel/mem/pmm.h>
8#include <kernel/mem/space.h>
9#include <kernel/mem/vmm.h>
10#include <kernel/sched/clock.h>
11#include <kernel/utils/map.h>
12
13#include <assert.h>
14#include <errno.h>
15#include <stdlib.h>
16#include <string.h>
17#include <sys/math.h>
18#include <sys/proc.h>
19
// Page-allocation callback installed on a space's page table when the space is
// created with SPACE_USE_PMM_BITMAP: fills `pfns` with `pageAmount` frames,
// one allocation per iteration. On any failure all frames allocated so far are
// released and ERR is returned; on success returns 0.
20static uint64_t space_pmm_bitmap_alloc_pages(pfn_t* pfns, size_t pageAmount)
21{
22 for (size_t i = 0; i < pageAmount; i++)
23 {
// NOTE(review): original line 24 (the allocation producing `pfn`, presumably
// via pmm_alloc_bitmap()/pmm_alloc()) is not visible in this extract — confirm
// against the repository.
25 if (pfn == ERR)
26 {
// Roll back: free every frame handed out before the failure so nothing leaks.
27 for (size_t j = 0; j < i; j++)
28 {
29 pmm_free(pfns[j]);
30 }
31 return ERR;
32 }
33 pfns[i] = pfn;
34 }
35 return 0;
36}
37
// Shares a kernel region [start, end) into this space's PML4 (signature on
// original line 38, not visible here; per the index it is
// space_map_kernel_space_region(space_t*, uintptr_t start, uintptr_t end)).
// NOTE(review): lines 40-43 and 48 (startIndex computation and the entry copy,
// presumably from the kernel space's PML4) are missing from this extract.
39{
42
44 pml_index_t endIndex = PML_ADDR_TO_INDEX(end - 1, PML4) + 1; // Inclusive end
45
46 for (pml_index_t i = startIndex; i < endIndex; i++)
47 {
// Clear the owned bit so space_deinit() will not free page-table pages that
// are shared with the kernel's address space.
49 space->pageTable.pml4->entries[i].owned = 0;
50 }
51}
52
// Removes a previously shared kernel region [start, end) from this space's
// PML4 by zeroing the raw top-level entries (signature on original line 53,
// not visible here; per the index it is
// space_unmap_kernel_space_region(space_t*, uintptr_t start, uintptr_t end)).
// NOTE(review): line 55 (the startIndex computation) is missing from this
// extract.
54{
56 pml_index_t endIndex = PML_ADDR_TO_INDEX(end - 1, PML4) + 1; // Inclusive end
57
58 for (pml_index_t i = startIndex; i < endIndex; i++)
59 {
// Zero the whole entry; the underlying tables belong to the kernel space.
60 space->pageTable.pml4->entries[i].raw = 0;
61 }
62}
63
// Initializes a virtual address space (signature on original line 64:
// space_init(space_t*, uintptr_t startAddress, uintptr_t endAddress,
// space_flags_t flags) per the index below). Returns 0 on success, ERR with
// errno set on failure.
// NOTE(review): lines 72-75, 80, 84, 103-116 are missing from this extract;
// they appear to branch on SPACE_USE_PMM_BITMAP when calling page_table_init()
// and to call space_map_kernel_space_region() for the SPACE_MAP_* flags —
// confirm against the repository.
65{
66 if (space == NULL)
67 {
68 errno = EINVAL;
69 return ERR;
70 }
71
73 {
75 {
76 errno = ENOMEM;
77 return ERR;
78 }
79 // We only use the bitmap pmm allocator for the page table itself, not for mappings.
81 }
82 else
83 {
85 {
86 errno = ENOMEM;
87 return ERR;
88 }
89 }
90
// Bookkeeping: pin map, allocation window, callback table and lock state.
91 map_init(&space->pinnedPages);
92 space->startAddress = startAddress;
93 space->endAddress = endAddress;
94 space->freeAddress = startAddress;
95 space->flags = flags;
96 space->callbacks = NULL;
97 space->callbacksLength = 0;
98 BITMAP_DEFINE_INIT(space->callbackBitmap, PML_MAX_CALLBACK);
99 BITMAP_DEFINE_INIT(space->cpus, CPU_MAX);
100 atomic_init(&space->shootdownAcks, 0);
101 lock_init(&space->lock);
102
104 {
106 }
107
109 {
111 }
112
114 {
116 }
117
118 return 0;
119}
120
// Deinitializes a virtual address space (signature on original line 121:
// space_deinit(space_t*) per the index below). Safe to call with NULL.
// Panics if any CPU still has the space loaded.
122{
123 if (space == NULL)
124 {
125 return;
126 }
127
// A set bit in `cpus` means some CPU is still running on this page table;
// tearing it down now would be a use-after-free of live translations.
128 if (!bitmap_is_empty(&space->cpus))
129 {
130 panic(NULL, "Attempted to free address space still in use by CPUs");
131 }
132
// Fire every registered teardown callback before unmapping anything.
133 uint64_t index;
134 BITMAP_FOR_EACH_SET(&index, &space->callbackBitmap)
135 {
136 space->callbacks[index].func(space->callbacks[index].data);
137 }
138
// Detach shared kernel regions so page_table_deinit() won't touch them.
// NOTE(review): lines 141, 146, 151 (the space_unmap_kernel_space_region()
// calls) and 155 (presumably page_table_deinit()) are missing from this
// extract — confirm against the repository.
139 if (space->flags & SPACE_MAP_KERNEL_BINARY)
140 {
142 }
143
144 if (space->flags & SPACE_MAP_KERNEL_HEAP)
145 {
147 }
148
149 if (space->flags & SPACE_MAP_IDENTITY)
150 {
152 }
153
154 free(space->callbacks);
156}
157
158static void space_align_region(void** virtAddr, size_t* length)
159{
160 void* aligned = (void*)ROUND_DOWN(*virtAddr, PAGE_SIZE);
161 *length += ((uintptr_t)*virtAddr - (uintptr_t)aligned);
162 *virtAddr = aligned;
163}
164
// Demand-populates a page-aligned user region: for each of `pageAmount` pages
// starting at `buffer`, allocates and maps a fresh physical frame unless the
// page is already mapped. Returns 0 on success, ERR on allocation or mapping
// failure (pages mapped in earlier iterations are left in place; the caller's
// error path handles them). Caller must hold space->lock.
165static uint64_t space_populate_user_region(space_t* space, const void* buffer, size_t pageAmount)
166{
167 for (size_t i = 0; i < pageAmount; i++)
168 {
169 uintptr_t addr = (uintptr_t)buffer + (i * PAGE_SIZE);
// Skip pages that already have a translation.
170 if (page_table_is_mapped(&space->pageTable, (void*)addr, 1))
171 {
172 continue;
173 }
174
175 pfn_t pfn = pmm_alloc();
176 if (pfn == ERR)
177 {
178 return ERR;
179 }
180
// NOTE(review): original line 182 (the pml_flags_t argument and callback id
// of this call, presumably PML_PRESENT | PML_WRITE | PML_USER | PML_OWNED,
// PML_CALLBACK_NONE) is missing from this extract — confirm against the
// repository.
181 if (page_table_map(&space->pageTable, (void*)addr, PFN_TO_PHYS(pfn), 1,
183 {
// Mapping failed: give the freshly allocated frame back.
184 pmm_free(pfn);
185 return ERR;
186 }
187 }
188
189 return 0;
190}
191
// Decrements the pin depth of `amount` pages starting at `address`. Pages
// pinned exactly once carry only the page-table `pinned` bit; pages pinned
// more than once additionally have a refcounted entry in space->pinnedPages.
// Unmapped or unpinned pages are silently skipped. Caller must hold
// space->lock.
192static void space_pin_depth_dec(space_t* space, const void* address, uint64_t amount)
193{
// NOTE(review): lines 194/196 (the PAGE_TABLE_TRAVERSE_CREATE initializer for
// `traverse`) and 210 (the map_key_uint64() construction of `key`) are missing
// from this extract — confirm against the repository.
195
197 for (uint64_t i = 0; i < amount; i++)
198 {
199 const void* addr = address + (i * PAGE_SIZE);
200 if (page_table_traverse(&space->pageTable, &traverse, addr, PML_NONE) == ERR)
201 {
202 continue;
203 }
204
205 if (!traverse.entry->present || !traverse.entry->pinned)
206 {
207 continue;
208 }
209
211 map_entry_t* entry = map_get(&space->pinnedPages, &key);
212 if (entry == NULL) // Not pinned more than once
213 {
// Depth was exactly one: just drop the page-table bit.
214 traverse.entry->pinned = false;
215 continue;
216 }
217
218 space_pinned_page_t* pinnedPage = CONTAINER_OF(entry, space_pinned_page_t, mapEntry);
219 pinnedPage->pinCount--;
// When the refcount reaches zero the map entry is retired and the page
// becomes fully unpinned.
220 if (pinnedPage->pinCount == 0)
221 {
222 map_remove(&space->pinnedPages, &pinnedPage->mapEntry);
223 free(pinnedPage);
224 traverse.entry->pinned = false;
225 }
226 }
227}
228
// Increments the pin depth of `amount` pages starting at `address`. A first
// pin only sets the page-table `pinned` bit; a second pin allocates a
// refcounted space_pinned_page_t in space->pinnedPages (starting at 2: one for
// the bit, one for the map entry). On allocation/insert failure the pages
// pinned so far are rolled back via space_pin_depth_dec() and ERR is returned.
// Unmapped pages are silently skipped. Caller must hold space->lock.
229static inline uint64_t space_pin_depth_inc(space_t* space, const void* address, uint64_t amount)
230{
// NOTE(review): lines 231/233 (the PAGE_TABLE_TRAVERSE_CREATE initializer for
// `traverse`) and 253 (the map_key_uint64() construction of `key`) are missing
// from this extract — confirm against the repository.
232
234 for (uint64_t i = 0; i < amount; i++)
235 {
236 const void* addr = address + (i * PAGE_SIZE);
237 if (page_table_traverse(&space->pageTable, &traverse, addr, PML_NONE) == ERR)
238 {
239 continue;
240 }
241
242 if (!traverse.entry->present)
243 {
244 continue;
245 }
246
// First pin: the bit alone encodes depth 1, no map entry needed.
247 if (!traverse.entry->pinned)
248 {
249 traverse.entry->pinned = true;
250 continue;
251 }
252
254 map_entry_t* entry = map_get(&space->pinnedPages, &key);
255 if (entry != NULL) // Already pinned more than once
256 {
257 space_pinned_page_t* pinnedPage = CONTAINER_OF(entry, space_pinned_page_t, mapEntry);
258 pinnedPage->pinCount++;
259 continue;
260 }
261
// Second pin of this page: promote it to a refcounted map entry.
262 space_pinned_page_t* newPinnedPage = malloc(sizeof(space_pinned_page_t));
263 if (newPinnedPage == NULL)
264 {
// Roll back the `i` pages already pinned by this call.
265 space_pin_depth_dec(space, address, i);
266 return ERR;
267 }
268 map_entry_init(&newPinnedPage->mapEntry);
269 newPinnedPage->pinCount = 2; // One for the page table, one for the map
270 if (map_insert(&space->pinnedPages, &key, &newPinnedPage->mapEntry) == ERR)
271 {
272 free(newPinnedPage);
273 space_pin_depth_dec(space, address, i);
274 return ERR;
275 }
276 }
277
278 return 0;
279}
280
// Pins all pages covering [buffer, buffer + length) so they cannot be
// unmapped while in use. If part of the region is unmapped but lies inside
// the caller's user stack (per `userStack`), it is demand-populated first.
// Returns 0 on success, ERR with errno set (EINVAL/EFAULT/ENOMEM, and
// presumably EOVERFLOW for a wrapping range) otherwise. Pinned pages must
// later be released with space_unpin().
281uint64_t space_pin(space_t* space, const void* buffer, size_t length, stack_pointer_t* userStack)
282{
283 if (space == NULL || (buffer == NULL && length != 0))
284 {
285 errno = EINVAL;
286 return ERR;
287 }
288
// Zero-length pins are a successful no-op.
289 if (length == 0)
290 {
291 return 0;
292 }
293
// Reject ranges whose end wraps around the top of the address space.
// NOTE(review): line 297 (presumably `errno = EOVERFLOW;`) is missing from
// this extract — confirm against the repository.
294 uintptr_t bufferOverflow = (uintptr_t)buffer + length;
295 if (bufferOverflow < (uintptr_t)buffer)
296 {
298 return ERR;
299 }
300
301 LOCK_SCOPE(&space->lock);
302
// Work on whole pages from here on.
303 space_align_region((void**)&buffer, &length);
304 size_t pageAmount = BYTES_TO_PAGES(length);
305
306 if (!page_table_is_mapped(&space->pageTable, buffer, pageAmount))
307 {
// Only lazily-faulted stack memory may be populated on demand; anything
// else unmapped is a bad address.
308 if (userStack == NULL || !stack_pointer_is_in_stack(userStack, (uintptr_t)buffer, length))
309 {
310 errno = EFAULT;
311 return ERR;
312 }
313
314 if (space_populate_user_region(space, buffer, pageAmount) == ERR)
315 {
316 errno = ENOMEM;
317 return ERR;
318 }
319 }
320
321 if (space_pin_depth_inc(space, buffer, pageAmount) == ERR)
322 {
323 errno = ENOMEM;
324 return ERR;
325 }
326
327 return 0;
328}
329
// Pins a region page-by-page while scanning forward for a terminator value of
// `objectSize` bytes (e.g. the NUL of a C string when objectSize == 1),
// stopping after at most `maxCount` objects. On success returns the byte
// length up to and including the terminator, leaving the scanned pages pinned
// for the caller to space_unpin() later. On failure all pages pinned by this
// call are unpinned and ERR is returned with errno set.
330uint64_t space_pin_terminated(space_t* space, const void* address, const void* terminator, size_t objectSize,
331 size_t maxCount, stack_pointer_t* userStack)
332{
333 if (space == NULL || address == NULL || terminator == NULL || objectSize == 0 || maxCount == 0)
334 {
335 errno = EINVAL;
336 return ERR;
337 }
338
// terminatorMatchedBytes tracks how many bytes of a multi-byte terminator
// have matched so far across page boundaries.
339 size_t terminatorMatchedBytes = 0;
340 uintptr_t current = (uintptr_t)address;
341 uintptr_t end = (uintptr_t)address + (maxCount * objectSize);
// Reject ranges that wrap around the top of the address space.
// NOTE(review): line 344 (presumably `errno = EOVERFLOW;`) is missing from
// this extract — confirm against the repository.
342 if (end < (uintptr_t)address)
343 {
345 return ERR;
346 }
347
348 LOCK_SCOPE(&space->lock);
349
350 uint64_t pinnedPages = 0;
351 while (current < end)
352 {
// Fault in lazily-allocated stack pages; anything else unmapped is EFAULT.
353 if (!page_table_is_mapped(&space->pageTable, (void*)current, 1))
354 {
355 if (userStack == NULL || !stack_pointer_is_in_stack(userStack, current, 1))
356 {
357 errno = EFAULT;
358 goto error;
359 }
360
361 if (space_populate_user_region(space, (void*)ROUND_DOWN(current, PAGE_SIZE), 1) == ERR)
362 {
363 errno = ENOMEM;
364 goto error;
365 }
366 }
367
// Pin one page at a time so we never touch memory beyond what is pinned.
368 if (space_pin_depth_inc(space, (void*)current, 1) == ERR)
369 {
370 errno = ENOMEM;
371 goto error;
372 }
373 pinnedPages++;
374
// Scan only within the freshly pinned page (or up to `end`).
375 uintptr_t scanEnd = MIN(ROUND_UP(current + 1, PAGE_SIZE), end);
376
377 if (objectSize == 1)
378 {
// Fast path: single-byte terminator, plain byte scan.
379 uint8_t term = *(const uint8_t*)terminator;
380 for (uintptr_t scanAddr = current; scanAddr < scanEnd; scanAddr++)
381 {
382 if (*(uint8_t*)scanAddr == term)
383 {
// Success: pages stay pinned; caller unpins when done.
384 return scanAddr - (uintptr_t)address + 1;
385 }
386 }
387 current = scanEnd;
388 }
389 else
390 {
// Multi-byte terminator: byte-wise match that may span pages.
391 uintptr_t scanAddr = current;
392 while (scanAddr < scanEnd)
393 {
394 if (*((uint8_t*)scanAddr) == ((uint8_t*)terminator)[terminatorMatchedBytes])
395 {
396 terminatorMatchedBytes++;
397 if (terminatorMatchedBytes == objectSize)
398 {
399 return scanAddr - (uintptr_t)address + 1;
400 }
401 scanAddr++;
402 }
403 else
404 {
// Mismatch: jump to the next object boundary after the failed
// match start. NOTE(review): this assumes terminators are aligned
// to objectSize strides — confirm intended semantics.
405 scanAddr = scanAddr - terminatorMatchedBytes + objectSize;
406 terminatorMatchedBytes = 0;
407 }
408 }
409 current = scanAddr;
410 }
411 }
412
// Also reached by falling off the loop when no terminator was found within
// maxCount objects. NOTE(review): that path does not set errno — confirm
// whether callers rely on a specific value there.
413error:
414 space_pin_depth_dec(space, address, pinnedPages);
415 return ERR;
416}
417
418void space_unpin(space_t* space, const void* address, size_t length)
419{
420 if (space == NULL || (address == NULL && length != 0))
421 {
422 return;
423 }
424
425 if (length == 0)
426 {
427 return;
428 }
429
430 uintptr_t overflow = (uintptr_t)address + length;
431 if (overflow < (uintptr_t)address)
432 {
433 return;
434 }
435
436 LOCK_SCOPE(&space->lock);
437
438 space_align_region((void**)&address, &length);
439 uint64_t pageAmount = BYTES_TO_PAGES(length);
440
441 space_pin_depth_dec(space, address, pageAmount);
442}
443
// Checks that [addr, addr + length) lies entirely within this space's allowed
// address range [startAddress, endAddress]. Returns 0 if accessible, ERR with
// errno set to EINVAL (bad arguments), EFAULT (out of range) or presumably
// EOVERFLOW (wrapping range) otherwise. Does not check whether the region is
// actually mapped.
444uint64_t space_check_access(space_t* space, const void* addr, size_t length)
445{
446 if (space == NULL || (addr == NULL && length != 0))
447 {
448 errno = EINVAL;
449 return ERR;
450 }
451
// An empty region is trivially accessible.
452 if (length == 0)
453 {
454 return 0;
455 }
456
// Reject ranges whose end wraps around the top of the address space.
// NOTE(review): line 460 (presumably `errno = EOVERFLOW;`) is missing from
// this extract — confirm against the repository.
457 uintptr_t addrOverflow = (uintptr_t)addr + length;
458 if (addrOverflow < (uintptr_t)addr)
459 {
461 return ERR;
462 }
463
464 if ((uintptr_t)addr < space->startAddress || addrOverflow > space->endAddress)
465 {
466 errno = EFAULT;
467 return ERR;
468 }
469
470 return 0;
471}
472
473static void* space_find_free_region(space_t* space, uint64_t pageAmount, uint64_t alignment)
474{
475 void* addr;
476 if (page_table_find_unmapped_region(&space->pageTable, (void*)space->freeAddress, (void*)space->endAddress,
477 pageAmount, alignment, &addr) != ERR)
478 {
479 space->freeAddress = (uintptr_t)addr + pageAmount * PAGE_SIZE;
480 assert(page_table_is_unmapped(&space->pageTable, addr, pageAmount));
481 return addr;
482 }
483
484 if (page_table_find_unmapped_region(&space->pageTable, (void*)space->startAddress, (void*)space->freeAddress,
485 pageAmount, alignment, &addr) != ERR)
486 {
487 assert(page_table_is_unmapped(&space->pageTable, addr, pageAmount));
488 return addr;
489 }
490
491 return NULL;
492}
493
// Prepares a mapping operation: validates the request, picks (or aligns) the
// virtual region, normalizes the physical address, and fills `mapping` for
// the caller to act on. On success returns 0 WITH space->lock still held; the
// caller must finish with space_mapping_end(), which releases it. On failure
// returns ERR with errno set and the lock released.
494uint64_t space_mapping_start(space_t* space, space_mapping_t* mapping, void* virtAddr, phys_addr_t physAddr,
495 size_t length, size_t alignment, pml_flags_t flags)
496{
497 if (space == NULL || mapping == NULL || length == 0)
498 {
499 errno = EINVAL;
500 return ERR;
501 }
502
// Reject virtual ranges whose end wraps the address space.
// NOTE(review): lines 506 and 513 (presumably `errno = EOVERFLOW;`) are
// missing from this extract — confirm against the repository.
503 uintptr_t virtOverflow = (uintptr_t)virtAddr + length;
504 if (virtOverflow < (uintptr_t)virtAddr)
505 {
507 return ERR;
508 }
509
// Same wrap check for the physical range when one was supplied.
510 uintptr_t physOverflow = (uintptr_t)physAddr + length;
511 if (physAddr != PHYS_ADDR_INVALID && physOverflow < (uintptr_t)physAddr)
512 {
514 return ERR;
515 }
516
// User mappings with an explicit address must stay inside user space.
517 if (flags & PML_USER)
518 {
519 if (virtAddr != NULL &&
520 ((uintptr_t)virtAddr + length > VMM_USER_SPACE_MAX || (uintptr_t)virtAddr < VMM_USER_SPACE_MIN))
521 {
522 errno = EFAULT;
523 return ERR;
524 }
525 }
526
// Pre-fault our own stack so we cannot page-fault while holding the lock.
527 stack_pointer_poke(1000); // 1000 bytes should be enough.
528
529 lock_acquire(&space->lock);
530
531 uint64_t pageAmount;
532 if (virtAddr == NULL)
533 {
// Caller let us choose the region.
534 pageAmount = BYTES_TO_PAGES(length);
535 virtAddr = space_find_free_region(space, pageAmount, alignment);
536 if (virtAddr == NULL)
537 {
538 lock_release(&space->lock);
539 errno = ENOMEM;
540 return ERR;
541 }
542 }
543 else
544 {
// Caller supplied an address: page-align it and verify the alignment
// request. NOTE(review): `virtAddr % alignment` divides by `alignment` —
// confirm callers can never pass alignment == 0 on this path.
545 space_align_region(&virtAddr, &length);
546 pageAmount = BYTES_TO_PAGES(length);
547
548 if ((uintptr_t)virtAddr % alignment != 0)
549 {
550 lock_release(&space->lock);
551 errno = EINVAL;
552 return ERR;
553 }
554 }
555
556 mapping->virtAddr = virtAddr;
557 if (physAddr != PHYS_ADDR_INVALID)
558 {
// Page-align and force the physical address into the lower half.
559 mapping->physAddr = PML_ENSURE_LOWER_HALF(ROUND_DOWN(physAddr, PAGE_SIZE));
560 }
561 else
562 {
563 mapping->physAddr = PHYS_ADDR_INVALID;
564 }
565
566 mapping->flags = flags;
567 mapping->pageAmount = pageAmount;
568 return 0; // We return with the lock still acquired.
569}
570
// Allocates a page-table callback slot (signature on original line 571:
// space_alloc_callback(space_t*, size_t pageAmount, space_callback_func_t
// func, void* data) per the index below). Returns the callback id, or
// PML_MAX_CALLBACK on failure. Callbacks registered here are invoked by
// space_deinit().
// NOTE(review): no locking is visible in this function — confirm the caller
// is expected to hold space->lock.
572{
573 if (space == NULL)
574 {
575 return PML_MAX_CALLBACK;
576 }
577
// Find the lowest unused callback id.
578 pml_callback_id_t callbackId = bitmap_find_first_clear(&space->callbackBitmap, 0, PML_MAX_CALLBACK);
579 if (callbackId == PML_MAX_CALLBACK)
580 {
581 return PML_MAX_CALLBACK;
582 }
583
// Grow the callbacks array on demand so it always covers callbackId.
584 if (callbackId >= space->callbacksLength)
585 {
586 space_callback_t* newCallbacks = malloc(sizeof(space_callback_t) * (callbackId + 1));
587 if (newCallbacks == NULL)
588 {
589 return PML_MAX_CALLBACK;
590 }
591
592 if (space->callbacks != NULL)
593 {
594 memcpy(newCallbacks, space->callbacks, sizeof(space_callback_t) * space->callbacksLength);
595 free(space->callbacks);
596 }
// Zero the newly added tail entries.
597 memset(&newCallbacks[space->callbacksLength], 0,
598 sizeof(space_callback_t) * (callbackId + 1 - space->callbacksLength));
599
600 space->callbacks = newCallbacks;
601 space->callbacksLength = callbackId + 1;
602 }
603
604 bitmap_set(&space->callbackBitmap, callbackId);
605 space_callback_t* callback = &space->callbacks[callbackId];
606 callback->func = func;
607 callback->data = data;
608 callback->pageAmount = pageAmount;
609 return callbackId;
610}
611
// Frees a callback slot allocated with space_alloc_callback() by clearing its
// bit; the array entry itself is left in place for reuse (signature on
// original line 612: space_free_callback(space_t*, pml_callback_id_t) per the
// index below). NOTE(review): unlike its siblings this function does not
// NULL-check `space` — confirm callers guarantee a valid pointer.
613{
614 bitmap_clear(&space->callbackBitmap, callbackId);
615}
616
617static void space_update_free_address(space_t* space, uintptr_t virtAddr, uint64_t pageAmount)
618{
619 if (space == NULL)
620 {
621 return;
622 }
623
624 if (virtAddr <= space->freeAddress && space->freeAddress < virtAddr + pageAmount * PAGE_SIZE)
625 {
626 space->freeAddress = virtAddr + pageAmount * PAGE_SIZE;
627 }
628}
629
// Completes a mapping operation begun with space_mapping_start() (signature
// on original line 630: space_mapping_end(space_t*, space_mapping_t*,
// errno_t err) per the index below). Pass EOK on success to get the mapped
// virtual address back, or the failure errno to propagate it; either way the
// lock taken by space_mapping_start() is released.
// NOTE(review): if `mapping` is NULL while `space` is valid, this returns
// without releasing space->lock — confirm that combination is impossible for
// callers.
631{
632 if (space == NULL || mapping == NULL)
633 {
634 errno = EINVAL;
635 return NULL;
636 }
637
638 if (err != EOK)
639 {
640 lock_release(&space->lock); // Release the lock from space_mapping_start.
641 errno = err;
642 return NULL;
643 }
644
// Keep the allocation hint from pointing into the region just mapped.
645 space_update_free_address(space, (uintptr_t)mapping->virtAddr, mapping->pageAmount);
646 lock_release(&space->lock); // Release the lock from space_mapping_start.
647 return mapping->virtAddr;
648}
649
650bool space_is_mapped(space_t* space, const void* virtAddr, size_t length)
651{
652 space_align_region((void**)&virtAddr, &length);
653 LOCK_SCOPE(&space->lock);
654 return page_table_is_mapped(&space->pageTable, virtAddr, BYTES_TO_PAGES(length));
655}
656
// Returns the number of user pages allocated in the address space, or ERR
// with errno set to EINVAL when `space` is NULL (signature on original line
// 657: space_user_page_count(space_t*) per the index below).
// NOTE(review): lines 666-667 (the return statement, presumably a
// page_table_count_pages_with_flags() call over the user range) are missing
// from this extract — confirm against the repository.
658{
659 if (space == NULL)
660 {
661 errno = EINVAL;
662 return ERR;
663 }
664
665 LOCK_SCOPE(&space->lock);
668}
669
670phys_addr_t space_virt_to_phys(space_t* space, const void* virtAddr)
671{
672 if (space == NULL)
673 {
674 errno = EINVAL;
675 return ERR;
676 }
677
678 phys_addr_t physAddr;
679 LOCK_SCOPE(&space->lock);
680 if (page_table_get_phys_addr(&space->pageTable, (void*)virtAddr, &physAddr) == ERR)
681 {
682 errno = EFAULT;
683 return ERR;
684 }
685
686 return physAddr;
687}
#define assert(expression)
Definition assert.h:29
EFI_PHYSICAL_ADDRESS buffer
Definition main.c:237
static void start()
Definition main.c:542
static fd_t data
Definition dwm.c:21
int errno_t
Definition errno_t.h:4
bool stack_pointer_is_in_stack(stack_pointer_t *stack, uintptr_t addr, uint64_t length)
Check if an region is within the stack.
void stack_pointer_poke(uint64_t offset)
Poke the stack to ensure that a page fault will occur at the given offset.
#define CPU_MAX
Maximum number of CPUs supported.
Definition cpu.h:50
static uintptr_t address
Mapped virtual address of the HPET registers.
Definition hpet.c:96
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
Definition panic.c:292
#define PFN_TO_PHYS(_pfn)
Convert a PFN to its physical address.
uintptr_t phys_addr_t
Physical address type.
static uint64_t page_table_count_pages_with_flags(page_table_t *table, void *addr, size_t amount, pml_flags_t flags)
Counts the number of pages in a range that have all the specified flags set.
Definition paging.h:1012
static uint64_t page_table_traverse(page_table_t *table, page_table_traverse_t *traverse, const void *addr, pml_flags_t flags)
Allows for fast traversal of the page table by caching previously accessed layers.
Definition paging.h:255
pml_index_t
Indexes into a pml level.
static uint64_t page_table_get_phys_addr(page_table_t *table, void *addr, phys_addr_t *out)
Retrieves the physical address mapped to a given virtual address.
Definition paging.h:303
#define PML_CALLBACK_NONE
Special callback ID that indicates no callback is associated with the page.
static bool page_table_is_unmapped(page_table_t *table, void *addr, size_t amount)
Checks if a range of virtual addresses is completely unmapped.
Definition paging.h:363
#define PML_ADDR_TO_INDEX(addr, level)
Calculates the index into a page table level for a given virtual address.
static uint64_t page_table_map(page_table_t *table, void *addr, phys_addr_t phys, size_t amount, pml_flags_t flags, pml_callback_id_t callbackId)
Maps a range of virtual addresses to physical addresses in the page table.
Definition paging.h:396
uint8_t pml_callback_id_t
Callback ID type.
#define PAGE_TABLE_TRAVERSE_CREATE
Create a page_table_traverse_t initializer.
Definition paging.h:231
#define PML_MAX_CALLBACK
Maximum number of callbacks that can be registered for a page table.
static void page_table_deinit(page_table_t *table)
Deinitializes a page table, freeing all allocated pages.
Definition paging.h:148
#define PHYS_ADDR_INVALID
Invalid physical address.
size_t pfn_t
Page Frame Number type.
static uint64_t page_table_init(page_table_t *table, pml_alloc_pages_t allocPages, pml_free_pages_t freePages)
Initializes a page table.
Definition paging.h:132
static bool page_table_is_mapped(page_table_t *table, const void *addr, size_t amount)
Checks if a range of virtual addresses is completely mapped.
Definition paging.h:334
static uint64_t page_table_find_unmapped_region(page_table_t *table, void *startAddr, void *endAddr, size_t amount, size_t alignment, void **outAddr)
Finds the first contiguous unmapped region with the given number of pages within the specified addres...
Definition paging.h:728
#define PML_ENSURE_LOWER_HALF(addr)
Ensures that the given address is in the lower half of the address space.
@ PML4
@ PML_USER
@ PML_PRESENT
@ PML_WRITE
@ PML_NONE
@ PML_OWNED
void pmm_free_pages(pfn_t *pfns, size_t count)
Free multiple pages of physical memory.
Definition pmm.c:348
uint64_t pmm_alloc_pages(pfn_t *pfns, size_t count)
Allocate multiple pages of physical memory.
Definition pmm.c:275
void pmm_free(pfn_t pfn)
Free a single page of physical memory.
Definition pmm.c:341
pfn_t pmm_alloc(void)
Allocate a single page of physical memory.
Definition pmm.c:249
pfn_t pmm_alloc_bitmap(size_t count, pfn_t maxPfn, pfn_t alignPfn)
Allocate a contiguous region of physical memory using the bitmap.
Definition pmm.c:317
uint64_t space_pin_terminated(space_t *space, const void *address, const void *terminator, size_t objectSize, size_t maxCount, stack_pointer_t *userStack)
Pins a region of memory terminated by a terminator value.
Definition space.c:330
void * space_mapping_end(space_t *space, space_mapping_t *mapping, errno_t err)
Performs cleanup after changes to the address space mappings.
Definition space.c:630
bool space_is_mapped(space_t *space, const void *virtAddr, size_t length)
Checks if a virtual memory region is fully mapped.
Definition space.c:650
void space_unpin(space_t *space, const void *address, size_t length)
Unpins pages in a region previously pinned with space_pin() or space_pin_terminated().
Definition space.c:418
void space_free_callback(space_t *space, pml_callback_id_t callbackId)
Free a callback.
Definition space.c:612
uint64_t space_mapping_start(space_t *space, space_mapping_t *mapping, void *virtAddr, phys_addr_t physAddr, size_t length, size_t alignment, pml_flags_t flags)
Prepare for changes to the address space mappings.
Definition space.c:494
void(* space_callback_func_t)(void *data)
Space callback function.
Definition space.h:42
space_flags_t
Flags for space initialization.
Definition space.h:27
pml_callback_id_t space_alloc_callback(space_t *space, size_t pageAmount, space_callback_func_t func, void *data)
Allocate a callback.
Definition space.c:571
uint64_t space_user_page_count(space_t *space)
Get the number of user pages allocated in the address space.
Definition space.c:657
phys_addr_t space_virt_to_phys(space_t *space, const void *virtAddr)
Translate a virtual address to a physical address in the address space.
Definition space.c:670
uint64_t space_pin(space_t *space, const void *buffer, size_t length, stack_pointer_t *userStack)
Pins pages within a region of the address space.
Definition space.c:281
uint64_t space_check_access(space_t *space, const void *addr, size_t length)
Checks if a virtual memory region is within the allowed address range of the space.
Definition space.c:444
void space_deinit(space_t *space)
Deinitializes a virtual address space.
Definition space.c:121
uint64_t space_init(space_t *space, uintptr_t startAddress, uintptr_t endAddress, space_flags_t flags)
Initializes a virtual address space.
Definition space.c:64
@ SPACE_MAP_KERNEL_HEAP
Map the kernel heap into the address space.
Definition space.h:35
@ SPACE_USE_PMM_BITMAP
Definition space.h:33
@ SPACE_MAP_IDENTITY
Map the identity mapped physical memory into the address space.
Definition space.h:36
@ SPACE_MAP_KERNEL_BINARY
Map the kernel binary into the address space.
Definition space.h:34
#define VMM_KERNEL_HEAP_MAX
The maximum address for the kernel heap.
Definition vmm.h:68
#define VMM_KERNEL_BINARY_MAX
The maximum address for the content of the kernel binary.
Definition vmm.h:61
space_t * vmm_kernel_space_get(void)
Retrieves the kernel's address space.
Definition vmm.c:123
#define VMM_IDENTITY_MAPPED_MIN
The minimum address for the identity mapped physical memory.
Definition vmm.h:72
#define VMM_USER_SPACE_MAX
The maximum address for user space.
Definition vmm.h:74
#define VMM_KERNEL_BINARY_MIN
The minimum address for the content of the kernel binary.
Definition vmm.h:62
#define VMM_USER_SPACE_MIN
The minimum address for user space.
Definition vmm.h:75
#define VMM_IDENTITY_MAPPED_MAX
The maximum address for the identity mapped physical memory.
Definition vmm.h:71
#define VMM_KERNEL_HEAP_MIN
The minimum address for the kernel heap.
Definition vmm.h:69
static void lock_init(lock_t *lock)
Initializes a lock.
Definition lock.h:79
#define LOCK_SCOPE(lock)
Acquires a lock for the reminder of the current scope.
Definition lock.h:58
static void lock_release(lock_t *lock)
Releases a lock.
Definition lock.h:175
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
Definition lock.h:96
void map_init(map_t *map)
Initialize a map.
Definition map.c:142
void map_entry_init(map_entry_t *entry)
Initialize a map entry.
Definition map.c:37
static map_key_t map_key_uint64(uint64_t uint64)
Create a map key from a uint64_t.
Definition map.h:129
uint64_t map_insert(map_t *map, const map_key_t *key, map_entry_t *value)
Insert a key-value pair into the map.
Definition map.c:170
void map_remove(map_t *map, map_entry_t *entry)
Remove a entry from the map.
Definition map.c:319
map_entry_t * map_get(map_t *map, const map_key_t *key)
Get a value from the map by key.
Definition map.c:253
#define EINVAL
Invalid argument.
Definition errno.h:142
#define EFAULT
Bad address.
Definition errno.h:102
#define ENOMEM
Out of memory.
Definition errno.h:92
#define EOVERFLOW
Value too large for defined data type.
Definition errno.h:402
#define errno
Error number variable.
Definition errno.h:27
#define EOK
No error.
Definition errno.h:32
static uint64_t bitmap_find_first_clear(bitmap_t *map, uint64_t startIdx, uint64_t endIdx)
Find the first clear bit in the bitmap.
Definition bitmap.h:306
static bool bitmap_is_empty(bitmap_t *map)
Check if the bitmap is empty (all bits clear).
Definition bitmap.h:141
static void bitmap_clear(bitmap_t *map, uint64_t index)
Clear a bit in the bitmap.
Definition bitmap.h:244
static void bitmap_set(bitmap_t *map, uint64_t index)
Set a bit in the bitmap.
Definition bitmap.h:190
#define BITMAP_FOR_EACH_SET(idx, map)
Iterate over each set bit in the bitmap.
Definition bitmap.h:58
#define BITMAP_DEFINE_INIT(name, bits)
Initialize a bitmap defined with BITMAP_DEFINE.
Definition bitmap.h:117
#define MIN(x, y)
Definition math.h:18
#define ROUND_DOWN(number, multiple)
Definition math.h:23
#define ROUND_UP(number, multiple)
Definition math.h:21
#define BYTES_TO_PAGES(amount)
Convert a size in bytes to pages.
Definition proc.h:107
#define NULL
Pointer error value.
Definition NULL.h:25
#define ERR
Integer error value.
Definition ERR.h:17
#define PAGE_SIZE
The size of a memory page in bytes.
Definition PAGE_SIZE.h:8
#define CONTAINER_OF(ptr, type, member)
Container of macro.
static const path_flag_t flags[]
Definition path.c:47
static uint64_t space_pmm_bitmap_alloc_pages(pfn_t *pfns, size_t pageAmount)
Definition space.c:20
static void space_map_kernel_space_region(space_t *space, uintptr_t start, uintptr_t end)
Definition space.c:38
static void space_update_free_address(space_t *space, uintptr_t virtAddr, uint64_t pageAmount)
Definition space.c:617
static uint64_t space_populate_user_region(space_t *space, const void *buffer, size_t pageAmount)
Definition space.c:165
static uint64_t space_pin_depth_inc(space_t *space, const void *address, uint64_t amount)
Definition space.c:229
static void * space_find_free_region(space_t *space, uint64_t pageAmount, uint64_t alignment)
Definition space.c:473
static void space_align_region(void **virtAddr, size_t *length)
Definition space.c:158
static void space_pin_depth_dec(space_t *space, const void *address, uint64_t amount)
Definition space.c:192
static void space_unmap_kernel_space_region(space_t *space, uintptr_t start, uintptr_t end)
Definition space.c:53
#define atomic_init(obj, value)
Definition stdatomic.h:75
__UINT64_TYPE__ uint64_t
Definition stdint.h:17
__UINT8_TYPE__ uint8_t
Definition stdint.h:11
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:43
#define UINT32_MAX
Definition stdint.h:70
_PUBLIC void * malloc(size_t size)
Definition malloc.c:5
_PUBLIC void free(void *ptr)
Definition free.c:11
_PUBLIC void * memcpy(void *_RESTRICT s1, const void *_RESTRICT s2, size_t n)
Definition memcpy.c:61
_PUBLIC void * memset(void *s, int c, size_t n)
Definition memset.c:4
Map entry structure.
Definition map.h:69
Map key stucture.
Definition map.h:57
pml_alloc_pages_t allocPages
Helper structure for fast traversal of the page table.
Definition paging.h:213
pml_entry_t * entry
Definition paging.h:223
uint64_t pinned
uint64_t owned
uint64_t raw
uint64_t present
If set the page is present in memory and readable.
A entry in a page table without a specified address or callback ID.
pml_entry_t entries[PML_INDEX_AMOUNT]
uint64_t pageAmount
Definition space.h:52
space_callback_func_t func
Definition space.h:50
void * data
Definition space.h:51
Helper structure for managing address space mappings.
Definition space.h:215
phys_addr_t physAddr
Definition space.h:217
size_t pageAmount
Definition space.h:218
pml_flags_t flags
Definition space.h:219
void * virtAddr
Definition space.h:216
Pinned page structure.
Definition space.h:62
uint64_t pinCount
The number of times this page is pinned, will be unpinned when it reaches 0.
Definition space.h:64
map_entry_t mapEntry
Definition space.h:63
Virtual address space structure.
Definition space.h:78
lock_t lock
Definition space.h:95
map_t pinnedPages
Map of pages with a pin depth greater than 1.
Definition space.h:80
uintptr_t startAddress
The start address for allocations in this address space.
Definition space.h:81
uint64_t callbacksLength
Length of the callbacks array.
Definition space.h:91
uintptr_t endAddress
The end address for allocations in this address space.
Definition space.h:82
page_table_t pageTable
The page table associated with the address space.
Definition space.h:79
space_flags_t flags
Definition space.h:84
uintptr_t freeAddress
The next available free virtual address in this address space.
Definition space.h:83
atomic_uint16_t shootdownAcks
Definition space.h:94
space_callback_t * callbacks
Definition space.h:90
Structure to define a stack in memory.
static space_t kernelSpace
Definition vmm.c:26