PatchworkOS  966e257
A non-POSIX operating system.
paging.h
1#pragma once
2
3#include <kernel/cpu/regs.h>
5
6#include <assert.h>
7#include <stdbool.h>
8#include <stdlib.h>
9#include <string.h>
10#include <sys/proc.h>
11
12#ifdef _BOOT_
13#include <efi.h>
14#include <efilib.h>
15#endif
16
17/**
18 * @addtogroup kernel_mem_paging
19 * @{
20 */
21
22/**
23 * @brief Invalidates a region of pages in the TLB.
24 *
25 * Even if a page table entry is modified, the CPU might still use a cached version of the entry in the TLB. To ensure
26 * our changes are detected we must invalidate this cache using `invlpg` or if many pages are changed, a full TLB flush
27 * by reloading CR3.
28 *
29 * @param virtAddr The virtual address of the first page to invalidate.
 * @param pageCount The number of pages to invalidate.
30 */
31static inline void tlb_invalidate(void* virtAddr, uint64_t pageCount)
32{
33 if (pageCount == 0)
34 {
35 return;
36 }
37
38 if (pageCount > 16)
39 {
40        cr3_write(cr3_read());
41    }
42 else
43 {
44 for (uint64_t i = 0; i < pageCount; i++)
45 {
46 asm volatile("invlpg (%0)" ::"r"(virtAddr + i * PAGE_SIZE) : "memory");
47 }
48 }
49}
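// Example (illustrative sketch, not part of the original header): after directly
// modifying a PML1 entry obtained via page_table_traverse(), the stale TLB entry
// must be flushed by hand; ranges larger than 16 pages fall back to a full CR3 reload.
//
//     traverse.entry->present = 0;
//     tlb_invalidate(virtAddr, 1);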
50
51/**
52 * @brief Retrieves the address from a page table entry and converts it to an accessible address.
53 *
54 * The accessible address depends on whether we are in the kernel or the bootloader: the bootloader has physical
55 * memory identity mapped, so the physical address is directly accessible, while the kernel instead has physical
56 * memory mapped into the higher half of the address space.
57 *
58 * @param entry The page table entry.
59 * @return The accessible address contained in the entry.
60 */
61static inline uintptr_t pml_accessible_addr(pml_entry_t entry)
62{
63#ifdef _BOOT_
64 return entry.addr << PML_ADDR_OFFSET_BITS;
65#else
66    return PML_LOWER_TO_HIGHER(entry.addr << PML_ADDR_OFFSET_BITS);
67#endif
68}
69
70/**
71 * @brief Checks if a page table level is empty (all entries are 0).
72 *
73 * Used as a helper for `page_table_clear()`.
74 *
75 * @param pml The page table level to check.
76 * @return true if all entries are raw 0, false otherwise.
77 */
78static inline bool pml_is_empty(pml_t* pml)
79{
80 for (pml_index_t i = 0; i < PML_INDEX_AMOUNT; i++)
81 {
82 if (pml->entries[i].raw != 0)
83 {
84 return false;
85 }
86 }
87 return true;
88}
89
90/**
91 * @brief Allocates and initializes a new page table level.
92 *
93 * @param table The page table.
94 * @param outPml Will be filled with the newly allocated page table level.
95 * @return On success, `0`. On failure, `ERR`.
96 */
97static inline uint64_t pml_new(page_table_t* table, pml_t** outPml)
98{
99 pml_t* pml;
100 if (table->allocPages((void**)&pml, 1) == ERR)
101 {
102 return ERR;
103 }
104#ifdef _BOOT_
105 SetMem(pml, PAGE_SIZE, 0);
106#else
107 memset(pml, 0, PAGE_SIZE);
108#endif
109 *outPml = pml;
110 return 0;
111}
112
113/**
114 * @brief Recursively frees a page table level, all its children and any owned pages.
115 *
116 * @param table The page table.
117 * @param pml The current page table level to free.
118 * @param level The current level of the page table.
119 */
120static inline void pml_free(page_table_t* table, pml_t* pml, pml_level_t level)
121{
122 if (level < 0)
123 {
124 return;
125 }
126
127 for (pml_index_t i = 0; i < PML_INDEX_AMOUNT; i++)
128 {
129 pml_entry_t* entry = &pml->entries[i];
130 if (!entry->present)
131 {
132 continue;
133 }
134
135 if (level > PML1)
136 {
137 pml_free(table, (pml_t*)pml_accessible_addr(*entry), level - 1);
138 }
139 else if (entry->owned)
140 {
141 void* addr = (void*)pml_accessible_addr(*entry);
142 table->freePages(&addr, 1);
143 }
144 }
145
146 table->freePages((void**)&pml, 1);
147}
148
149/**
150 * @brief Initializes a page table.
151 *
152 * @param table The page table to initialize.
153 * @param allocPages The function to use for allocating pages.
154 * @param freePages The function to use for freeing pages.
155 * @return On success, `0`. On failure, `ERR`.
156 */
157static inline uint64_t page_table_init(page_table_t* table, pml_alloc_pages_t allocPages, pml_free_pages_t freePages)
158{
159 table->allocPages = allocPages;
160 table->freePages = freePages;
161 if (pml_new(table, &table->pml4) == ERR)
162 {
163 return ERR;
164 }
165 return 0;
166}
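// Example (illustrative sketch): construct and activate a page table using a
// hypothetical frame-allocator pair `my_alloc_pages`/`my_free_pages` that matches
// the `pml_alloc_pages_t`/`pml_free_pages_t` signatures.
//
//     page_table_t table;
//     if (page_table_init(&table, my_alloc_pages, my_free_pages) == ERR)
//     {
//         return ERR;
//     }
//     page_table_load(&table); // writes CR3 only if the table is not already loaded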
167
168/**
169 * @brief Deinitializes a page table, freeing all allocated pages.
170 *
171 * @param table The page table to deinitialize.
172 */
173static inline void page_table_deinit(page_table_t* table)
174{
175 pml_free(table, table->pml4, PML4);
176}
177
178/**
179 * @brief Loads the page table into the CR3 register if it is not already loaded.
180 *
181 * @param table The page table to load.
182 */
183static inline void page_table_load(page_table_t* table)
184{
185 uint64_t cr3 = PML_ENSURE_LOWER_HALF(table->pml4);
186 if (cr3 != cr3_read())
187 {
188 cr3_write(cr3);
189 }
190}
191
192/**
193 * @brief Retrieves or allocates the next level page table.
194 *
195 * If the entry at the specified index is present, it retrieves the corresponding page table level.
196 * If the entry is not present and the `PML_PRESENT` flag is set in `flags`, it allocates a new page table level and
197 * initializes the entry with the provided flags. If the entry is not present and the `PML_PRESENT` flag is not
198 * set, it returns `ERR`.
199 *
200 * @param table The page table.
201 * @param currentPml The current page table level.
202 * @param index The index within the current page table level.
203 * @param flags The flags to assign to a newly allocated page table level, if applicable.
204 * @param outPml Will be filled with the retrieved or newly allocated page table level.
205 * @return On success, `0`. On failure, `ERR`.
206 */
207static inline uint64_t page_table_get_pml(page_table_t* table, pml_t* currentPml, pml_index_t index, pml_flags_t flags,
208    pml_t** outPml)
209{
210 pml_entry_t* entry = &currentPml->entries[index];
211 if (entry->present)
212 {
213 *outPml = (pml_t*)pml_accessible_addr(*entry);
214 return 0;
215 }
216 else if (flags & PML_PRESENT)
217 {
218 pml_t* nextPml;
219 if (pml_new(table, &nextPml) == ERR)
220 {
221 return ERR;
222 }
223 currentPml->entries[index].raw = (flags & PML_FLAGS_MASK) | (PML_ENSURE_LOWER_HALF(nextPml) & PML_ADDR_MASK);
224 *outPml = nextPml;
225 return 0;
226 }
227
228 return ERR;
229}
230
231/**
232 * @brief Helper structure for fast traversal of the page table.
233 * @struct page_table_traverse_t
234 */
235typedef struct
236{
237    pml_t* pml3;
238    pml_t* pml2;
239    pml_t* pml1;
240    bool pml3Valid;
241    bool pml2Valid;
242    bool pml1Valid;
243    pml_index_t oldIdx3;
244    pml_index_t oldIdx2;
245    pml_index_t oldIdx1;
246    pml_entry_t* entry;
247} page_table_traverse_t;
248
249/**
250 * @brief Create a `page_table_traverse_t` initializer.
251 *
252 * @return A `page_table_traverse_t` initializer.
253 */
254#define PAGE_TABLE_TRAVERSE_CREATE \
255 { \
256 .pml3Valid = false, \
257 .pml2Valid = false, \
258 .pml1Valid = false, \
259 }
260
261/**
262 * @brief Allows for fast traversal of the page table by caching previously accessed layers.
263 *
264 * If the present flag is not set in `flags` then no new levels will be allocated, and if a non-present level is
265 * encountered the function will return `ERR`.
266 *
267 * Note that higher-level flags are OR'd with `PML_WRITE | PML_USER`, since the effective permissions of a page are
268 * the most restrictive ones found at any level; keeping the higher levels permissive means the lowest level carries
269 * the actual desired permissions. Additionally, the `PML_GLOBAL` flag is not allowed on the PML3 level.
270 *
271 * @param table The page table.
272 * @param traverse The helper structure used to cache each layer.
273 * @param virtAddr The target virtual address.
274 * @param flags The flags to assign to newly allocated levels; if the present flag is not set then no new levels
275 * are allocated.
276 * @return On success, `0`. On failure, `ERR`.
277 */
278static inline uint64_t page_table_traverse(page_table_t* table, page_table_traverse_t* traverse, uintptr_t virtAddr,
279    pml_flags_t flags)
280{
281 pml_index_t newIdx3 = PML_ADDR_TO_INDEX(virtAddr, PML4);
282 if (!traverse->pml3Valid || traverse->oldIdx3 != newIdx3)
283 {
284 if (page_table_get_pml(table, table->pml4, newIdx3, (flags | PML_WRITE | PML_USER) & ~PML_GLOBAL,
285 &traverse->pml3) == ERR)
286 {
287 return ERR;
288 }
289 traverse->oldIdx3 = newIdx3;
290 traverse->pml2Valid = false; // Invalidate cache for lower levels
291 }
292
293 pml_index_t newIdx2 = PML_ADDR_TO_INDEX(virtAddr, PML3);
294 if (!traverse->pml2Valid || traverse->oldIdx2 != newIdx2)
295 {
296 if (page_table_get_pml(table, traverse->pml3, newIdx2, flags | PML_WRITE | PML_USER, &traverse->pml2) == ERR)
297 {
298 return ERR;
299 }
300 traverse->oldIdx2 = newIdx2;
301 traverse->pml1Valid = false; // Invalidate cache for lower levels
302 }
303
304 pml_index_t newIdx1 = PML_ADDR_TO_INDEX(virtAddr, PML2);
305 if (!traverse->pml1Valid || traverse->oldIdx1 != newIdx1)
306 {
307 if (page_table_get_pml(table, traverse->pml2, newIdx1, flags | PML_WRITE | PML_USER, &traverse->pml1) == ERR)
308 {
309 return ERR;
310 }
311 traverse->oldIdx1 = newIdx1;
312 }
313
314 traverse->entry = &traverse->pml1->entries[PML_ADDR_TO_INDEX(virtAddr, PML1)];
315 return 0;
316}
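// Example (illustrative sketch): walk a range one page at a time; the cached
// levels make consecutive lookups within the same PML1 cheap.
//
//     page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
//     for (uint64_t i = 0; i < pageAmount; i++)
//     {
//         if (page_table_traverse(&table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
//         {
//             continue; // a level is missing, nothing to inspect
//         }
//         // traverse.entry now points at the PML1 entry for this page.
//     }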
317
318/**
319 * @brief Retrieves the physical address mapped to a given virtual address.
320 *
321 * If the virtual address is not mapped, the function returns `ERR`.
322 *
323 * @param table The page table.
324 * @param virtAddr The virtual address to look up.
325 * @param outPhysAddr Will be filled with the corresponding physical address on success.
326 * @return On success, `0`. On failure, `ERR`.
327 */
328static inline uint64_t page_table_get_phys_addr(page_table_t* table, const void* virtAddr, void** outPhysAddr)
329{
330 uint64_t offset = ((uint64_t)virtAddr) % PAGE_SIZE;
331 virtAddr = (void*)ROUND_DOWN(virtAddr, PAGE_SIZE);
332
333    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
334
335 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr, PML_NONE) == ERR)
336 {
337 return ERR;
338 }
339
340 if (!traverse.entry->present)
341 {
342 return ERR;
343 }
344
345 *outPhysAddr = (void*)((traverse.entry->addr << PML_ADDR_OFFSET_BITS) + offset);
346 return 0;
347}
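// Example (illustrative sketch): translate a virtual address to its physical
// counterpart; the offset within the page is preserved.
//
//     void* physAddr;
//     if (page_table_get_phys_addr(&table, virtAddr, &physAddr) == ERR)
//     {
//         return ERR; // not mapped
//     }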
348
349/**
350 * @brief Checks if a range of virtual addresses is completely mapped.
351 *
352 * If any page in the range is not mapped, the function returns `false`.
353 *
354 * @param table The page table.
355 * @param virtAddr The starting virtual address.
356 * @param pageAmount The number of pages to check.
357 * @return `true` if the entire range is mapped, `false` otherwise.
358 */
359static inline bool page_table_is_mapped(page_table_t* table, const void* virtAddr, uint64_t pageAmount)
360{
361    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
362    for (uint64_t i = 0; i < pageAmount; i++)
363 {
364 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
365 {
366 return false;
367 }
368
369 if (!traverse.entry->present)
370 {
371 return false;
372 }
373 }
374
375 return true;
376}
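// Example (illustrative sketch): validate a buffer handed in by user space
// before the kernel touches it.
//
//     if (!page_table_is_mapped(&table, buffer, pageAmount))
//     {
//         return ERR;
//     }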
377
378/**
379 * @brief Checks if a range of virtual addresses is completely unmapped.
380 *
381 * If any page in the range is mapped, the function returns `false`.
382 *
383 * @param table The page table.
384 * @param virtAddr The starting virtual address.
385 * @param pageAmount The number of pages to check.
386 * @return `true` if the entire range is unmapped, `false` otherwise.
387 */
388static inline bool page_table_is_unmapped(page_table_t* table, const void* virtAddr, uint64_t pageAmount)
389{
390    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
391
392 for (uint64_t i = 0; i < pageAmount; i++)
393 {
394 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
395 {
396 continue;
397 }
398
399 if (traverse.entry->present)
400 {
401 return false;
402 }
403 }
404
405 return true;
406}
407
408/**
409 * @brief Maps a range of virtual addresses to physical addresses in the page table.
410 *
411 * If any page in the range is already mapped, the function will fail and return `ERR`.
412 *
413 * @param table The page table.
414 * @param virtAddr The starting virtual address.
415 * @param physAddr The starting physical address.
416 * @param pageAmount The number of pages to map.
417 * @param flags The flags to set for the mapped pages. Must include `PML_PRESENT`.
418 * @param callbackId The callback ID to associate with the mapped pages or `PML_CALLBACK_NONE`.
419 * @return On success, `0`. On failure, `ERR`.
420 */
421static inline uint64_t page_table_map(page_table_t* table, void* virtAddr, void* physAddr, uint64_t pageAmount,
422    pml_flags_t flags, pml_callback_id_t callbackId)
423{
424 if (!(flags & PML_PRESENT))
425 {
426 return ERR;
427 }
428
429    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
430
431 for (uint64_t i = 0; i < pageAmount; i++)
432 {
433 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr, flags) == ERR)
434 {
435 return ERR;
436 }
437
438 if (traverse.entry->present)
439 {
440 return ERR;
441 }
442
443 traverse.entry->raw = flags;
444        traverse.entry->addr = (uintptr_t)physAddr >> PML_ADDR_OFFSET_BITS;
445        traverse.entry->lowCallbackId = callbackId & 1;
446 traverse.entry->highCallbackId = callbackId >> 1;
447
448 physAddr = (void*)((uintptr_t)physAddr + PAGE_SIZE);
449 virtAddr = (void*)((uintptr_t)virtAddr + PAGE_SIZE);
450 }
451
452 return 0;
453}
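// Example (illustrative sketch): identity-map 16 pages of device memory as
// kernel read/write; the physical address is only illustrative.
//
//     if (page_table_map(&table, (void*)0xFEE00000, (void*)0xFEE00000, 16,
//             PML_PRESENT | PML_WRITE, PML_CALLBACK_NONE) == ERR)
//     {
//         return ERR;
//     }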
454
455/**
456 * @brief Maps an array of physical pages to contiguous virtual addresses in the page table.
457 *
458 * If any page in the range is already mapped, the function will fail and return `ERR`.
459 *
460 * @param table The page table.
461 * @param virtAddr The starting virtual address.
462 * @param pages Array of physical page addresses to map.
463 * @param pageAmount The number of pages in the array to map.
464 * @param flags The flags to set for the mapped pages. Must include `PML_PRESENT`.
465 * @param callbackId The callback ID to associate with the mapped pages or `PML_CALLBACK_NONE`.
466 * @return On success, `0`. On failure, `ERR`.
467 */
468static inline uint64_t page_table_map_pages(page_table_t* table, void* virtAddr, void** pages, uint64_t pageAmount,
469    pml_flags_t flags, pml_callback_id_t callbackId)
470{
471 if (!(flags & PML_PRESENT))
472 {
473 return ERR;
474 }
475
476    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
477
478 for (uint64_t i = 0; i < pageAmount; i++)
479 {
480 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr, flags) == ERR)
481 {
482 return ERR;
483 }
484
485 if (traverse.entry->present)
486 {
487 return ERR;
488 }
489
490 traverse.entry->raw = flags;
491        traverse.entry->addr = (uintptr_t)pages[i] >> PML_ADDR_OFFSET_BITS;
492        traverse.entry->lowCallbackId = callbackId & 1;
493 traverse.entry->highCallbackId = callbackId >> 1;
494
495 virtAddr = (void*)((uintptr_t)virtAddr + PAGE_SIZE);
496 }
497
498 return 0;
499}
500
501/**
502 * @brief Unmaps a range of virtual addresses from the page table.
503 *
504 * If a page is not currently mapped, it is skipped.
505 *
504 * Will NOT free owned pages; instead it only sets the present flag to 0. This is to help with TLB shootdowns where we
507 * must unmap, wait for all CPUs to acknowledge the unmap, and only then free the pages. Use `page_table_clear()` to
508 * free owned pages separately.
509 *
510 * @param table The page table.
511 * @param virtAddr The starting virtual address.
512 * @param pageAmount The number of pages to unmap.
513 */
514static inline void page_table_unmap(page_table_t* table, void* virtAddr, uint64_t pageAmount)
515{
516    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
517
518 for (uint64_t i = 0; i < pageAmount; i++)
519 {
520 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
521 {
522 continue;
523 }
524
525 if (!traverse.entry->present)
526 {
527 continue;
528 }
529
530 traverse.entry->present = 0;
531 }
532
533 tlb_invalidate(virtAddr, pageAmount);
534}
535
536/**
537 * @brief Buffer of pages used to batch page frees.
538 * @struct page_table_page_buffer_t
539 */
540typedef struct
541{
542    void* pages[PML_PAGE_BUFFER_SIZE];
543    uint64_t pageCount;
544} page_table_page_buffer_t;
545
546/**
547 * @brief Pushes a page table level onto the page buffer, freeing the buffer if full.
548 *
549 * Used as a helper for `page_table_clear()`.
550 *
551 * @param table The page table.
552 * @param buffer The page buffer.
553 * @param address The address to push.
554 */
555static inline void page_table_page_buffer_push(page_table_t* table, page_table_page_buffer_t* buffer, void* address)
556{
557 buffer->pages[buffer->pageCount] = address;
558 buffer->pageCount++;
559
560 if (buffer->pageCount >= PML_PAGE_BUFFER_SIZE)
561 {
562 table->freePages(buffer->pages, buffer->pageCount);
563 buffer->pageCount = 0;
564 }
565}
566
567/**
568 * @brief Flushes the page buffer, freeing any remaining pages.
569 *
570 * Used as a helper for `page_table_clear()`.
571 *
572 * @param table The page table.
573 * @param buffer The page buffer.
574 */
575static inline void page_table_page_buffer_flush(page_table_t* table, page_table_page_buffer_t* buffer)
576{
577 if (buffer->pageCount > 0)
578 {
579 table->freePages(buffer->pages, buffer->pageCount);
580 buffer->pageCount = 0;
581 }
582}
583
584/**
585 * @brief Clears any empty page table levels any time a pml1, pml2 or pml3 boundary is crossed.
586 *
587 * Used as a helper for `page_table_clear()`.
588 *
589 * @param table The page table.
590 * @param prevTraverse The previous traverse state.
591 * @param traverse The current traverse state.
592 * @param pageBuffer The page buffer.
593 */
594static inline void page_table_clear_pml1_pml2_pml3(page_table_t* table, page_table_traverse_t* prevTraverse,
595    page_table_traverse_t* traverse, page_table_page_buffer_t* pageBuffer)
596{
597 if (prevTraverse->pml1Valid && prevTraverse->pml1 != traverse->pml1 && pml_is_empty(prevTraverse->pml1))
598 {
599 page_table_page_buffer_push(table, pageBuffer, prevTraverse->pml1);
600 prevTraverse->pml2->entries[prevTraverse->oldIdx1].raw = 0;
601 if (prevTraverse->pml2Valid && prevTraverse->pml2 != traverse->pml2 && pml_is_empty(prevTraverse->pml2))
602 {
603 page_table_page_buffer_push(table, pageBuffer, prevTraverse->pml2);
604 prevTraverse->pml3->entries[prevTraverse->oldIdx2].raw = 0;
605 if (prevTraverse->pml3Valid && prevTraverse->pml3 != traverse->pml3 && pml_is_empty(prevTraverse->pml3))
606 {
607 page_table_page_buffer_push(table, pageBuffer, prevTraverse->pml3);
608 table->pml4->entries[prevTraverse->oldIdx3].raw = 0;
609 }
610 }
611 }
612}
613
614/**
615 * @brief Clears page table entries in the specified range and frees any owned pages.
616 *
617 * Intended to be used in conjunction with `page_table_unmap()` to first unmap pages and then free any owned pages after
618 * TLB shootdown is complete.
619 *
620 * Any still present or pinned entries will be skipped.
621 *
622 * All unskipped entries will be fully cleared (set to 0).
623 *
624 * @param table The page table.
625 * @param virtAddr The starting virtual address.
626 * @param pageAmount The number of pages to clear.
627 */
628static inline void page_table_clear(page_table_t* table, void* virtAddr, uint64_t pageAmount)
629{
630 page_table_page_buffer_t pageBuffer = {.pageCount = 0};
631
632    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
633    page_table_traverse_t prevTraverse = PAGE_TABLE_TRAVERSE_CREATE;
634    for (uint64_t i = 0; i < pageAmount; i++)
635 {
636 uintptr_t currentVirtAddr = (uintptr_t)virtAddr + i * PAGE_SIZE;
637
638 page_table_clear_pml1_pml2_pml3(table, &prevTraverse, &traverse, &pageBuffer);
639
640 if (page_table_traverse(table, &traverse, currentVirtAddr, PML_NONE) == ERR)
641 {
642 prevTraverse.pml1Valid = false;
643 prevTraverse.pml2Valid = false;
644 prevTraverse.pml3Valid = false;
645 continue;
646 }
647 prevTraverse = traverse;
648
649 if (traverse.entry->present)
650 {
651 continue;
652 }
653
654 if (traverse.entry->owned)
655 {
656 page_table_page_buffer_push(table, &pageBuffer, (void*)pml_accessible_addr(*traverse.entry));
657 }
658
659 traverse.entry->raw = 0;
660 }
661
662 page_table_clear_pml1_pml2_pml3(table, &prevTraverse, &traverse, &pageBuffer);
663 page_table_page_buffer_flush(table, &pageBuffer);
664}
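// Example (illustrative sketch): the two-phase teardown described above; the
// shootdown step stands in for whatever IPI mechanism the kernel uses and is
// hypothetical here.
//
//     page_table_unmap(&table, virtAddr, pageAmount); // clear present bits, flush local TLB
//     tlb_shootdown_and_wait();                       // hypothetical: all CPUs acknowledge
//     page_table_clear(&table, virtAddr, pageAmount); // free owned pages, prune empty levels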
665
666/**
667 * @brief Collects the number of pages associated with each callback ID in the specified range.
668 *
669 * @param table The page table.
670 * @param virtAddr The starting virtual address.
671 * @param pageAmount The number of pages to check.
672 * @param callbacks An array of size `PML_MAX_CALLBACK` that will be filled with the occurrences of each callback ID.
673 */
674static inline void page_table_collect_callbacks(page_table_t* table, void* virtAddr, uint64_t pageAmount,
675 uint64_t* callbacks)
676{
677    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
678
679 for (uint64_t i = 0; i < pageAmount; i++)
680 {
681 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
682 {
683 continue;
684 }
685
686 if (!traverse.entry->present)
687 {
688 continue;
689 }
690
691 pml_callback_id_t callbackId = traverse.entry->lowCallbackId | (traverse.entry->highCallbackId << 1);
692 if (callbackId != PML_CALLBACK_NONE)
693 {
694 callbacks[callbackId]++;
695 }
696 }
697}
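// Example (illustrative sketch): tally how many pages in a region belong to each
// callback ID, e.g. before unmapping the region and notifying the callbacks.
//
//     uint64_t callbacks[PML_MAX_CALLBACK] = {0};
//     page_table_collect_callbacks(&table, virtAddr, pageAmount, callbacks);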
698
699/**
700 * @brief Sets the flags for a range of pages in the page table.
701 *
702 * If a page table level along the way is missing, the page is skipped; if the final entry is not present, `ERR` is
 * returned.
703 *
704 * @param table The page table.
705 * @param virtAddr The starting virtual address.
706 * @param pageAmount The number of pages to update.
707 * @param flags The new flags to set. The `PML_OWNED` flag is preserved.
708 * @return On success, `0`. On failure, `ERR`.
709 */
710static inline uint64_t page_table_set_flags(page_table_t* table, void* virtAddr, uint64_t pageAmount, pml_flags_t flags)
711{
712    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
713
714 for (uint64_t i = 0; i < pageAmount; i++)
715 {
716 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
717 {
718 continue;
719 }
720
721 if (!traverse.entry->present)
722 {
723 return ERR;
724 }
725
726 if (traverse.entry->owned)
727 {
728 flags |= PML_OWNED;
729 }
730
731 // Bit magic to only update the flags while preserving the address and callback ID.
732 traverse.entry->raw = (traverse.entry->raw & ~PML_FLAGS_MASK) | (flags & PML_FLAGS_MASK);
733 }
734
735 tlb_invalidate(virtAddr, pageAmount);
736 return 0;
737}
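// Example (illustrative sketch): make a user range read-only, e.g. for
// copy-on-write; `PML_OWNED` is preserved and the TLB is invalidated internally.
//
//     if (page_table_set_flags(&table, virtAddr, pageAmount, PML_PRESENT | PML_USER) == ERR)
//     {
//         return ERR;
//     }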
738
739/**
740 * @brief Finds the first contiguous unmapped region with the given number of pages within the specified address range.
741 *
742 * Good luck with this function; I'm about 99% sure it works.
743 *
744 * This function should be `O(r)` in the worst case, where `r` is the number of pages in the address range; note that
745 * the number of pages needed does not affect the complexity. This has the fun effect that the more memory is
746 * allocated, the faster this function will run on average.
747 *
748 * @param table The page table.
749 * @param startAddr The start address to begin searching (inclusive).
750 * @param endAddr The end address of the search range (exclusive).
751 * @param pageAmount The number of consecutive unmapped pages needed.
752 * @param outAddr Will be filled with the start address of the unmapped region if found.
753 * @return On success, `0`. If no suitable region is found, `ERR`.
754 */
755static inline uint64_t page_table_find_unmapped_region(page_table_t* table, void* startAddr, void* endAddr,
756 uint64_t pageAmount, void** outAddr)
757{
758 uintptr_t currentAddr = ROUND_DOWN((uintptr_t)startAddr, PAGE_SIZE);
759 uintptr_t end = (uintptr_t)endAddr;
760
761 if (pageAmount >= (PML3_SIZE / PAGE_SIZE))
762 {
763 while (currentAddr < end)
764 {
765 pml_index_t idx4 = PML_ADDR_TO_INDEX(currentAddr, PML4);
766 pml_index_t idx3 = PML_ADDR_TO_INDEX(currentAddr, PML3);
767
768 pml_entry_t* entry4 = &table->pml4->entries[idx4];
769 if (!entry4->present)
770 {
771 *outAddr = (void*)currentAddr;
772 return 0;
773 }
774
775 pml_t* pml3 = (pml_t*)pml_accessible_addr(*entry4);
776 pml_entry_t* entry3 = &pml3->entries[idx3];
777
778 if (!entry3->present)
779 {
780 *outAddr = (void*)currentAddr;
781 return 0;
782 }
783
784 currentAddr = ROUND_UP(currentAddr + 1, PML3_SIZE);
785 }
786 return ERR;
787 }
788
789 if (pageAmount >= (PML2_SIZE / PAGE_SIZE))
790 {
791 while (currentAddr < end)
792 {
793 pml_index_t idx4 = PML_ADDR_TO_INDEX(currentAddr, PML4);
794 pml_entry_t* entry4 = &table->pml4->entries[idx4];
795
796 if (!entry4->present)
797 {
798 *outAddr = (void*)currentAddr;
799 return 0;
800 }
801
802 pml_t* pml3 = (pml_t*)pml_accessible_addr(*entry4);
803 pml_index_t idx3 = PML_ADDR_TO_INDEX(currentAddr, PML3);
804 pml_entry_t* entry3 = &pml3->entries[idx3];
805
806 if (!entry3->present)
807 {
808 *outAddr = (void*)currentAddr;
809 return 0;
810 }
811
812 pml_t* pml2 = (pml_t*)pml_accessible_addr(*entry3);
813 pml_index_t idx2 = PML_ADDR_TO_INDEX(currentAddr, PML2);
814 pml_entry_t* entry2 = &pml2->entries[idx2];
815
816 if (!entry2->present)
817 {
818 *outAddr = (void*)currentAddr;
819 return 0;
820 }
821
822 currentAddr = ROUND_UP(currentAddr + 1, PML2_SIZE);
823 }
824 return ERR;
825 }
826
827 uintptr_t regionStart = 0;
828 uint64_t consecutiveUnmapped = 0;
829
830 while (currentAddr < end)
831 {
832 pml_index_t idx4 = PML_ADDR_TO_INDEX(currentAddr, PML4);
833 pml_entry_t* entry4 = &table->pml4->entries[idx4];
834
835 if (!entry4->present)
836 {
837 if (consecutiveUnmapped == 0)
838 {
839 regionStart = currentAddr;
840 }
841
842 uintptr_t skipTo = PML_INDEX_TO_ADDR(idx4 + 1, PML4);
843 uint64_t skippedPages = (MIN(skipTo, end) - currentAddr) / PAGE_SIZE;
844 consecutiveUnmapped += skippedPages;
845
846 if (consecutiveUnmapped >= pageAmount)
847 {
848 *outAddr = (void*)regionStart;
849 return 0;
850 }
851
852 currentAddr = skipTo;
853 continue;
854 }
855
856 pml_t* pml3 = (pml_t*)pml_accessible_addr(*entry4);
857 pml_index_t idx3 = PML_ADDR_TO_INDEX(currentAddr, PML3);
858 pml_entry_t* entry3 = &pml3->entries[idx3];
859
860 if (!entry3->present)
861 {
862 if (consecutiveUnmapped == 0)
863 {
864 regionStart = currentAddr;
865 }
866
867 uint64_t skippedPages = PML3_SIZE / PAGE_SIZE;
868 consecutiveUnmapped += skippedPages;
869
870 if (consecutiveUnmapped >= pageAmount)
871 {
872 *outAddr = (void*)regionStart;
873 return 0;
874 }
875
876 currentAddr = ROUND_UP(currentAddr + 1, PML3_SIZE);
877 continue;
878 }
879
880 pml_t* pml2 = (pml_t*)pml_accessible_addr(*entry3);
881 pml_index_t idx2 = PML_ADDR_TO_INDEX(currentAddr, PML2);
882 pml_entry_t* entry2 = &pml2->entries[idx2];
883
884 if (!entry2->present)
885 {
886 if (consecutiveUnmapped == 0)
887 {
888 regionStart = currentAddr;
889 }
890
891 uint64_t skippedPages = PML2_SIZE / PAGE_SIZE;
892 consecutiveUnmapped += skippedPages;
893
894 if (consecutiveUnmapped >= pageAmount)
895 {
896 *outAddr = (void*)regionStart;
897 return 0;
898 }
899
900 currentAddr = ROUND_UP(currentAddr + 1, PML2_SIZE);
901 continue;
902 }
903
904 pml_t* pml1 = (pml_t*)pml_accessible_addr(*entry2);
905 pml_index_t idx1 = PML_ADDR_TO_INDEX(currentAddr, PML1);
906
907 for (; idx1 < PML_INDEX_AMOUNT && currentAddr < end; idx1++, currentAddr += PAGE_SIZE)
908 {
909 if (!pml1->entries[idx1].present)
910 {
911 if (consecutiveUnmapped == 0)
912 {
913 regionStart = currentAddr;
914 }
915 consecutiveUnmapped++;
916
917 if (consecutiveUnmapped >= pageAmount)
918 {
919 *outAddr = (void*)regionStart;
920 return 0;
921 }
922 }
923 else
924 {
925 consecutiveUnmapped = 0;
926 }
927 }
928 }
929
930 return ERR;
931}
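// Example (illustrative sketch): reserve a fresh 8-page virtual region and then
// map it; `searchStart`/`searchEnd` are hypothetical bounds of the search window.
//
//     void* virtAddr;
//     if (page_table_find_unmapped_region(&table, searchStart, searchEnd, 8, &virtAddr) == ERR)
//     {
//         return ERR;
//     }
//     // ...then map 8 pages at virtAddr with page_table_map().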
932
933/**
934 * @brief Checks if any page in a range is pinned.
935 *
936 * @param table The page table.
937 * @param virtAddr The starting virtual address.
938 * @param pageAmount The number of pages to check.
939 * @return `true` if any page in the range is pinned, `false` otherwise.
940 */
941static inline bool page_table_is_pinned(page_table_t* table, const void* virtAddr, uint64_t pageAmount)
942{
943    page_table_traverse_t traverse = PAGE_TABLE_TRAVERSE_CREATE;
944    for (uint64_t i = 0; i < pageAmount; i++)
945 {
946 if (page_table_traverse(table, &traverse, (uintptr_t)virtAddr + i * PAGE_SIZE, PML_NONE) == ERR)
947 {
948 continue;
949 }
950
951 if (!traverse.entry->present)
952 {
953 continue;
954 }
955
956 if (traverse.entry->pinned)
957 {
958 return true;
959 }
960 }
961
962 return false;
963}
964
965/**
966 * @brief Counts the number of pages in a range that have all the specified flags set.
967 *
968 * Can be used to, for example, check the total number of pages allocated to a process by counting the pages with the
969 * `PML_PRESENT | PML_USER | PML_OWNED` flags set.
970 *
971 * @param table The page table.
972 * @param virtAddr The starting virtual address.
973 * @param pageAmount The number of pages to check.
974 * @param flags The flags to check for.
975 * @return The number of pages with the specified flags set.
976 */
977static inline uint64_t page_table_count_pages_with_flags(page_table_t* table, void* virtAddr, uint64_t pageAmount,
978    pml_flags_t flags)
979{
980 uint64_t count = 0;
981 while (pageAmount > 0)
982 {
983 pml_index_t idx4 = PML_ADDR_TO_INDEX((uintptr_t)virtAddr, PML4);
984 pml_entry_t* entry4 = &table->pml4->entries[idx4];
985
986 if (!entry4->present)
987 {
988 uint64_t skipPages = MIN(pageAmount, (PML_INDEX_TO_ADDR(idx4 + 1, PML4) - (uintptr_t)virtAddr) / PAGE_SIZE);
989 virtAddr = (void*)((uintptr_t)virtAddr + skipPages * PAGE_SIZE);
990 pageAmount -= skipPages;
991 continue;
992 }
993
994 pml_t* pml3 = (pml_t*)pml_accessible_addr(*entry4);
995 pml_index_t idx3 = PML_ADDR_TO_INDEX((uintptr_t)virtAddr, PML3);
996 pml_entry_t* entry3 = &pml3->entries[idx3];
997
998 if (!entry3->present)
999 {
1000 uint64_t skipPages = MIN(pageAmount, (PML_INDEX_TO_ADDR(idx3 + 1, PML3) - (uintptr_t)virtAddr) / PAGE_SIZE);
1001 virtAddr = (void*)((uintptr_t)virtAddr + skipPages * PAGE_SIZE);
1002 pageAmount -= skipPages;
1003 continue;
1004 }
1005
1006 pml_t* pml2 = (pml_t*)pml_accessible_addr(*entry3);
1007 pml_index_t idx2 = PML_ADDR_TO_INDEX((uintptr_t)virtAddr, PML2);
1008 pml_entry_t* entry2 = &pml2->entries[idx2];
1009
1010 if (!entry2->present)
1011 {
1012 uint64_t skipPages = MIN(pageAmount, (PML_INDEX_TO_ADDR(idx2 + 1, PML2) - (uintptr_t)virtAddr) / PAGE_SIZE);
1013 virtAddr = (void*)((uintptr_t)virtAddr + skipPages * PAGE_SIZE);
1014 pageAmount -= skipPages;
1015 continue;
1016 }
1017
1018 pml_t* pml1 = (pml_t*)pml_accessible_addr(*entry2);
1019 pml_index_t idx1 = PML_ADDR_TO_INDEX((uintptr_t)virtAddr, PML1);
1020
1021 for (; idx1 < PML_INDEX_AMOUNT && pageAmount > 0;
1022 idx1++, virtAddr = (void*)((uintptr_t)virtAddr + PAGE_SIZE), pageAmount--)
1023 {
1024 pml_entry_t* entry1 = &pml1->entries[idx1];
1025 if (!entry1->present)
1026 {
1027 continue;
1028 }
1029 if ((entry1->raw & flags) == flags)
1030 {
1031 count++;
1032 }
1033 }
1034 }
1035
1036 return count;
1037}
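// Example (illustrative sketch): the accounting use case from the comment above;
// `userSpacePages` is a hypothetical page count covering the user half.
//
//     uint64_t owned = page_table_count_pages_with_flags(&table, (void*)0, userSpacePages,
//         PML_PRESENT | PML_USER | PML_OWNED);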
1038
1039/** @} */