PatchworkOS  c9fea19
A non-POSIX operating system.
vmm.h
#pragma once

#include <kernel/mem/space.h>

#include <boot/boot_info.h>

#include <sys/list.h>
#include <sys/proc.h>

/**
 * @brief Virtual Memory Manager (VMM).
 * @defgroup kernel_mem_vmm VMM
 * @ingroup kernel_mem
 *
 * The Virtual Memory Manager (VMM) is responsible for allocating and mapping virtual memory.
 *
 * ## TLB Shootdowns
 *
 * When we change a mapping in an address space, it's possible that other CPUs have the same address space loaded and
 * still have the old mappings in their TLB (Translation Lookaside Buffer), a hardware cache of page table entries.
 * These stale entries must be invalidated whenever we change the mappings of a page table. This is called a TLB
 * shootdown.
 *
 * Details can be found in `vmm_map()`, `vmm_unmap()` and `vmm_protect()`.
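 *
 * As a rough illustration, the handler on a CPU receiving the shootdown IPI might look like this (a sketch with a
 * hypothetical handler name; only the `vmm_shootdown_t` fields are taken from this file, `invlpg` is the x86-64
 * instruction that invalidates a single TLB entry):
 * @code{.c}
 * // Invalidate every page covered by a queued shootdown request.
 * static void vmm_shootdown_handle_sketch(vmm_shootdown_t* shootdown)
 * {
 *     for (uint64_t i = 0; i < shootdown->pageAmount; i++)
 *     {
 *         void* addr = (void*)((uintptr_t)shootdown->virtAddr + i * PAGE_SIZE);
 *         asm volatile("invlpg (%0)" ::"r"(addr) : "memory");
 *     }
 * }
 * @endcode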
 *
 * ## Address Space Layout
 *
 * The address space layout is split into several regions. For convenience, the regions are defined using page table
 * indices: the entire virtual address space is divided into 512 regions, each mapped by one entry in the top level
 * page table (PML4), with 256 entries for the lower half and 256 entries for the higher half. By doing this we can
 * very easily copy mappings between address spaces by just copying the relevant PML4 entries.
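 *
 * For example, sharing the entire higher half between two address spaces amounts to copying the upper 256 PML4
 * entries (a sketch; the `entries` member name is an assumption for illustration):
 * @code{.c}
 * // Copy the kernel's higher-half mappings into a new address space.
 * for (uint64_t i = PML_INDEX_AMOUNT / 2; i < PML_INDEX_AMOUNT; i++)
 * {
 *     newPml4->entries[i] = kernelPml4->entries[i];
 * }
 * @endcode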
 *
 * First, at the very top, we have the kernel binary itself and all its data, code, bss, rodata, etc. This region uses
 * the last index in the page table. This region will never be fully filled, and the kernel itself is not guaranteed to
 * be loaded at the very start of this region; the exact address is decided by the `linker.lds` script. This section is
 * mapped identically for all processes.
 *
 * Secondly, we have the per-thread kernel stacks, one stack per thread. Each stack is allocated on demand and can grow
 * dynamically up to `CONFIG_MAX_KERNEL_STACK_PAGES` pages, not including its guard page. This section takes up 2
 * indices in the page table and is process-specific, as each process has its own threads and thus its own kernel
 * stacks.
 *
 * Thirdly, we have the kernel heap, which is used for dynamic memory allocation in the kernel. The kernel heap starts
 * at `VMM_KERNEL_HEAP_MIN` and grows up towards `VMM_KERNEL_HEAP_MAX`. This section takes up 2 indices in the
 * page table and is mapped identically for all processes.
 *
 * Fourthly, we have the identity mapped physical memory. All physical memory is mapped here by simply taking the
 * original physical address and adding `0xFFFF800000000000` to it. This means that the physical address `0x123456`
 * will be mapped to the virtual address `0xFFFF800000123456`. This section takes up all remaining indices below the
 * kernel heap to the end of the higher half and is mapped identically for all processes.
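 *
 * Translating between the two is then a constant offset (a sketch; the kernel may expose its own helpers for this):
 * @code{.c}
 * // Hypothetical helpers for the identity-mapped region.
 * #define PHYS_TO_VIRT(physAddr) ((void*)((uintptr_t)(physAddr) + 0xFFFF800000000000ULL))
 * #define VIRT_TO_PHYS(virtAddr) ((void*)((uintptr_t)(virtAddr) - 0xFFFF800000000000ULL))
 * @endcode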
 *
 * Fifthly, we have non-canonical memory, which is impossible to access and will trigger a general protection fault if
 * accessed. This section takes up the gap between the lower half and higher half of the address space.
 *
 * Finally, we have user space, which starts at `0x400000` (4MiB) and goes up to the top of the lower half. The first
 * 4MiB is left unmapped to catch null pointer dereferences. This section is different for each process.
 *
 * @{
 */

#define VMM_KERNEL_BINARY_MAX PML_HIGHER_HALF_END ///< The maximum address for the content of the kernel binary.
#define VMM_KERNEL_BINARY_MIN \
    PML_INDEX_TO_ADDR(PML_INDEX_AMOUNT - 1, PML4) ///< The minimum address for the content of the kernel binary.

#define VMM_KERNEL_STACKS_MAX VMM_KERNEL_BINARY_MIN ///< The maximum address for kernel stacks.
#define VMM_KERNEL_STACKS_MIN PML_INDEX_TO_ADDR(PML_INDEX_AMOUNT - 3, PML4) ///< The minimum address for kernel stacks.

#define VMM_KERNEL_HEAP_MAX VMM_KERNEL_STACKS_MIN ///< The maximum address for the kernel heap.
#define VMM_KERNEL_HEAP_MIN PML_INDEX_TO_ADDR(PML_INDEX_AMOUNT - 5, PML4) ///< The minimum address for the kernel heap.

#define VMM_IDENTITY_MAPPED_MAX VMM_KERNEL_HEAP_MIN ///< The maximum address for the identity mapped physical memory.
#define VMM_IDENTITY_MAPPED_MIN PML_HIGHER_HALF_START ///< The minimum address for the identity mapped physical memory.

#define VMM_USER_SPACE_MAX PML_LOWER_HALF_END ///< The maximum address for user space.
#define VMM_USER_SPACE_MIN (0x400000) ///< The minimum address for user space.

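/*
 * For orientation: assuming `PML_INDEX_TO_ADDR(i, PML4)` resolves PML4 index `i` to `i * 512GiB`, sign-extended
 * into the higher half (an assumption consistent with the identity map starting at `0xFFFF800000000000`), the
 * regions above work out to:
 *
 *   VMM_KERNEL_BINARY_MIN   = 0xFFFFFF8000000000 (index 511, 512GiB)
 *   VMM_KERNEL_STACKS_MIN   = 0xFFFFFE8000000000 (indices 509-510, 1TiB)
 *   VMM_KERNEL_HEAP_MIN     = 0xFFFFFD8000000000 (indices 507-508, 1TiB)
 *   VMM_IDENTITY_MAPPED_MIN = 0xFFFF800000000000 (index 256, start of the higher half)
 */
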
/**
 * @brief Check if an address is page aligned.
 *
 * @param addr The address to check.
 * @return true if the address is page aligned, false otherwise.
 */
#define VMM_IS_PAGE_ALIGNED(addr) (((uintptr_t)(addr) & (PAGE_SIZE - 1)) == 0)

/**
 * @brief TLB shootdown structure.
 * @struct vmm_shootdown_t
 *
 * Stored in a CPU's shootdown list and will be processed when it receives an `INTERRUPT_TLB_SHOOTDOWN` interrupt.
 */
typedef struct
{
    list_entry_t entry;
    space_t* space;
    void* virtAddr;
    uint64_t pageAmount;
} vmm_shootdown_t;

/**
 * @brief Maximum number of shootdown requests that can be queued per CPU.
 */
#define VMM_MAX_SHOOTDOWN_REQUESTS 16

/**
 * @brief Per-CPU VMM context.
 * @struct vmm_cpu_ctx_t
 */
typedef struct
{
    list_entry_t entry; ///< Used by a space to know which CPUs are using it, protected by the space lock.
    uint8_t shootdownCount;
    lock_t lock;
    space_t* currentSpace; ///< Will only be accessed by the owner CPU, so no lock.
} vmm_cpu_ctx_t;

/**
 * @brief Flags for `vmm_alloc()`.
 * @enum vmm_alloc_flags_t
 */
typedef enum
{
    VMM_ALLOC_OVERWRITE = 0 << 0,     ///< If any page is already mapped, overwrite the mapping.
    VMM_ALLOC_FAIL_IF_MAPPED = 1 << 0 ///< If set and any page is already mapped, fail and set `errno` to `EEXIST`.
} vmm_alloc_flags_t;

/**
 * @brief Initializes the Virtual Memory Manager.
 */
void vmm_init(void);

/**
 * @brief Loads the kernel's address space into the current CPU.
 */
void vmm_kernel_space_load(void);

/**
 * @brief Initializes a per-CPU VMM context and performs per-CPU VMM initialization.
 *
 * Must be called on the CPU that owns the context.
 *
 * @param ctx The CPU VMM context to initialize.
 */
void vmm_cpu_ctx_init(vmm_cpu_ctx_t* ctx);

/**
 * @brief Retrieves the kernel's address space.
 *
 * @return Pointer to the kernel's address space.
 */
space_t* vmm_kernel_space_get(void);

/**
 * @brief Converts the user space memory protection flags to page table entry flags.
 *
 * @param prot The memory protection flags.
 * @return The corresponding page table entry flags.
 */
pml_flags_t vmm_prot_to_flags(prot_t prot);

/**
 * @brief Allocates and maps virtual memory in a given address space.
 *
 * The allocated memory will be backed by newly allocated physical memory pages and is not guaranteed to be zeroed.
 *
 * @see `vmm_map()` for details on TLB shootdowns.
 *
 * @param space The target address space; if `NULL`, the kernel space is used.
 * @param virtAddr The desired virtual address. If `NULL`, the kernel chooses an available address.
 * @param length The length of the virtual memory region to allocate, in bytes.
 * @param pmlFlags The page table flags for the mapping; `PML_OWNED` is always added and `PML_PRESENT` must be set.
 * @param allocFlags The allocation flags.
 * @return On success, the virtual address. On failure, returns `NULL` and `errno` is set to:
 * - `EINVAL`: Invalid parameters.
 * - `EBUSY`: The region contains pinned pages.
 * - `EEXIST`: The region is already mapped and `VMM_ALLOC_FAIL_IF_MAPPED` is set.
 * - `ENOMEM`: Not enough memory.
 * - Other values from `space_mapping_start()`.
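 *
 * A minimal usage sketch (assuming a `PML_WRITE` flag exists alongside `PML_PRESENT`; the flags are illustrative):
 * @code{.c}
 * // Allocate two pages of kernel memory at a kernel-chosen address.
 * void* buffer = vmm_alloc(NULL, NULL, 2 * PAGE_SIZE, PML_PRESENT | PML_WRITE, VMM_ALLOC_OVERWRITE);
 * if (buffer == NULL)
 * {
 *     // errno holds the failure reason, e.g. ENOMEM.
 * }
 * @endcode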
 */
void* vmm_alloc(space_t* space, void* virtAddr, uint64_t length, pml_flags_t pmlFlags, vmm_alloc_flags_t allocFlags);

/**
 * @brief Maps physical memory to virtual memory in a given address space.
 *
 * Will overwrite any existing mappings in the specified range.
 *
 * When mapping a previously unmapped page there is no need for a TLB shootdown, as any earlier access to that page
 * will have caused a non-present page fault, so no CPU can hold a stale translation. However, if the page is already
 * mapped then it must first be unmapped as described in `vmm_unmap()`.
 *
 * @param space The target address space; if `NULL`, the kernel space is used.
 * @param virtAddr The desired virtual address to map to; if `NULL`, the kernel chooses an available address.
 * @param physAddr The physical address to map from. Must not be `NULL`.
 * @param length The length of the memory region to map, in bytes.
 * @param flags The page table flags for the mapping, must have `PML_PRESENT` set.
 * @param func The callback function to call when the mapped memory is unmapped or the address space is freed. If
 * `NULL`, then no callback will be called.
 * @param private Private data to pass to the callback function.
 * @return On success, the virtual address. On failure, returns `NULL` and `errno` is set to:
 * - `EINVAL`: Invalid parameters.
 * - `EBUSY`: The region contains pinned pages.
 * - `ENOSPC`: No available callback slots.
 * - `ENOMEM`: Not enough memory.
 * - Other values from `space_mapping_start()`.
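 *
 * A typical use is mapping device (MMIO) memory into the kernel (a sketch; the physical address and the `PML_WRITE`
 * flag are assumptions for illustration):
 * @code{.c}
 * // Map a hypothetical 4KiB MMIO region at a kernel-chosen virtual address.
 * void* regs = vmm_map(NULL, NULL, (void*)0xFEE00000, PAGE_SIZE, PML_PRESENT | PML_WRITE, NULL, NULL);
 * @endcode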
 */
void* vmm_map(space_t* space, void* virtAddr, void* physAddr, uint64_t length, pml_flags_t flags,
    space_callback_func_t func, void* private);

/**
 * @brief Maps an array of physical pages to virtual memory in a given address space.
 *
 * Will overwrite any existing mappings in the specified range.
 *
 * @see `vmm_map()` for details on TLB shootdowns.
 *
 * @param space The target address space; if `NULL`, the kernel space is used.
 * @param virtAddr The desired virtual address to map to; if `NULL`, the kernel chooses an available address.
 * @param pages An array of physical page addresses to map.
 * @param pageAmount The number of physical pages in the `pages` array, must not be zero.
 * @param flags The page table flags for the mapping, must have `PML_PRESENT` set.
 * @param func The callback function to call when the mapped memory is unmapped or the address space is freed. If
 * `NULL`, then no callback will be called.
 * @param private Private data to pass to the callback function.
 * @return On success, the virtual address. On failure, returns `NULL` and `errno` is set to:
 * - `EINVAL`: Invalid parameters.
 * - `EBUSY`: The region contains pinned pages.
 * - `ENOSPC`: No available callback slots.
 * - `ENOMEM`: Not enough memory.
 * - Other values from `space_mapping_start()`.
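 *
 * Useful when the backing pages are not physically contiguous (a sketch; `pmm_alloc()` returning a single physical
 * page is an assumption):
 * @code{.c}
 * // Map three non-contiguous physical pages as one contiguous virtual region.
 * void* pages[3] = {pmm_alloc(), pmm_alloc(), pmm_alloc()};
 * void* virt = vmm_map_pages(NULL, NULL, pages, 3, PML_PRESENT | PML_WRITE, NULL, NULL);
 * @endcode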
 */
void* vmm_map_pages(space_t* space, void* virtAddr, void** pages, uint64_t pageAmount, pml_flags_t flags,
    space_callback_func_t func, void* private);

/**
 * @brief Unmaps virtual memory from a given address space.
 *
 * If the memory is already unmapped, this function will do nothing.
 *
 * When unmapping memory, TLB shootdowns are needed on all CPUs that have the address space loaded. To perform the
 * shootdown we first set all page entries for the region to be non-present, perform the shootdown, wait for
 * acknowledgements from all CPUs, and finally free any underlying physical memory if the `PML_OWNED` flag is set.
 *
 * @param space The target address space; if `NULL`, the kernel space is used.
 * @param virtAddr The virtual address of the memory region.
 * @param length The length of the memory region, in bytes.
 * @return On success, `virtAddr`. On failure, `NULL` and `errno` is set to:
 * - `EINVAL`: Invalid parameters.
 * - `EBUSY`: The region contains pinned pages.
 * - Other values from `space_mapping_start()`.
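 *
 * Pairs with `vmm_alloc()` (a sketch; flags as in the `vmm_alloc()` example):
 * @code{.c}
 * void* buffer = vmm_alloc(NULL, NULL, PAGE_SIZE, PML_PRESENT | PML_WRITE, VMM_ALLOC_OVERWRITE);
 * // ... use buffer ...
 * vmm_unmap(NULL, buffer, PAGE_SIZE); // vmm_alloc() added PML_OWNED, so the backing page is freed here.
 * @endcode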
 */
void* vmm_unmap(space_t* space, void* virtAddr, uint64_t length);

/**
 * @brief Changes memory protection flags for a virtual memory region in a given address space.
 *
 * The memory region must be fully mapped, otherwise this function will fail.
 *
 * When changing memory protection flags, TLB shootdowns are needed on all CPUs that have the address space loaded.
 * To perform the shootdown we first update the page entries for the region, then perform the shootdown and wait for
 * acknowledgements from all CPUs before returning.
 *
 * @param space The target address space; if `NULL`, the kernel space is used.
 * @param virtAddr The virtual address of the memory region.
 * @param length The length of the memory region, in bytes.
 * @param flags The new page table flags for the memory region; if `PML_PRESENT` is not set, the memory will be
 * unmapped.
 * @return On success, `virtAddr`. On failure, `NULL` and `errno` is set to:
 * - `EINVAL`: Invalid parameters.
 * - `EBUSY`: The region contains pinned pages.
 * - `ENOENT`: The region is unmapped, or only partially mapped.
 * - Other values from `space_mapping_start()`.
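 *
 * For example, dropping write access after initialization (a sketch; `buffer` is assumed to be a fully mapped,
 * writable region):
 * @code{.c}
 * // Any CPU holding a stale writable TLB entry is shot down before this returns.
 * if (vmm_protect(NULL, buffer, PAGE_SIZE, PML_PRESENT) == NULL)
 * {
 *     // errno holds the failure reason, e.g. ENOENT if the region is only partially mapped.
 * }
 * @endcode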
 */
void* vmm_protect(space_t* space, void* virtAddr, uint64_t length, pml_flags_t flags);

/** @} */