PatchworkOS 19e446b
A non-POSIX operating system.
vmm.h
#pragma once

#include <kernel/mem/space.h>

#include <boot/boot_info.h>

#include <sys/list.h>
#include <sys/proc.h>

/**
 * @brief Virtual Memory Manager (VMM).
 * @defgroup kernel_mem_vmm VMM
 * @ingroup kernel_mem
 *
 * The Virtual Memory Manager (VMM) is responsible for allocating and mapping virtual memory.
 *
 * ## TLB Shootdowns
 *
 * When we change a mapping in an address space, it's possible that other CPUs have the same address space loaded
 * and still have the old mappings in their TLB (Translation Lookaside Buffer), a hardware cache of page table
 * entries. This cache must be invalidated whenever we change the mappings of a page table. This is called a TLB
 * shootdown.
 *
 * Details can be found in `vmm_map()`, `vmm_unmap()` and `vmm_protect()`.
 *
 * ## Address Space Layout
 *
 * The address space layout is split into several regions. For convenience, the regions are defined using page table
 * indices; that is, the entire virtual address space is divided into 512 regions, each mapped by one entry in the
 * top-level page table (PML4), with 256 entries for the lower half and 256 for the higher half. This makes it very
 * easy to copy mappings between address spaces by just copying the relevant PML4 entries.
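 *
 * For instance, giving a new address space the kernel's higher-half mappings is conceptually just a copy of the
 * upper 256 PML4 entries. This is a hedged sketch: the `pml_t` type and `entries` field names are assumptions,
 * only `PML_INDEX_AMOUNT` is used elsewhere in this header:
 * @code{.c}
 * for (uint64_t i = PML_INDEX_AMOUNT / 2; i < PML_INDEX_AMOUNT; i++)
 * {
 *     newPml4->entries[i] = kernelPml4->entries[i]; // each PML4 entry covers a whole 512 GiB region
 * }
 * @endcode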
 *
 * First, at the very top, we have the kernel binary itself and all its data, code, bss, rodata, etc. This region
 * uses the last index in the page table. The region will never be fully filled, and the kernel itself is not
 * guaranteed to be loaded at the very start of it; the exact address is decided by the `linker.lds` script. This
 * section is mapped identically for all processes.
 *
 * Second, we have the per-thread kernel stacks, one stack per thread. Each stack is allocated on demand and can grow
 * dynamically up to `CONFIG_MAX_KERNEL_STACK_PAGES` pages, not including its guard page. This section takes up 2
 * indices in the page table and is process-specific, as each process has its own threads and thus its own kernel
 * stacks.
 *
 * Third, we have the kernel heap, which is used for dynamic memory allocation in the kernel. The kernel heap starts
 * at `VMM_KERNEL_HEAP_MIN` and grows up towards `VMM_KERNEL_HEAP_MAX`. This section takes up 2 indices in the
 * page table and is mapped identically for all processes.
 *
 * Fourth, we have the identity-mapped physical memory. All physical memory is mapped here by simply taking the
 * original physical address and adding `0xFFFF800000000000` to it, so the physical address `0x123456` is mapped to
 * the virtual address `0xFFFF800000123456`. This section takes up all remaining indices below the kernel heap to
 * the end of the higher half and is mapped identically for all processes.
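 *
 * In code, the conversion is a single offset. A minimal sketch; `VMM_PHYS_TO_VIRT` is an illustrative name and the
 * kernel may provide its own helper for this:
 * @code{.c}
 * #define VMM_PHYS_TO_VIRT(physAddr) ((void*)((uintptr_t)(physAddr) + VMM_IDENTITY_MAPPED_MIN))
 * @endcode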
 *
 * Fifth, we have non-canonical memory, which is impossible to access and will trigger a general protection fault if
 * accessed. This section takes up the gap between the lower half and the higher half of the address space.
 *
 * Finally, we have user space, which starts at `0x400000` (4 MiB) and goes up to the top of the lower half. The
 * first 4 MiB is left unmapped to catch null pointer dereferences. This section is different for each process.
 *
 * @{
 */

#define VMM_KERNEL_BINARY_MAX PML_HIGHER_HALF_END ///< The maximum address for the content of the kernel binary.
#define VMM_KERNEL_BINARY_MIN \
    PML_INDEX_TO_ADDR(PML_INDEX_AMOUNT - 1, PML4) ///< The minimum address for the content of the kernel binary.

#define VMM_KERNEL_STACKS_MAX VMM_KERNEL_BINARY_MIN ///< The maximum address for kernel stacks.
#define VMM_KERNEL_STACKS_MIN PML_INDEX_TO_ADDR(PML_INDEX_AMOUNT - 3, PML4) ///< The minimum address for kernel stacks.

#define VMM_KERNEL_HEAP_MAX VMM_KERNEL_STACKS_MIN ///< The maximum address for the kernel heap.
#define VMM_KERNEL_HEAP_MIN PML_INDEX_TO_ADDR(PML_INDEX_AMOUNT - 5, PML4) ///< The minimum address for the kernel heap.

#define VMM_IDENTITY_MAPPED_MAX VMM_KERNEL_HEAP_MIN ///< The maximum address for the identity-mapped physical memory.
#define VMM_IDENTITY_MAPPED_MIN PML_HIGHER_HALF_START ///< The minimum address for the identity-mapped physical memory.

#define VMM_USER_SPACE_MAX PML_LOWER_HALF_END ///< The maximum address for user space.
#define VMM_USER_SPACE_MIN (0x400000) ///< The minimum address for user space.

/**
 * @brief Check if an address is page aligned.
 *
 * @param addr The address to check.
 * @return true if the address is page aligned, false otherwise.
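 *
 * Illustrative use in a hypothetical validation path:
 * @code{.c}
 * if (!VMM_IS_PAGE_ALIGNED(virtAddr) || !VMM_IS_PAGE_ALIGNED(length))
 * {
 *     return NULL; // reject unaligned input
 * }
 * @endcode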
 */
#define VMM_IS_PAGE_ALIGNED(addr) (((uintptr_t)(addr) & (PAGE_SIZE - 1)) == 0)

/**
 * @brief TLB shootdown structure.
 * @struct vmm_shootdown_t
 *
 * Stored in a CPU's shootdown list and will be processed when it receives a `INTERRUPT_TLB_SHOOTDOWN` interrupt.
 */
typedef struct
{
    list_entry_t entry;
    space_t* space;
    void* virtAddr;
    uint64_t pageAmount;
} vmm_shootdown_t;

/**
 * @brief Maximum number of shootdown requests that can be queued per CPU.
 */
#define VMM_MAX_SHOOTDOWN_REQUESTS 16

/**
 * @brief Per-CPU VMM context.
 * @struct vmm_cpu_t
 */
typedef struct
{
    vmm_shootdown_t shootdowns[VMM_MAX_SHOOTDOWN_REQUESTS];
    uint8_t shootdownCount;
    lock_t lock;
    space_t* space; ///< Will only be accessed by the owner CPU, so no lock.
} vmm_cpu_t;

/**
 * @brief Flags for `vmm_alloc()`.
 * @enum vmm_alloc_flags_t
 */
typedef enum
{
    VMM_ALLOC_OVERWRITE = 0 << 0,      ///< If any page is already mapped, overwrite the mapping.
    VMM_ALLOC_FAIL_IF_MAPPED = 1 << 0, ///< If set and any page is already mapped, fail and set `errno` to `EEXIST`.
    VMM_ALLOC_ZERO = 1 << 1            ///< If set, atomically zero the allocated pages.
} vmm_alloc_flags_t;

/**
 * @brief Initializes the Virtual Memory Manager.
 */
void vmm_init(void);

/**
 * @brief Loads the kernel's address space into the current CPU.
 */
void vmm_kernel_space_load(void);

/**
 * @brief Retrieves the kernel's address space.
 *
 * @return Pointer to the kernel's address space.
 */
space_t* vmm_kernel_space_get(void);

/**
 * @brief Converts the user space memory protection flags to page table entry flags.
 *
 * @param prot The memory protection flags.
 * @return The corresponding page table entry flags.
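 *
 * A hedged example; `PROT_READ` and `PROT_WRITE` are assumed `prot_t` flag names, as `prot_t` is defined in
 * `sys/proc.h` rather than in this header:
 * @code{.c}
 * pml_flags_t flags = vmm_prot_to_flags(PROT_READ | PROT_WRITE);
 * @endcode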
 */
pml_flags_t vmm_prot_to_flags(prot_t prot);

/**
 * @brief Allocates and maps virtual memory in a given address space.
 *
 * The allocated memory will be backed by newly allocated physical memory pages and is not guaranteed to be zeroed.
 *
 * @see `vmm_map()` for details on TLB shootdowns.
 *
 * @param space The target address space; if `NULL`, the kernel space is used.
 * @param virtAddr The desired virtual address. If `NULL`, the kernel chooses an available address.
 * @param length The length of the virtual memory region to allocate, in bytes.
 * @param alignment The required alignment for the virtual memory region, in bytes.
 * @param pmlFlags The page table flags for the mapping; `PML_OWNED` is always added, and `PML_PRESENT` must be set.
 * @param allocFlags The allocation flags.
 * @return On success, the virtual address. On failure, returns `NULL` and `errno` is set to:
 * - `EINVAL`: Invalid parameters.
 * - `EBUSY`: The region contains pinned pages.
 * - `EEXIST`: The region is already mapped and `VMM_ALLOC_FAIL_IF_MAPPED` is set.
 * - `ENOMEM`: Not enough memory.
 * - Other values from `space_mapping_start()`.
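 *
 * A minimal usage sketch allocating four zeroed, writable pages in the kernel space. Note that `PML_WRITE` is an
 * assumed flag name; only `PML_PRESENT` and `PML_OWNED` are referenced in this header:
 * @code{.c}
 * void* buffer = vmm_alloc(NULL, NULL, 4 * PAGE_SIZE, PAGE_SIZE, PML_PRESENT | PML_WRITE, VMM_ALLOC_ZERO);
 * if (buffer == NULL)
 * {
 *     // errno is set, e.g. ENOMEM
 * }
 * @endcode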
 */
void* vmm_alloc(space_t* space, void* virtAddr, size_t length, size_t alignment, pml_flags_t pmlFlags,
    vmm_alloc_flags_t allocFlags);

/**
 * @brief Maps physical memory to virtual memory in a given address space.
 *
 * Will overwrite any existing mappings in the specified range.
 *
 * When mapping a previously unmapped page there is no need for a TLB shootdown, as any earlier access to that page
 * would have caused a non-present page fault, so no CPU can hold a stale TLB entry for it. However, if the page is
 * already mapped then it must first be unmapped as described in `vmm_unmap()`.
 *
 * @param space The target address space; if `NULL`, the kernel space is used.
 * @param virtAddr The desired virtual address to map to; if `NULL`, the kernel chooses an available address.
 * @param physAddr The physical address to map from. Must not be `PHYS_ADDR_INVALID`.
 * @param length The length of the memory region to map, in bytes.
 * @param flags The page table flags for the mapping, must have `PML_PRESENT` set.
 * @param func The callback function to call when the mapped memory is unmapped or the address space is freed. If
 * `NULL`, then no callback will be called.
 * @param data Private data to pass to the callback function.
 * @return On success, the virtual address. On failure, returns `NULL` and `errno` is set to:
 * - `EINVAL`: Invalid parameters.
 * - `EBUSY`: The region contains pinned pages.
 * - `ENOSPC`: No available callback slots.
 * - `ENOMEM`: Not enough memory.
 * - Other values from `space_mapping_start()`.
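 *
 * A hedged usage sketch mapping one page of device registers; the physical address and the `PML_WRITE` flag name
 * are illustrative:
 * @code{.c}
 * phys_addr_t mmioBase = 0xFEE00000; // hypothetical device register page
 * volatile uint32_t* regs = vmm_map(NULL, NULL, mmioBase, PAGE_SIZE, PML_PRESENT | PML_WRITE, NULL, NULL);
 * @endcode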
 */
void* vmm_map(space_t* space, void* virtAddr, phys_addr_t physAddr, size_t length, pml_flags_t flags,
    space_callback_func_t func, void* data);

/**
 * @brief Maps an array of physical pages to virtual memory in a given address space.
 *
 * Will overwrite any existing mappings in the specified range.
 *
 * @see `vmm_map()` for details on TLB shootdowns.
 *
 * @param space The target address space; if `NULL`, the kernel space is used.
 * @param virtAddr The desired virtual address to map to; if `NULL`, the kernel chooses an available address.
 * @param pfns An array of page frame numbers to map from.
 * @param amount The number of pages to map.
 * @param flags The page table flags for the mapping, must have `PML_PRESENT` set.
 * @param func The callback function to call when the mapped memory is unmapped or the address space is freed. If
 * `NULL`, then no callback will be called.
 * @param data Private data to pass to the callback function.
 * @return On success, the virtual address. On failure, returns `NULL` and `errno` is set to:
 * - `EINVAL`: Invalid parameters.
 * - `EBUSY`: The region contains pinned pages.
 * - `ENOSPC`: No available callback slots.
 * - `ENOMEM`: Not enough memory.
 * - Other values from `space_mapping_start()`.
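 *
 * A hedged usage sketch; the page frame numbers are illustrative:
 * @code{.c}
 * pfn_t pfns[2] = {0x1000, 0x1001}; // two possibly non-contiguous physical pages
 * void* virt = vmm_map_pages(NULL, NULL, pfns, 2, PML_PRESENT, NULL, NULL);
 * @endcode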
 */
void* vmm_map_pages(space_t* space, void* virtAddr, pfn_t* pfns, size_t amount, pml_flags_t flags,
    space_callback_func_t func, void* data);

/**
 * @brief Unmaps virtual memory from a given address space.
 *
 * If the memory is already unmapped, this function will do nothing.
 *
 * Unmapping memory requires TLB shootdowns on all CPUs that have the address space loaded. To perform the shootdown
 * we first set all page entries for the region to non-present, perform the shootdown, wait for acknowledgements
 * from all CPUs, and finally free any underlying physical memory if the `PML_OWNED` flag is set.
 *
 * @param space The target address space; if `NULL`, the kernel space is used.
 * @param virtAddr The virtual address of the memory region.
 * @param length The length of the memory region, in bytes.
 * @return On success, `virtAddr`. On failure, `NULL` and `errno` is set to:
 * - `EINVAL`: Invalid parameters.
 * - `EBUSY`: The region contains pinned pages.
 * - Other values from `space_mapping_start()`.
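 *
 * Usage sketch, reusing `buffer` from the `vmm_alloc()` example:
 * @code{.c}
 * if (vmm_unmap(NULL, buffer, 4 * PAGE_SIZE) == NULL)
 * {
 *     // errno is set, e.g. EBUSY if the region contains pinned pages
 * }
 * @endcode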
 */
void* vmm_unmap(space_t* space, void* virtAddr, size_t length);

/**
 * @brief Changes memory protection flags for a virtual memory region in a given address space.
 *
 * The memory region must be fully mapped, otherwise this function will fail.
 *
 * Changing memory protection flags requires TLB shootdowns on all CPUs that have the address space loaded. To
 * perform the shootdown we first update the page entries for the region, perform the shootdown, wait for
 * acknowledgements from all CPUs, and finally return.
 *
 * @param space The target address space; if `NULL`, the kernel space is used.
 * @param virtAddr The virtual address of the memory region.
 * @param length The length of the memory region, in bytes.
 * @param flags The new page table flags for the memory region; if `PML_PRESENT` is not set, the memory will be
 * unmapped.
 * @return On success, `virtAddr`. On failure, `NULL` and `errno` is set to:
 * - `EINVAL`: Invalid parameters.
 * - `EBUSY`: The region contains pinned pages.
 * - `ENOENT`: The region is unmapped, or only partially mapped.
 * - Other values from `space_mapping_start()`.
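 *
 * Usage sketch making a region read-only, on the assumption that writability is a separate flag and passing only
 * `PML_PRESENT` therefore leaves the region mapped but not writable:
 * @code{.c}
 * if (vmm_protect(NULL, buffer, 4 * PAGE_SIZE, PML_PRESENT) == NULL)
 * {
 *     // errno is set, e.g. ENOENT if the region is only partially mapped
 * }
 * @endcode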
 */
void* vmm_protect(space_t* space, void* virtAddr, size_t length, pml_flags_t flags);

/**
 * @brief Loads a virtual address space.
 *
 * Must be called with interrupts disabled.
 *
 * Will do nothing if the space is already loaded.
 *
 * @param space The address space to load.
 */
void vmm_load(space_t* space);

/**
 * @brief Performs a TLB shootdown for a region of the address space, and waits for acknowledgements.
 *
 * Must be called between `space_mapping_start()` and `space_mapping_end()`.
 *
 * This will cause all CPUs that have the address space loaded to invalidate their TLB entries for the specified
 * region.
 *
 * Will not affect the current CPU's TLB; that is handled by the `page_table_t` directly when modifying page table
 * entries.
 *
 * @todo Currently this does a busy wait for acknowledgements. Use a wait queue?
 *
 * @param space The target address space.
 * @param virtAddr The starting virtual address of the region.
 * @param pageAmount The number of pages in the region.
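 *
 * A hedged sketch of the expected call pattern; the exact `space_mapping_start()` and `space_mapping_end()`
 * signatures live in `kernel/mem/space.h`, so the call shapes below are assumptions:
 * @code{.c}
 * space_mapping_start(space);
 * // ... modify the page table entries for the region ...
 * vmm_tlb_shootdown(space, virtAddr, pageAmount);
 * space_mapping_end(space);
 * @endcode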
 */
void vmm_tlb_shootdown(space_t* space, void* virtAddr, size_t pageAmount);

/** @} */