PatchworkOS  69292a3
A non-POSIX operating system.
Loading...
Searching...
No Matches
cache.c
Go to the documentation of this file.
1#include <kernel/cpu/cpu.h>
3#include <kernel/log/log.h>
4#include <kernel/mem/cache.h>
5#include <kernel/mem/pmm.h>
6
7#include <errno.h>
8#include <kernel/mem/vmm.h>
9#include <kernel/sync/lock.h>
10#include <stdlib.h>
11#include <sys/list.h>
12#include <sys/math.h>
13
// Initialize a freshly allocated slab for `cache` and thread its free list.
// NOTE(review): Doxygen-extracted listing — source line 14 (the signature,
// static cache_slab_t* cache_slab_new(cache_t* cache) per the index) and
// lines 16-17 (presumably the vmm_alloc of the slab's backing pages that
// produces `slab` — TODO confirm against the repository) are missing from
// this view.
15{
18 if (slab == NULL)
19 {
20 return NULL;
21 }
// Fresh slab: on no list yet, owned by no CPU, every object slot free.
22 list_entry_init(&slab->entry);
23 slab->owner = CPU_ID_INVALID;
24 slab->freeCount = cache->layout.amount;
25 slab->firstFree = 0;
26 lock_init(&slab->lock);
27 slab->cache = cache;
// Object area begins layout.start bytes past the slab header.
28 slab->objects = (void*)((uintptr_t)slab + cache->layout.start);
29
// Run the user-supplied constructor once per object slot, if any.
30 if (cache->ctor != NULL)
31 {
32 for (uint32_t i = 0; i < cache->layout.amount; i++)
33 {
34 cache->ctor((void*)((uintptr_t)slab->objects + (i * cache->layout.step)));
35 }
36 }
37
// Thread the bufctl free chain: slot i links to slot i+1; the final slot
// is terminated with CACHE_BUFCTL_END.
38 for (uint32_t i = 0; i < cache->layout.amount - 1; i++)
39 {
40 slab->bufctl[i] = i + 1;
41 }
42 slab->bufctl[cache->layout.amount - 1] = CACHE_BUFCTL_END;
43
44 return slab;
45}
46
// Tear down a slab: run the user destructor on every object slot.
// NOTE(review): Doxygen-extracted listing — source line 47 (the signature,
// static void cache_slab_destroy(cache_slab_t* slab) per the index) and
// line 56 (presumably the vmm_unmap releasing the slab's pages — TODO
// confirm against the repository) are missing from this view.
48{
49 if (slab->cache->dtor != NULL)
50 {
51 for (uint32_t i = 0; i < slab->cache->layout.amount; i++)
52 {
53 slab->cache->dtor((void*)((uintptr_t)slab->objects + (i * slab->cache->layout.step)));
54 }
55 }
57}
58
59static inline void* cache_slab_alloc(cache_slab_t* slab)
60{
61 if (slab->freeCount == 0)
62 {
63 return NULL;
64 }
65
66 void* object = (void*)((uintptr_t)slab->objects + (slab->firstFree * slab->cache->layout.step));
67 slab->firstFree = slab->bufctl[slab->firstFree];
68 slab->freeCount--;
69 return object;
70}
71
72static inline void cache_slab_free(cache_slab_t* slab, void* ptr)
73{
74 uintptr_t offset = (uintptr_t)ptr - (uintptr_t)slab->objects;
75 uint32_t index = offset >> slab->cache->layout.stepShift;
76
77 slab->bufctl[index] = slab->firstFree;
78 slab->firstFree = index;
79 slab->freeCount++;
80}
81
// Precompute the slab layout for `cache`: the per-object step, how many
// objects fit per slab, and (per the index) the data start offset.
// NOTE(review): Doxygen-extracted listing — source lines 82 (the signature,
// static void cache_precalc_layout(cache_t* cache) per the index), 84, 87,
// 96-97, 99 and 103 are missing from this view; the while-loop body that
// shrinks `amount` until header + bufctl array + objects fit within
// CACHE_SLAB_PAGES pages is among the gaps — TODO confirm against the
// repository.
83{
85 if (!IS_POW2(cache->layout.step))
86 {
88 }
// step is a power of two, so pointer-to-index mapping can use a shift
// instead of a division.
89 cache->layout.stepShift = __builtin_ctz(cache->layout.step);
90
// Initial estimate: all space after the slab header, divided by the
// per-object footprint (object plus one bufctl entry).
91 uint32_t available = CACHE_SLAB_PAGES * PAGE_SIZE - sizeof(cache_slab_t);
92 cache->layout.amount = available / (cache->size + sizeof(cache_bufctl_t));
93
94 while (cache->layout.amount > 0)
95 {
98
100 {
101 break;
102 }
104 }
105
// A cache whose object cannot fit even once per slab is a configuration
// error.
106 assert(cache->layout.amount != 0);
107}
108
// Allocate one object from `cache`: per-CPU active-slab fast path first,
// then an unowned slab from the partial list, then the free list, and
// finally a brand-new slab.
// NOTE(review): Doxygen-extracted listing — source line 109 (the signature,
// void* cache_alloc(cache_t* cache) per the index) and lines 131, 138, 150,
// 155, 157, 169, 178, 183, 192 and 199 are missing from this view. Several
// of the gaps presumably acquire/release cache->lock around the list
// manipulation below — TODO confirm against the repository.
110{
111 if (cache == NULL)
112 {
113 return NULL;
114 }
115
// Keep interrupts disabled for the whole allocation so SELF->id stays
// stable.
116 CLI_SCOPE();
117
// Fast path: this CPU already owns an active slab.
118 if (cache->cpus[SELF->id].active != NULL)
119 {
120 cache_slab_t* active = cache->cpus[SELF->id].active;
121 lock_acquire(&active->lock);
122
123 void* result = cache_slab_alloc(active);
124 if (result != NULL)
125 {
126 lock_release(&active->lock);
127 return result;
128 }
129 lock_release(&active->lock);
130
// NOTE(review): source line 131 is missing here — presumably it takes
// cache->lock before the re-check below; confirm.
132 lock_acquire(&active->lock);
133
// Re-check after reacquiring: another CPU may have freed into this slab
// in the window above.
134 if (active->freeCount > 0)
135 {
136 result = cache_slab_alloc(active);
137 lock_release(&active->lock);
// NOTE(review): source line 138 is missing here.
139 return result;
140 }
141
// Active slab is exhausted: disown it and park it on the full list.
142 active->owner = CPU_ID_INVALID;
143 cache->cpus[SELF->id].active = NULL;
144 list_remove(&active->entry);
145 list_push_back(&cache->full, &active->entry);
146 lock_release(&active->lock);
147 goto allocate;
148 }
149
// NOTE(review): source line 150 is missing here — likely acquires
// cache->lock for the slow path; confirm.
151allocate:
152
// Lazily compute the slab layout on first allocation.
153 if (cache->layout.amount == 0)
154 {
// NOTE(review): source line 155 (presumably the cache_precalc_layout(cache)
// call) and line 157 (the tail of this LOG_DEBUG argument list) are missing.
156 LOG_DEBUG("cache '%s' layout: step=%u, amount=%u, start=%u\n", cache->name, cache->layout.step,
158 }
159
// Prefer an unowned slab already on the partial list.
160 cache_slab_t* slab;
161 LIST_FOR_EACH(slab, &cache->partial, entry)
162 {
163 if (slab->owner == CPU_ID_INVALID)
164 {
165 list_remove(&slab->entry);
166 list_push_back(&cache->partial, &slab->entry);
167 slab->owner = SELF->id;
168 cache->cpus[SELF->id].active = slab;
// NOTE(review): source line 169 is missing here — likely releases
// cache->lock; confirm.
170
171 LOCK_SCOPE(&slab->lock);
172 return cache_slab_alloc(slab);
173 }
174 }
175
// Next, reuse a cached empty slab from the free list.
176 if (!list_is_empty(&cache->free))
177 {
// NOTE(review): source line 178 is missing here — presumably pops
// &cache->free and converts the entry to `slab` via CONTAINER_OF; confirm.
179 cache->freeCount--;
180 list_push_back(&cache->partial, &slab->entry);
181 slab->owner = SELF->id;
182 cache->cpus[SELF->id].active = slab;
// NOTE(review): source line 183 is missing here.
184
185 LOCK_SCOPE(&slab->lock);
186 return cache_slab_alloc(slab);
187 }
188
// Last resort: build a brand-new slab.
189 slab = cache_slab_new(cache);
190 if (slab == NULL)
191 {
// NOTE(review): source line 192 is missing here — likely releases
// cache->lock and/or sets errno before failing; confirm.
193 return NULL;
194 }
195
196 list_push_back(&cache->partial, &slab->entry);
197 slab->owner = SELF->id;
198 cache->cpus[SELF->id].active = slab;
// NOTE(review): source line 199 is missing here.
200
201 LOCK_SCOPE(&slab->lock);
202 return cache_slab_alloc(slab);
203}
204
// Free an object back to the cache that owns it.
// NOTE(review): Doxygen-extracted listing — source line 212 (derives `slab`
// from `ptr`, presumably by rounding the pointer down to the
// CACHE_SLAB_PAGES boundary — confirm), line 223, line 238 (presumably pops
// `freeSlab` off &cache->free via CONTAINER_OF) and line 244 are missing
// from this view; the 223/244 pair likely bracket the list updates with
// cache->lock.
205void cache_free(void* ptr)
206{
207 if (ptr == NULL)
208 {
209 return;
210 }
211
213 cache_t* cache = slab->cache;
214
// Snapshot the slab's state transitions under its own lock; the list
// moves below are decided from these snapshots.
215 lock_acquire(&slab->lock);
216 bool wasFull = (slab->freeCount == 0);
217 cache_slab_free(slab, ptr);
218 bool isEmpty = (slab->freeCount == cache->layout.amount);
219 lock_release(&slab->lock);
220
221 if (wasFull || isEmpty)
222 {
224
// full -> partial: the slab has at least one free object again.
225 if (wasFull)
226 {
227 list_remove(&slab->entry);
228 list_push_back(&cache->partial, &slab->entry);
229 }
230
// Fully empty and owned by no CPU: move it to the free list, then trim
// the free list down to CACHE_LIMIT slabs by destroying the excess.
231 if (isEmpty && slab->owner == CPU_ID_INVALID)
232 {
233 list_remove(&slab->entry);
234 list_push_back(&cache->free, &slab->entry);
235 cache->freeCount++;
236 while (cache->freeCount > CACHE_LIMIT)
237 {
239 cache->freeCount--;
240 cache_slab_destroy(freeSlab);
241 }
242 }
243
245 }
246}
247
// Unit tests, compiled only in testing builds.
248#ifdef _TESTING_
249
250#include <kernel/sched/clock.h>
251#include <kernel/utils/test.h>
252
// 100-byte objects aligned to CACHE_LINE, no ctor/dtor — exercised by the
// test below.
253static cache_t testCache = CACHE_CREATE(testCache, "test", 100, CACHE_LINE, NULL, NULL);
254
// NOTE(review): source line 255 (the TEST_DEFINE(...) header naming this
// test) is missing from this extraction.
256{
// Two back-to-back allocations must yield distinct, non-NULL, 8-byte
// aligned objects, and both must free cleanly.
257 void* ptr1 = cache_alloc(&testCache);
258 TEST_ASSERT(ptr1 != NULL);
259 TEST_ASSERT(((uintptr_t)ptr1 & 7) == 0);
260
261 void* ptr2 = cache_alloc(&testCache);
262 TEST_ASSERT(ptr2 != NULL);
263 TEST_ASSERT(ptr1 != ptr2);
264
265 cache_free(ptr1);
266 cache_free(ptr2);
267
268 return 0;
269}
270
271#endif
#define assert(expression)
Definition assert.h:29
static void * cache_slab_alloc(cache_slab_t *slab)
Definition cache.c:59
static cache_slab_t * cache_slab_new(cache_t *cache)
Definition cache.c:14
static void cache_slab_free(cache_slab_t *slab, void *ptr)
Definition cache.c:72
static void cache_slab_destroy(cache_slab_t *slab)
Definition cache.c:47
static void cache_precalc_layout(cache_t *cache)
Definition cache.c:82
static cache_t cache
Definition dentry.c:132
#define CLI_SCOPE()
Macro to increment CLI depth for the duration of the current scope.
Definition cli.h:56
#define SELF
Macro to access data in the current cpu.
Definition percpu.h:85
#define CPU_ID_INVALID
Invalid CPU ID.
Definition cpu.h:60
#define LOG_DEBUG(format,...)
Definition log.h:85
void cache_free(void *ptr)
Free an object back to its cache.
Definition cache.c:205
uint16_t cache_bufctl_t
Buffer control type.
Definition cache.h:71
#define CACHE_LINE
Cache line size in bytes.
Definition cache.h:75
#define CACHE_BUFCTL_END
End of buffer control list marker.
Definition cache.h:73
void * cache_alloc(cache_t *cache)
Allocate an object from the cache.
Definition cache.c:109
#define CACHE_CREATE(_cache, _name, _size, _alignment, _ctor, _dtor)
Macro to create a cache initializer.
Definition cache.h:148
#define CACHE_LIMIT
Maximum number of free slabs in a cache.
Definition cache.h:69
#define CACHE_SLAB_PAGES
Number of pages in a slab.
Definition cache.h:77
@ PML_PRESENT
@ PML_WRITE
@ PML_GLOBAL
void * vmm_alloc(space_t *space, void *virtAddr, size_t length, size_t alignment, pml_flags_t pmlFlags, vmm_alloc_flags_t allocFlags)
Allocates and maps virtual memory in a given address space.
Definition vmm.c:153
void * vmm_unmap(space_t *space, void *virtAddr, size_t length)
Unmaps virtual memory from a given address space.
Definition vmm.c:327
@ VMM_ALLOC_FAIL_IF_MAPPED
If set and any page is already mapped, fail and set errno to EEXIST.
Definition vmm.h:123
static void lock_init(lock_t *lock)
Initializes a lock.
Definition lock.h:79
#define LOCK_SCOPE(lock)
Acquires a lock for the reminder of the current scope.
Definition lock.h:58
static void lock_release(lock_t *lock)
Releases a lock.
Definition lock.h:175
static void lock_acquire(lock_t *lock)
Acquires a lock, blocking until it is available.
Definition lock.h:96
#define TEST_DEFINE(_name)
Define a test function to be run by TEST_ALL().
Definition test.h:71
#define TEST_ASSERT(cond)
Assert a condition in a test.
Definition test.h:82
#define LIST_FOR_EACH(elem, list, member)
Iterates over a list.
Definition list.h:58
static void list_remove(list_entry_t *entry)
Removes a list entry from its current list.
Definition list.h:290
static void list_push_back(list_t *list, list_entry_t *entry)
Pushes an entry to the end of the list.
Definition list.h:321
static bool list_is_empty(list_t *list)
Checks if a list is empty.
Definition list.h:210
static void list_entry_init(list_entry_t *entry)
Initializes a list entry.
Definition list.h:173
static list_entry_t * list_pop_front(list_t *list)
Pops the first entry from the list.
Definition list.h:365
uint64_t next_pow2(uint64_t n)
Definition new_pow2.c:3
#define ROUND_DOWN(number, multiple)
Definition math.h:23
#define ROUND_UP(number, multiple)
Definition math.h:21
#define IS_POW2(x)
Definition math.h:27
#define NULL
Pointer error value.
Definition NULL.h:25
#define PAGE_SIZE
The size of a memory page in bytes.
Definition PAGE_SIZE.h:8
#define CONTAINER_OF(ptr, type, member)
Container of macro.
static uint64_t offset
Definition screen.c:19
static bool active
Definition rcu.c:13
__UINT32_TYPE__ uint32_t
Definition stdint.h:15
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:43
uint32_t step
Definition cache.h:86
uint32_t stepShift
Definition cache.h:87
uint32_t start
Definition cache.h:85
uint32_t amount
Definition cache.h:88
Cache slab structure.
Cache structure.
Definition cache.h:123
const char * name
Definition cache.h:124
cache_cpu_t cpus[CPU_MAX]
Definition cache.h:135
list_t free
Definition cache.h:131
size_t size
Definition cache.h:125
void(* ctor)(void *obj)
Definition cache.h:128
cache_slab_layout_t layout
Definition cache.h:127
lock_t lock
Definition cache.h:130
uint64_t freeCount
Definition cache.h:134
size_t alignment
Definition cache.h:126
list_t full
Definition cache.h:133
list_t partial
Definition cache.h:132