PatchworkOS 19e446b
A non-POSIX operating system.

irp.c

#include <kernel/cpu/cpu.h>
#include <kernel/io/irp.h>
#include <kernel/log/log.h>
#include <kernel/log/panic.h>
#include <kernel/mem/mdl.h>
#include <kernel/mem/pool.h>
#include <kernel/sched/timer.h>
#include <kernel/sched/wait.h>
#include <kernel/sync/lock.h>
#include <stdatomic.h>
#include <string.h>

#include <kernel/cpu/percpu.h>

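// Per-CPU IRP timeout state: a deadline-sorted queue of pending timeouts
// and the lock protecting it.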
typedef struct irp_ctx
{
    list_t timeouts; // Sorted by deadline, earliest first.
    lock_t lock;
} irp_ctx_t;

PERCPU_DEFINE_CTOR(irp_ctx_t, pcpu_irps)
{
    irp_ctx_t* ctx = SELF_PTR(pcpu_irps);

    list_init(&ctx->timeouts);
    lock_init(&ctx->lock);
}

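// Allocate a pool of `size` IRPs in a single allocation; the irp_t array is
// a flexible array member at the end of irp_pool_t.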
irp_pool_t* irp_pool_new(size_t size, process_t* process, void* ctx)
{
    if (size == 0 || process == NULL || size >= POOL_IDX_MAX)
    {
        errno = EINVAL;
        return NULL;
    }

    irp_pool_t* pool = malloc(sizeof(irp_pool_t) + (sizeof(irp_t) * size));
    if (pool == NULL)
    {
        errno = ENOMEM;
        return NULL;
    }

    pool->ctx = ctx;
    pool->process = process;
    atomic_init(&pool->active, 0);
    memset(&pool->irps, 0, sizeof(irp_t) * size);
    for (size_t i = 0; i < size; i++)
    {
        irp_t* irp = &pool->irps[i];
        irp->index = i;
    }

    pool_init(&pool->pool, pool->irps, size, sizeof(irp_t), offsetof(irp_t, next));

    return pool;
}

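// Free an IRP pool. The caller must ensure no IRPs are still in flight.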
63{
64 free(pool);
65}
66
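// Add an IRP to the current CPU's timeout queue, keeping the queue sorted
// by deadline, and (re)arm the one-shot timer.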
void irp_timeout_add(irp_t* irp, clock_t timeout)
{
    if (timeout == CLOCKS_NEVER)
    {
        return;
    }

    irp_ctx_t* ctx = SELF_PTR(pcpu_irps);
    LOCK_SCOPE(&ctx->lock);

    irp->cpu = SELF->id;

    clock_t now = clock_uptime();
    irp->deadline = CLOCKS_DEADLINE(timeout, now);

    irp_t* entry;
    LIST_FOR_EACH(entry, &ctx->timeouts, timeoutEntry)
    {
        if (irp->deadline < entry->deadline)
        {
            // Insert before the first entry with a later deadline to keep
            // the queue sorted.
            list_prepend(&entry->timeoutEntry, &irp->timeoutEntry);
            timer_set(now, irp->deadline);
            return;
        }
    }

    list_push_back(&ctx->timeouts, &irp->timeoutEntry);
    timer_set(now, irp->deadline);
}

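// Remove an IRP from its timeout queue. The IRP may live on another CPU's
// queue, so the owning CPU's context is looked up and locked.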
void irp_timeout_remove(irp_t* irp)
{
    cpu_id_t cpu = irp->cpu;
    if (cpu == CPU_ID_INVALID)
    {
        return;
    }

    irp_ctx_t* ctx = CPU_PTR(cpu, pcpu_irps);
    assert(ctx != NULL);

    LOCK_SCOPE(&ctx->lock);
    if (irp->cpu != cpu) // The timeout may have fired or moved while we waited for the lock.
    {
        return;
    }

    list_remove(&irp->timeoutEntry);
    irp->cpu = CPU_ID_INVALID;
}

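// Unwind the IRP's frame stack: pop frames one at a time, dropping each
// frame's vnode reference and invoking its completion callback if set. Once
// the last frame is popped, release the MDL chain and return the IRP to its
// pool, dropping the pool's process reference when the pool goes idle.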
static void irp_perform_completion(irp_t* irp)
{
    while (irp->frame < IRP_FRAME_MAX)
    {
        irp_frame_t* frame = irp_current(irp);
        irp->frame++;

        if (irp->frame == IRP_FRAME_MAX)
        {
            irp_timeout_remove(irp); // Last frame: the IRP is done, drop any pending timeout.
        }

        if (frame->vnode != NULL)
        {
            UNREF(frame->vnode);
            frame->vnode = NULL;
        }

        if (frame->complete != NULL)
        {
            frame->complete(irp, frame->ctx);
            return;
        }
    }

    assert(irp->frame == IRP_FRAME_MAX);
    assert(irp->next == POOL_IDX_MAX);
    assert(irp->cpu == CPU_ID_INVALID);

    mdl_t* next = irp->mdl.next;
    mdl_deinit(&irp->mdl);
    mdl_free_chain(next, free); // Free any extra MDL links allocated in irp_get_mdl().

    irp_pool_t* pool = irp_get_pool(irp);
    pool_free(&pool->pool, irp->index);

    if (atomic_fetch_sub(&pool->active, 1) == 1)
    {
        UNREF(pool->process);
    }
}

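// Called from the timer path to expire IRPs on the current CPU: pop every
// queue entry whose deadline has passed, mark it cancelled, and run its
// cancel handler with err set to ETIMEDOUT. The lock is dropped around the
// handler call since the handler may complete the IRP.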
void irp_timeouts_check(void)
{
    irp_ctx_t* ctx = SELF_PTR(pcpu_irps);
    assert(ctx != NULL);

    clock_t now = clock_uptime();

    lock_acquire(&ctx->lock);

    irp_t* irp;
    while (true)
    {
        irp = CONTAINER_OF_SAFE(list_first(&ctx->timeouts), irp_t, timeoutEntry);
        if (irp == NULL)
        {
            break;
        }

        if (irp->deadline > now)
        {
            // The queue is sorted, so everything after this entry is also
            // unexpired; re-arm the timer for the earliest deadline.
            timer_set(now, irp->deadline);
            break;
        }

        list_remove(&irp->timeoutEntry);
        irp->deadline = CLOCKS_NEVER;
        irp->cpu = CPU_ID_INVALID;
        irp_cancel_t handler = atomic_exchange(&irp->cancel, IRP_CANCELLED);
        lock_release(&ctx->lock);

        if (handler == IRP_CANCELLED)
        {
            // Already cancelled.
        }
        else if (handler != NULL)
        {
            irp->err = ETIMEDOUT;
            handler(irp);
        }
        else
        {
            atomic_store(&irp->cancel, NULL);
        }

        lock_acquire(&ctx->lock);
    }

    lock_release(&ctx->lock);
}

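// Allocate an IRP from a pool and reset it to a known state. The pool only
// holds a reference on its owning process while at least one IRP is active.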
irp_t* irp_new(irp_pool_t* pool)
{
    pool_idx_t idx = pool_alloc(&pool->pool);
    if (idx == POOL_IDX_MAX)
    {
        errno = ENOSPC;
        return NULL;
    }

    if (atomic_fetch_add(&pool->active, 1) == 0)
    {
        REF(pool->process);
    }

    irp_t* irp = &pool->irps[idx];
    assert(irp->index == idx);

    list_entry_init(&irp->entry);
    list_entry_init(&irp->timeoutEntry);
    atomic_init(&irp->cancel, NULL);
    irp->deadline = CLOCKS_NEVER;
    irp->res._raw = 0;
    mdl_init(&irp->mdl, NULL);
    irp->next = POOL_IDX_MAX;
    irp->cpu = CPU_ID_INVALID;
    irp->err = EOK;
    irp->frame = IRP_FRAME_MAX;
    return irp;
}

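// Describe a memory region of the owning process for the IRP: walk the MDL
// chain to the first empty link (allocating new links as needed) and add the
// region to it. The chain is torn down when the IRP completes.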
mdl_t* irp_get_mdl(irp_t* irp, const void* addr, size_t size)
{
    if (irp == NULL)
    {
        errno = EINVAL;
        return NULL;
    }

    process_t* process = irp_get_process(irp);
    if (process == NULL)
    {
        errno = EINVAL;
        return NULL;
    }

    mdl_t* mdl = &irp->mdl;
    while (mdl->amount > 0)
    {
        if (mdl->next != NULL)
        {
            mdl = mdl->next;
            continue;
        }

        mdl_t* next = malloc(sizeof(mdl_t));
        if (next == NULL)
        {
            errno = ENOMEM;
            return NULL;
        }
        mdl_init(next, mdl);
        mdl = next;
    }

    if (mdl_add(mdl, &process->space, addr, size) == ERR)
    {
        return NULL;
    }

    return mdl;
}

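// Push a new frame and dispatch the IRP to the vnode's handler for the
// frame's major function. The frame holds a vnode reference until it is
// completed.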
void irp_call(irp_t* irp, vnode_t* vnode)
{
    assert(irp->frame > 0);
    irp->frame--;

    irp_frame_t* frame = irp_current(irp);
    if (UNLIKELY(frame->major >= IRP_MJ_MAX))
    {
        irp_error(irp, EINVAL);
        return;
    }

    if (vnode == NULL || vnode->vtable == NULL)
    {
        irp_error(irp, EINVAL);
        return;
    }

    irp_func_t func = vnode->vtable->funcs[frame->major];
    if (func == NULL)
    {
        irp_error(irp, ENOSYS);
        return;
    }

    frame->vnode = REF(vnode);

    func(irp);
}

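// Like irp_call(), but bypass vnode dispatch and hand the new frame straight
// to the given function.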
void irp_call_direct(irp_t* irp, irp_func_t func)
{
    assert(irp->frame > 0);
    irp->frame--;

    irp_frame_t* frame = irp_current(irp);

    if (frame->vnode != NULL)
    {
        UNREF(frame->vnode);
        frame->vnode = NULL;
    }

    func(irp);
}

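// Attempt to cancel an IRP. Fails with EBUSY if the IRP is already cancelled
// or no cancel handler is registered; otherwise marks it cancelled, sets err
// to ECANCELED and runs the handler.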
uint64_t irp_cancel(irp_t* irp)
{
    irp_cancel_t handler = atomic_exchange(&irp->cancel, IRP_CANCELLED);
    if (handler == IRP_CANCELLED)
    {
        errno = EBUSY;
        return ERR;
    }

    if (handler == NULL)
    {
        atomic_store(&irp->cancel, NULL);
        errno = EBUSY;
        return ERR;
    }

    irp->err = ECANCELED;
    uint64_t result = handler(irp);
    return result;
}

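// Complete the current frame in the IRP stack, unless the IRP has already
// been cancelled, in which case completion is left to the cancellation path.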
void irp_complete(irp_t* irp)
{
    if (irp_set_cancel(irp, NULL) == IRP_CANCELLED)
    {
        return;
    }
    irp_perform_completion(irp);
}
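
As a rough orientation, here is a minimal driver-side sketch of the lifecycle
implemented above. It is illustrative only: the my_* names are hypothetical,
and populating the frame (major function, arguments) is done through helpers
in irp.h that are not part of this listing.

#include <kernel/io/irp.h>

#define MY_POOL_SIZE 32 // Hypothetical: at most 32 requests in flight.

static irp_pool_t* my_pool;

// Called once at driver startup; `process` owns the requests.
uint64_t my_driver_init(process_t* process, void* driver_ctx)
{
    my_pool = irp_pool_new(MY_POOL_SIZE, process, driver_ctx);
    return my_pool != NULL ? 0 : ERR; // errno is EINVAL or ENOMEM on failure.
}

// Submit one request against a vnode, with an optional timeout.
uint64_t my_driver_submit(vnode_t* vnode, clock_t timeout)
{
    irp_t* irp = irp_new(my_pool);
    if (irp == NULL)
    {
        return ERR; // errno is ENOSPC: the whole pool is in flight.
    }

    // Frame setup (frame->major, buffers via irp_get_mdl(), ...) elided.

    irp_timeout_add(irp, timeout); // No-op if timeout == CLOCKS_NEVER.
    irp_call(irp, vnode);          // Dispatches to vnode->vtable->funcs[major].
    return 0;
}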