PatchworkOS  19e446b
A non-POSIX operating system.
ring.c
Go to the documentation of this file.
3#include <kernel/fs/path.h>
4#include <kernel/io/ring.h>
5#include <kernel/io/irp.h>
6#include <kernel/log/log.h>
7#include <kernel/log/panic.h>
9#include <kernel/mem/pmm.h>
10#include <kernel/mem/vmm.h>
11#include <kernel/proc/process.h>
12#include <kernel/sched/clock.h>
13
14#include <errno.h>
15#include <sys/ioring.h>
16#include <sys/list.h>
17#include <time.h>
18
19static inline uint64_t ioring_ctx_acquire(ioring_ctx_t* ctx)
20{
21 ioring_ctx_flags_t expected = atomic_load(&ctx->flags);
22 if (!(expected & IORING_CTX_BUSY) && atomic_compare_exchange_strong(&ctx->flags, &expected, expected | IORING_CTX_BUSY))
23 {
24 return 0;
25 }
26
27 return ERR;
28}
29
30static inline void ioring_ctx_release(ioring_ctx_t* ctx)
31{
32 atomic_fetch_and(&ctx->flags, ~IORING_CTX_BUSY);
33}
34
35static inline uint64_t ioring_ctx_map(ioring_ctx_t* ctx, process_t* process, ioring_id_t id, ioring_t* userRing, void* address,
36 size_t sentries, size_t centries)
37{
38 ioring_t* kernelRing = &ctx->ring;
39
40 size_t pageAmount = BYTES_TO_PAGES(sizeof(ioring_ctrl_t) + (sentries * sizeof(sqe_t)) + (centries * sizeof(cqe_t)));
41 if (pageAmount >= CONFIG_MAX_RINGS_PAGES)
42 {
43 errno = ENOMEM;
44 return ERR;
45 }
46
47 if (centries >= POOL_IDX_MAX)
48 {
49 errno = EINVAL;
50 return ERR;
51 }
52
53 pfn_t pages[CONFIG_MAX_RINGS_PAGES];
54 if (pmm_alloc_pages(pages, pageAmount) == ERR)
55 {
56 errno = ENOMEM;
57 return ERR;
58 }
59
60 for (size_t i = 0; i < pageAmount; i++)
61 {
62 memset(PFN_TO_VIRT(pages[i]), 0, PAGE_SIZE);
63 }
64
65 // PML_OWNED means that the pages will be freed when unmapped.
66 void* kernelAddr = vmm_map_pages(NULL, NULL, pages, pageAmount, PML_WRITE | PML_PRESENT | PML_OWNED, NULL, NULL);
67 if (kernelAddr == NULL)
68 {
69 pmm_free_pages(pages, pageAmount);
70 return ERR;
71 }
72
73 void* userAddr =
74 vmm_map_pages(&process->space, address, pages, pageAmount, PML_WRITE | PML_PRESENT | PML_USER, NULL, NULL);
75 if (userAddr == NULL)
76 {
77 vmm_unmap(NULL, kernelAddr, pageAmount * PAGE_SIZE);
78 return ERR;
79 }
80
81 irp_pool_t* irps = irp_pool_new(centries, process, ctx);
82 if (irps == NULL)
83 {
84 vmm_unmap(&process->space, userAddr, pageAmount * PAGE_SIZE);
85 vmm_unmap(NULL, kernelAddr, pageAmount * PAGE_SIZE);
86 return ERR;
87 }
88
89 ioring_ctrl_t* ctrl = (ioring_ctrl_t*)kernelAddr;
90 atomic_init(&ctrl->shead, 0);
91 atomic_init(&ctrl->stail, 0);
92 atomic_init(&ctrl->ctail, 0);
93 atomic_init(&ctrl->chead, 0);
94 for (size_t i = 0; i < SQE_REGS_MAX; i++)
95 {
96 atomic_init(&ctrl->regs[i], 0);
97 }
98
99 userRing->ctrl = userAddr;
100 userRing->id = id;
101 userRing->squeue = (sqe_t*)((uintptr_t)userAddr + sizeof(ioring_ctrl_t));
102 userRing->sentries = sentries;
103 userRing->smask = sentries - 1;
104 userRing->cqueue = (cqe_t*)((uintptr_t)userAddr + sizeof(ioring_ctrl_t) + (sentries * sizeof(sqe_t)));
105 userRing->centries = centries;
106 userRing->cmask = centries - 1;
107
108 kernelRing->ctrl = kernelAddr;
109 kernelRing->id = id;
110 kernelRing->squeue = (sqe_t*)((uintptr_t)kernelAddr + sizeof(ioring_ctrl_t));
111 kernelRing->sentries = sentries;
112 kernelRing->smask = sentries - 1;
113 kernelRing->cqueue = (cqe_t*)((uintptr_t)kernelAddr + sizeof(ioring_ctrl_t) + (sentries * sizeof(sqe_t)));
114 kernelRing->centries = centries;
115 kernelRing->cmask = centries - 1;
116
117 ctx->irps = irps;
118 ctx->userAddr = userAddr;
119 ctx->kernelAddr = kernelAddr;
120 ctx->pageAmount = pageAmount;
121
122 atomic_fetch_or(&ctx->flags, IORING_CTX_MAPPED);
123 return 0;
124}
125
126static uint64_t ioring_ctx_unmap(ioring_ctx_t* ctx)
127{
128 vmm_unmap(&ctx->irps->process->space, ctx->userAddr, ctx->pageAmount * PAGE_SIZE);
129 vmm_unmap(NULL, ctx->kernelAddr, ctx->pageAmount * PAGE_SIZE);
130
131 irp_pool_free(ctx->irps);
132 ctx->irps = NULL;
133
134 atomic_fetch_and(&ctx->flags, ~IORING_CTX_MAPPED);
135 return 0;
136}
137
138static uint64_t ioring_ctx_avail_cqes(ioring_ctx_t* ctx)
139{
140 ioring_t* ring = &ctx->ring;
141 uint64_t ctail = atomic_load_explicit(&ring->ctrl->ctail, memory_order_acquire);
142 uint64_t chead = atomic_load_explicit(&ring->ctrl->chead, memory_order_acquire);
143 return ctail - chead;
144}
145
146void ioring_ctx_init(ioring_ctx_t* ctx)
147{
148 if (ctx == NULL)
149 {
150 return;
151 }
152
153 ctx->ring = (ioring_t){0};
154 ctx->irps = NULL;
155 ctx->userAddr = NULL;
156 ctx->kernelAddr = NULL;
157 ctx->pageAmount = 0;
158 wait_queue_init(&ctx->waitQueue);
159 atomic_init(&ctx->flags, IORING_CTX_NONE);
160}
161
162void ioring_ctx_deinit(ioring_ctx_t* ctx)
163{
164 if (ctx == NULL)
165 {
166 return;
167 }
168
169 if (ioring_ctx_acquire(ctx) == ERR)
170 {
171 panic(NULL, "failed to acquire async context for deinitialization");
172 }
173
174 if (atomic_load(&ctx->flags) & IORING_CTX_MAPPED)
175 {
176 if (ioring_ctx_unmap(ctx) == ERR)
177 {
178 panic(NULL, "failed to deinitialize async context");
179 }
180 }
181
182 wait_queue_deinit(&ctx->waitQueue);
183 atomic_init(&ctx->flags, IORING_CTX_NONE);
184}
185
186static void ioring_ctx_dispatch(irp_t* irp);
187
188static void ioring_ctx_complete(irp_t* irp, void* _ptr)
189{
190 UNUSED(_ptr);
191
192 ioring_ctx_t* ctx = irp_get_ctx(irp);
193 ioring_t* ring = &ctx->ring;
194
195 sqe_flags_t reg = (irp->sqe.flags >> SQE_SAVE) & SQE_REG_MASK;
196 if (reg != SQE_REG_NONE)
197 {
198 atomic_store_explicit(&ring->ctrl->regs[reg], irp->res._raw, memory_order_release);
199 }
200
201 uint64_t tail = atomic_load_explicit(&ring->ctrl->ctail, memory_order_relaxed);
202 uint64_t head = atomic_load_explicit(&ring->ctrl->chead, memory_order_acquire);
203
204 if ((tail - head) >= ring->centries)
205 {
206 /// @todo Handle overflow properly.
207 panic(NULL, "Async completion queue overflow");
208 }
209
210 cqe_t* cqe = &ring->cqueue[tail & ring->cmask];
211 cqe->op = irp->sqe.op;
212 cqe->error = irp->err;
213 cqe->data = irp->sqe.data;
214 cqe->_result = irp->res._raw;
215
216 atomic_store_explicit(&ring->ctrl->ctail, tail + 1, memory_order_release);
217 wait_unblock(&ctx->waitQueue, WAIT_ALL, EOK);
218
219 if (irp->err != EOK && !(irp->sqe.flags & SQE_HARDLINK))
220 {
221 while (true)
222 {
223 irp_t* next = irp_chain_next(irp);
224 if (next == NULL)
225 {
226 break;
227 }
228
229 irp_error(next, irp->err);
230 }
231 }
232 else
233 {
234 irp_t* next = irp_chain_next(irp);
235 if (next != NULL)
236 {
237 irp_set_complete(next, ioring_ctx_complete, NULL);
238 irp_call_direct(next, ioring_ctx_dispatch);
239 }
240 }
241
242 irp_complete(irp);
243}
244
245static uint64_t nop_cancel(irp_t* irp)
246{
247 irp_complete(irp);
248 return 0;
249}
250
251static void ioring_ctx_dispatch(irp_t* irp)
252{
253 ioring_ctx_t* ctx = irp_get_ctx(irp);
254 ioring_t* ring = &ctx->ring;
255
256 // Ugly but the alternative is a super messy SQE structure.
257
258 sqe_flags_t reg = (irp->sqe.flags >> SQE_LOAD0) & SQE_REG_MASK;
259 if (reg != SQE_REG_NONE)
260 {
261 irp->sqe.arg0 = atomic_load_explicit(&ring->ctrl->regs[reg], memory_order_acquire);
262 }
263
264 reg = (irp->sqe.flags >> SQE_LOAD1) & SQE_REG_MASK;
265 if (reg != SQE_REG_NONE)
266 {
267 irp->sqe.arg1 = atomic_load_explicit(&ring->ctrl->regs[reg], memory_order_acquire);
268 }
269
270 reg = (irp->sqe.flags >> SQE_LOAD2) & SQE_REG_MASK;
271 if (reg != SQE_REG_NONE)
272 {
273 irp->sqe.arg2 = atomic_load_explicit(&ring->ctrl->regs[reg], memory_order_acquire);
274 }
275
276 reg = (irp->sqe.flags >> SQE_LOAD3) & SQE_REG_MASK;
277 if (reg != SQE_REG_NONE)
278 {
279 irp->sqe.arg3 = atomic_load_explicit(&ring->ctrl->regs[reg], memory_order_acquire);
280 }
281
282 reg = (irp->sqe.flags >> SQE_LOAD4) & SQE_REG_MASK;
283 if (reg != SQE_REG_NONE)
284 {
285 irp->sqe.arg4 = atomic_load_explicit(&ring->ctrl->regs[reg], memory_order_acquire);
286 }
287
288 switch (irp->sqe.op)
289 {
290 case IO_OP_NOP:
291 irp_set_cancel(irp, nop_cancel);
292 irp_timeout_add(irp, irp->sqe.timeout);
293 break;
294 default:
295 irp_error(irp, EINVAL);
296 break;
297 }
298}
299
300typedef struct
301{
302 list_t irps;
303 irp_t* link;
304} ioring_ctx_notify_ctx_t;
305
306static uint64_t ioring_ctx_sqe_pop(ioring_ctx_t* ctx, ioring_ctx_notify_ctx_t* notify)
307{
308 ioring_t* ring = &ctx->ring;
309
310 uint64_t shead = atomic_load_explicit(&ring->ctrl->shead, memory_order_relaxed);
311 uint64_t stail = atomic_load_explicit(&ring->ctrl->stail, memory_order_acquire);
312
313 if (shead == stail)
314 {
315 errno = EAGAIN;
316 return ERR;
317 }
318
319 irp_t* irp = irp_new(ctx->irps);
320 if (irp == NULL)
321 {
322 return ERR;
323 }
324 irp->sqe = ring->squeue[shead & ring->smask];
325
326 atomic_store_explicit(&ring->ctrl->shead, shead + 1, memory_order_release);
327
328 if (notify->link != NULL)
329 {
330 notify->link->next = irp->index;
331 notify->link = NULL;
332 }
333 else
334 {
335 list_push_back(&notify->irps, &irp->entry);
336 }
337
338 if (irp->sqe.flags & SQE_LINK || irp->sqe.flags & SQE_HARDLINK)
339 {
340 notify->link = irp;
341 }
342
343 return 0;
344}
345
346uint64_t ioring_ctx_notify(ioring_ctx_t* ctx, size_t amount, size_t wait)
347{
348 if (amount == 0)
349 {
350 return 0;
351 }
352
353 if (ioring_ctx_acquire(ctx) == ERR)
354 {
355 errno = EBUSY;
356 return ERR;
357 }
358
359 if (!(atomic_load(&ctx->flags) & IORING_CTX_MAPPED))
360 {
361 ioring_ctx_release(ctx);
362 errno = EINVAL;
363 return ERR;
364 }
365
366 ioring_ctx_notify_ctx_t notify = {
367 .irps = LIST_CREATE(notify.irps),
368 .link = NULL,
369 };
370
371 size_t processed = 0;
372 while (processed < amount)
373 {
374 if (ioring_ctx_sqe_pop(ctx, &notify) == ERR)
375 {
376 break;
377 }
378 processed++;
379 }
380
381 while (!list_is_empty(&notify.irps))
382 {
383 irp_t* irp = CONTAINER_OF(list_pop_front(&notify.irps), irp_t, entry);
384
385 irp_set_complete(irp, ioring_ctx_complete, NULL);
386 irp_call_direct(irp, ioring_ctx_dispatch);
387 }
388
389 if (wait == 0)
390 {
391 ioring_ctx_release(ctx);
392 return processed;
393 }
394
395 if (WAIT_BLOCK(&ctx->waitQueue, ioring_ctx_avail_cqes(ctx) >= wait) == ERR)
396 {
397 ioring_ctx_release(ctx);
398 return processed > 0 ? processed : ERR;
399 }
400
401 ioring_ctx_release(ctx);
402 return processed;
403}
404
405SYSCALL_DEFINE(SYS_SETUP, ioring_id_t, ioring_t* userRing, void* address, size_t sentries, size_t centries)
406{
407 if (userRing == NULL || sentries == 0 || centries == 0 || !IS_POW2(sentries) || !IS_POW2(centries))
408 {
409 errno = EINVAL;
410 return ERR;
411 }
412
413 process_t* process = process_current();
414
415 ioring_ctx_t* ctx = NULL;
416 ioring_id_t id = 0;
417 for (id = 0; id < ARRAY_SIZE(process->rings); id++)
418 {
419 ioring_ctx_flags_t expected = IORING_CTX_NONE;
420 if (atomic_compare_exchange_strong(&process->rings[id].flags, &expected, IORING_CTX_BUSY))
421 {
422 ctx = &process->rings[id];
423 break;
424 }
425 }
426
427 if (ctx == NULL)
428 {
429 errno = EMFILE;
430 return ERR;
431 }
432
433 if (ioring_ctx_map(ctx, process, id, userRing, address, sentries, centries) == ERR)
434 {
435 ioring_ctx_release(ctx);
436 return ERR;
437 }
438
439 ioring_ctx_release(ctx);
440 return id;
441}
442
443SYSCALL_DEFINE(SYS_TEARDOWN, uint64_t, ioring_id_t id)
444{
445 process_t* process = process_current();
446 if (id >= ARRAY_SIZE(process->rings))
447 {
448 errno = EINVAL;
449 return ERR;
450 }
451
452 ioring_ctx_t* ctx = &process->rings[id];
453 if (ioring_ctx_acquire(ctx) == ERR)
454 {
455 errno = EBUSY;
456 return ERR;
457 }
458
459 if (!(atomic_load(&ctx->flags) & IORING_CTX_MAPPED))
460 {
461 ioring_ctx_release(ctx);
462 errno = EINVAL;
463 return ERR;
464 }
465
466 if (ctx->irps != NULL && atomic_load(&ctx->irps->pool.used) != 0)
467 {
468 ioring_ctx_release(ctx);
469 errno = EBUSY;
470 return ERR;
471 }
472
473 if (ioring_ctx_unmap(ctx) == ERR)
474 {
475 ioring_ctx_release(ctx);
476 return ERR;
477 }
478
479 ioring_ctx_release(ctx);
480 return 0;
481}
482
483SYSCALL_DEFINE(SYS_ENTER, uint64_t, ioring_id_t id, size_t amount, size_t wait)
484{
485 process_t* process = process_current();
486 if (id >= ARRAY_SIZE(process->rings))
487 {
488 errno = EINVAL;
489 return ERR;
490 }
491
492 ioring_ctx_t* ctx = &process->rings[id];
493 return ioring_ctx_notify(ctx, amount, wait);
494}
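The listing above shows only the kernel side of the interface. Below is a minimal userspace sketch of the intended flow, assuming hypothetical wrappers ioring_setup() and ioring_enter() for the SYS_SETUP and SYS_ENTER syscalls; the sqe_t fields used (op, flags, data, timeout) are the ones ring.c reads, and everything else follows the documented ioring_t layout.
// Hypothetical userspace sketch: create a ring, submit one no-op and reap its completion.
// ioring_setup()/ioring_enter() are assumed syscall wrappers, not functions from this repository.
static uint64_t submit_one_nop(void)
{
    ioring_t ring;
    ioring_id_t id = ioring_setup(&ring, NULL, 8, 8); // 8 SQEs and 8 CQEs, both powers of two
    if (id == ERR)
    {
        return ERR;
    }

    // Publish one SQE: fill the next free slot, then advance the submission tail.
    uint64_t stail = atomic_load_explicit(&ring.ctrl->stail, memory_order_relaxed);
    sqe_t* sqe = &ring.squeue[stail & ring.smask];
    sqe->op = IO_OP_NOP;
    sqe->flags = 0;   // assumed: zero flags means no links and no register loads/saves
    sqe->data = NULL; // echoed back in the matching cqe_t
    sqe->timeout = 0; // assumed: no delay for the no-op
    atomic_store_explicit(&ring.ctrl->stail, stail + 1, memory_order_release);

    // Tell the kernel one SQE is ready and block until at least one CQE is available.
    if (ioring_enter(id, 1, 1) == ERR)
    {
        return ERR;
    }

    // Reap the completion: read the slot at the head, then advance the head.
    uint64_t chead = atomic_load_explicit(&ring.ctrl->chead, memory_order_acquire);
    cqe_t* cqe = &ring.cqueue[chead & ring.cmask];
    uint64_t result = (cqe->error == EOK) ? 0 : ERR;
    atomic_store_explicit(&ring.ctrl->chead, chead + 1, memory_order_release);
    return result;
}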
#define CONFIG_MAX_RINGS_PAGES
Maximum number of pages an async ring may use.
Definition config.h:202
#define SYSCALL_DEFINE(num, returnType,...)
Macro to define a syscall.
Definition syscall.h:172
@ SYS_SETUP
Definition syscall.h:105
@ SYS_TEARDOWN
Definition syscall.h:106
@ SYS_ENTER
Definition syscall.h:107
static void irp_set_complete(irp_t *irp, irp_complete_t complete, void *ctx)
Set the completion callback and context for the next frame in the IRP stack.
Definition irp.h:484
static void * irp_get_ctx(irp_t *irp)
Retrieve the context of the IRP pool that an IRP was allocated from.
Definition irp.h:315
void irp_complete(irp_t *irp)
Complete the current frame in the IRP stack.
Definition irp.c:355
irp_t * irp_new(irp_pool_t *pool)
Allocate a new IRP from a pool.
Definition irp.c:211
irp_pool_t * irp_pool_new(size_t size, process_t *process, void *ctx)
Allocate a new IRP pool.
Definition irp.c:32
void irp_call_direct(irp_t *irp, irp_func_t func)
Send an IRP to a specified function directly.
Definition irp.c:314
static irp_cancel_t irp_set_cancel(irp_t *irp, irp_cancel_t cancel)
Set the cancellation callback for an IRP.
Definition irp.h:366
void irp_pool_free(irp_pool_t *pool)
Free an IRP pool.
Definition irp.c:62
static irp_t * irp_chain_next(irp_t *irp)
Retrieve the next IRP in a chain and advance the chain.
Definition irp.h:337
static void irp_error(irp_t *irp, uint8_t err)
Helper to set an error code and complete the IRP.
Definition irp.h:497
void irp_timeout_add(irp_t *irp, clock_t timeout)
Add an IRP to a per-CPU timeout queue.
Definition irp.c:67
uint64_t ioring_ctx_notify(ioring_ctx_t *ctx, size_t amount, size_t wait)
Notify the context of new SQEs.
Definition ring.c:346
void ioring_ctx_deinit(ioring_ctx_t *ctx)
Deinitialize an I/O context.
Definition ring.c:162
void ioring_ctx_init(ioring_ctx_t *ctx)
Initialize an I/O context.
Definition ring.c:146
ioring_ctx_flags_t
Ring context flags.
Definition ring.h:171
@ IORING_CTX_NONE
No flags set.
Definition ring.h:172
@ IORING_CTX_MAPPED
Context is currently mapped into userspace.
Definition ring.h:174
@ IORING_CTX_BUSY
Context is currently in use; used for fast locking.
Definition ring.h:173
#define SQE_LOAD3
The offset to specify the register to load into the fourth argument.
Definition ioring.h:64
#define SQE_LOAD1
The offset to specify the register to load into the second argument.
Definition ioring.h:62
#define IO_OP_NOP
No-op operation.
Definition ioring.h:42
#define SQE_REG_MASK
The bitmask for a register specifier in a sqe_flags_t.
Definition ioring.h:59
#define SQE_LINK
Definition ioring.h:73
#define SQE_REGS_MAX
The maximum number of registers.
Definition ioring.h:57
#define SQE_HARDLINK
Definition ioring.h:77
#define SQE_LOAD2
The offset to specify the register to load into the third argument.
Definition ioring.h:63
#define SQE_REG_NONE
No register.
Definition ioring.h:56
#define SQE_LOAD0
The offset to specify the register to load into the first argument.
Definition ioring.h:61
#define SQE_SAVE
The offset to specify the register to save the result into.
Definition ioring.h:66
uint32_t sqe_flags_t
Submission queue entry (SQE) flags.
Definition ioring.h:48
#define SQE_LOAD4
The offset to specify the register to load into the fifth argument.
Definition ioring.h:65
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
Definition panic.c:292
#define PFN_TO_VIRT(_pfn)
Convert a PFN to its identity-mapped higher-half virtual address.
size_t pfn_t
Page Frame Number type.
@ PML_USER
@ PML_PRESENT
@ PML_WRITE
@ PML_OWNED
void pmm_free_pages(pfn_t *pfns, size_t count)
Free multiple pages of physical memory.
Definition pmm.c:348
uint64_t pmm_alloc_pages(pfn_t *pfns, size_t count)
Allocate multiple pages of physical memory.
Definition pmm.c:275
#define POOL_IDX_MAX
The maximum index value for a pool.
Definition pool.h:29
void * vmm_map_pages(space_t *space, void *virtAddr, pfn_t *pfns, size_t amount, pml_flags_t flags, space_callback_func_t func, void *data)
Maps an array of physical pages to virtual memory in a given address space.
Definition vmm.c:281
void * vmm_unmap(space_t *space, void *virtAddr, size_t length)
Unmaps virtual memory from a given address space.
Definition vmm.c:336
static process_t * process_current(void)
Retrieves the process of the currently running thread.
Definition process.h:131
uint64_t wait_unblock(wait_queue_t *queue, uint64_t amount, errno_t err)
Unblock threads waiting on a wait queue.
Definition wait.c:307
void wait_queue_deinit(wait_queue_t *queue)
Deinitialize wait queue.
Definition wait.c:57
#define WAIT_ALL
Used to indicate that the wait should unblock all waiting threads.
Definition wait.h:43
void wait_queue_init(wait_queue_t *queue)
Initialize wait queue.
Definition wait.c:51
#define WAIT_BLOCK(queue, condition)
Blocks until the condition is true; the condition is re-tested on every wakeup.
Definition wait.h:51
#define EINVAL
Invalid argument.
Definition errno.h:142
#define EMFILE
Too many open files.
Definition errno.h:152
#define ENOMEM
Out of memory.
Definition errno.h:92
#define EBUSY
Device or resource busy.
Definition errno.h:112
#define errno
Error number variable.
Definition errno.h:27
#define EOK
No error.
Definition errno.h:32
#define EAGAIN
Try again.
Definition errno.h:87
#define ARRAY_SIZE(x)
Get the number of elements in a static array.
Definition defs.h:111
#define UNUSED(x)
Mark a variable as unused.
Definition defs.h:96
uint64_t ioring_id_t
I/O ring ID type.
Definition ioring.h:195
static void list_push_back(list_t *list, list_entry_t *entry)
Pushes an entry to the end of the list.
Definition list.h:322
#define LIST_CREATE(name)
Creates a list initializer.
Definition list.h:163
static bool list_is_empty(list_t *list)
Checks if a list is empty.
Definition list.h:210
static list_entry_t * list_pop_front(list_t *list)
Pops the first entry from the list.
Definition list.h:366
#define IS_POW2(x)
Definition math.h:27
#define BYTES_TO_PAGES(amount)
Convert a size in bytes to pages.
Definition proc.h:107
#define NULL
Pointer error value.
Definition NULL.h:25
#define ERR
Integer error value.
Definition ERR.h:17
#define PAGE_SIZE
The size of a memory page in bytes.
Definition PAGE_SIZE.h:8
#define CONTAINER_OF(ptr, type, member)
Container of macro.
static uint64_t ioring_ctx_avail_cqes(ioring_ctx_t *ctx)
Definition ring.c:138
static uint64_t ioring_ctx_sqe_pop(ioring_ctx_t *ctx, ioring_ctx_notify_ctx_t *notify)
Definition ring.c:306
static uint64_t ioring_ctx_unmap(ioring_ctx_t *ctx)
Definition ring.c:126
static void ioring_ctx_complete(irp_t *irp, void *_ptr)
Definition ring.c:188
static void ioring_ctx_release(ioring_ctx_t *ctx)
Definition ring.c:30
static uint64_t nop_cancel(irp_t *irp)
Definition ring.c:245
static uint64_t ioring_ctx_acquire(ioring_ctx_t *ctx)
Definition ring.c:19
static void ioring_ctx_dispatch(irp_t *irp)
Definition ring.c:251
static uint64_t ioring_ctx_map(ioring_ctx_t *ctx, process_t *process, ioring_id_t id, ioring_t *userRing, void *address, size_t sentries, size_t centries)
Definition ring.c:35
@ memory_order_release
Definition stdatomic.h:119
@ memory_order_relaxed
Definition stdatomic.h:116
@ memory_order_acquire
Definition stdatomic.h:118
#define atomic_compare_exchange_strong(object, expected, desired)
Definition stdatomic.h:278
#define atomic_fetch_or(object, operand)
Definition stdatomic.h:285
#define atomic_load_explicit(object, order)
Definition stdatomic.h:264
#define atomic_load(object)
Definition stdatomic.h:288
#define atomic_store_explicit(object, desired, order)
Definition stdatomic.h:265
#define atomic_fetch_and(object, operand)
Definition stdatomic.h:284
#define atomic_init(obj, value)
Definition stdatomic.h:75
__UINT32_TYPE__ uint32_t
Definition stdint.h:15
__UINT64_TYPE__ uint64_t
Definition stdint.h:17
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:43
_PUBLIC void * memset(void *s, int c, size_t n)
Definition memset.c:4
Asynchronous completion queue entry (CQE).
Definition ioring.h:143
errno_t error
Error code, if not equal to EOK an error occurred.
Definition ioring.h:145
io_op_t op
The operation that was performed.
Definition ioring.h:144
uint64_t _result
Definition ioring.h:152
void * data
Private data from the submission entry.
Definition ioring.h:146
Shared ring control structure.
The kernel-side ring context structure.
Definition ring.h:182
ioring_t ring
The kernel-side ring structure.
Definition ring.h:183
irp_pool_t * irps
Pool of preallocated IRPs.
Definition ring.h:184
wait_queue_t waitQueue
Wait queue for completions.
Definition ring.h:188
size_t pageAmount
Amount of pages mapped for the ring.
Definition ring.h:187
void * kernelAddr
Kernel address of the ring.
Definition ring.h:186
void * userAddr
Userspace address of the ring.
Definition ring.h:185
User I/O ring structure.
Definition ioring.h:204
ioring_ctrl_t * ctrl
Pointer to the shared control structure.
Definition ioring.h:205
sqe_t * squeue
Pointer to the submission queue.
Definition ioring.h:207
size_t smask
Bitmask for submission queue (sentries - 1).
Definition ioring.h:209
cqe_t * cqueue
Pointer to the completion queue.
Definition ioring.h:210
size_t cmask
Bitmask for completion queue (centries - 1).
Definition ioring.h:212
size_t sentries
Number of entries in the submission queue.
Definition ioring.h:208
size_t centries
Number of entries in the completion queue.
Definition ioring.h:211
ioring_id_t id
The ID of the ring.
Definition ioring.h:206
pool_t pool
Definition irp.h:220
process_t * process
Will only hold a reference if there is at least one active IRP.
Definition irp.h:218
I/O Request Packet structure.
A doubly linked list.
Definition list.h:46
atomic_size_t used
Number of used elements.
Definition pool.h:39
Process structure.
Definition process.h:76
ioring_ctx_t rings[CONFIG_MAX_RINGS]
Definition process.h:91
space_t space
Definition process.h:84
Asynchronous submission queue entry (SQE).
Definition ioring.h:89
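The smask and cmask fields above are the reason SYS_SETUP rejects entry counts that are not powers of two: the head and tail counters grow without bound and are folded onto ring slots with a mask rather than a modulo, as in line 210 of the listing. A short illustration, reusing the ring and tail names from the code above:
// With centries == 8, cmask == 7: tails 7, 8 and 9 land in slots 7, 0 and 1.
cqe_t* cqe = &ring->cqueue[tail & ring->cmask]; // equivalent to tail % ring->centries for power-of-two centries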