36 size_t sentries,
size_t centries)
60 for (
size_t i = 0; i < pageAmount; i++)
67 if (kernelAddr ==
NULL)
99 userRing->
ctrl = userAddr;
103 userRing->
smask = sentries - 1;
106 userRing->
cmask = centries - 1;
108 kernelRing->
ctrl = kernelAddr;
112 kernelRing->
smask = sentries - 1;
115 kernelRing->
cmask = centries - 1;
143 return ctail - chead;
171 panic(
NULL,
"failed to acquire async context for deinitialization");
178 panic(
NULL,
"failed to deinitialize async context");
204 if ((tail - head) >= ring->
centries)
207 panic(
NULL,
"Async completion queue overflow");
211 cqe->
op = irp->sqe.op;
212 cqe->
error = irp->err;
213 cqe->
data = irp->sqe.data;
330 notify->link->next = irp->index;
371 size_t processed = 0;
372 while (processed < amount)
398 return processed > 0 ? processed :
ERR;
407 if (userRing ==
NULL || sentries == 0 || centries == 0 || !
IS_POW2(sentries) || !
IS_POW2(centries))
#define CONFIG_MAX_RINGS_PAGES
Maximum async ring pages configuration.
#define SYSCALL_DEFINE(num, returnType,...)
Macro to define a syscall.
static uintptr_t address
Mapped virtual address of the HPET registers.
static void irp_set_complete(irp_t *irp, irp_complete_t complete, void *ctx)
Set the completion callback and context for the next frame in the IRP stack.
static void * irp_get_ctx(irp_t *irp)
Retrieve the context of the IRP pool that an IRP was allocated from.
void irp_complete(irp_t *irp)
Complete the current frame in the IRP stack.
irp_t * irp_new(irp_pool_t *pool)
Allocate a new IRP from a pool.
irp_pool_t * irp_pool_new(size_t size, process_t *process, void *ctx)
Allocate a new IRP pool.
void irp_call_direct(irp_t *irp, irp_func_t func)
Send an IRP to a specified function directly.
static irp_cancel_t irp_set_cancel(irp_t *irp, irp_cancel_t cancel)
Set the cancellation callback for an IRP.
void irp_pool_free(irp_pool_t *pool)
Free an IRP pool.
static irp_t * irp_chain_next(irp_t *irp)
Retrieve the next IRP in a chain and advance the chain.
static void irp_error(irp_t *irp, uint8_t err)
Helper to set an error code and complete the IRP.
void irp_timeout_add(irp_t *irp, clock_t timeout)
Add an IRP to a per-CPU timeout queue.
uint64_t ioring_ctx_notify(ioring_ctx_t *ctx, size_t amount, size_t wait)
Notify the context of new SQEs.
void ioring_ctx_deinit(ioring_ctx_t *ctx)
Deinitialize an I/O context.
void ioring_ctx_init(ioring_ctx_t *ctx)
Initialize an I/O context.
ioring_ctx_flags_t
Ring context flags.
@ IORING_CTX_NONE
No flags set.
@ IORING_CTX_MAPPED
Context is currently mapped into userspace.
@ IORING_CTX_BUSY
Context is currently being used, used for fast locking.
#define SQE_LOAD3
The offset to specify the register to load into the fourth argument.
#define SQE_LOAD1
The offset to specify the register to load into the second argument.
#define IO_OP_NOP
No-op operation.
#define SQE_REG_MASK
The bitmask for a register specifier in a sqe_flags_t.
#define SQE_REGS_MAX
The maximum number of registers.
#define SQE_LOAD2
The offset to specify the register to load into the third argument.
#define SQE_REG_NONE
No register.
#define SQE_LOAD0
The offset to specify the register to load into the first argument.
#define SQE_SAVE
The offset to specify the register to save the result into.
uint32_t sqe_flags_t
Submission queue entry (SQE) flags.
#define SQE_LOAD4
The offset to specify the register to load into the fifth argument.
NORETURN void panic(const interrupt_frame_t *frame, const char *format,...)
Panic the kernel, printing a message and halting.
#define PFN_TO_VIRT(_pfn)
Convert a PFN to its identity mapped higher half virtual address.
size_t pfn_t
Page Frame Number type.
void pmm_free_pages(pfn_t *pfns, size_t count)
Free multiple pages of physical memory.
uint64_t pmm_alloc_pages(pfn_t *pfns, size_t count)
Allocate multiple pages of physical memory.
#define POOL_IDX_MAX
The maximum index value for a pool.
void * vmm_map_pages(space_t *space, void *virtAddr, pfn_t *pfns, size_t amount, pml_flags_t flags, space_callback_func_t func, void *data)
Maps an array of physical pages to virtual memory in a given address space.
void * vmm_unmap(space_t *space, void *virtAddr, size_t length)
Unmaps virtual memory from a given address space.
static process_t * process_current(void)
Retrieves the process of the currently running thread.
uint64_t wait_unblock(wait_queue_t *queue, uint64_t amount, errno_t err)
Unblock threads waiting on a wait queue.
void wait_queue_deinit(wait_queue_t *queue)
Deinitialize wait queue.
#define WAIT_ALL
Used to indicate that the wait should unblock all waiting threads.
void wait_queue_init(wait_queue_t *queue)
Initialize wait queue.
#define WAIT_BLOCK(queue, condition)
Blocks until the condition is true, will test the condition on every wakeup.
#define EINVAL
Invalid argument.
#define EMFILE
Too many open files.
#define ENOMEM
Out of memory.
#define EBUSY
Device or resource busy.
#define errno
Error number variable.
#define ARRAY_SIZE(x)
Get the number of elements in a static array.
#define UNUSED(x)
Mark a variable as unused.
uint64_t ioring_id_t
I/O ring ID type.
static void list_push_back(list_t *list, list_entry_t *entry)
Pushes an entry to the end of the list.
#define LIST_CREATE(name)
Creates a list initializer.
static bool list_is_empty(list_t *list)
Checks if a list is empty.
static list_entry_t * list_pop_front(list_t *list)
Pops the first entry from the list.
uint64_t notify(note_func_t handler)
System call that sets the handler to be called when a note is received.
#define BYTES_TO_PAGES(amount)
Convert a size in bytes to pages.
#define NULL
Pointer error value.
#define ERR
Integer error value.
#define PAGE_SIZE
The size of a memory page in bytes.
#define CONTAINER_OF(ptr, type, member)
Container of macro.
static uint64_t ioring_ctx_avail_cqes(ioring_ctx_t *ctx)
static uint64_t ioring_ctx_sqe_pop(ioring_ctx_t *ctx, ioring_ctx_notify_ctx_t *notify)
static uint64_t ioring_ctx_unmap(ioring_ctx_t *ctx)
static void ioring_ctx_complete(irp_t *irp, void *_ptr)
static void ioring_ctx_release(ioring_ctx_t *ctx)
static uint64_t nop_cancel(irp_t *irp)
static uint64_t ioring_ctx_acquire(ioring_ctx_t *ctx)
static void ioring_ctx_dispatch(irp_t *irp)
static uint64_t ioring_ctx_map(ioring_ctx_t *ctx, process_t *process, ioring_id_t id, ioring_t *userRing, void *address, size_t sentries, size_t centries)
#define atomic_compare_exchange_strong(object, expected, desired)
#define atomic_fetch_or(object, operand)
#define atomic_load_explicit(object, order)
#define atomic_load(object)
#define atomic_store_explicit(object, desired, order)
#define atomic_fetch_and(object, operand)
#define atomic_init(obj, value)
__UINTPTR_TYPE__ uintptr_t
_PUBLIC void * memset(void *s, int c, size_t n)
Asynchronous completion queue entry (CQE).
errno_t error
Error code, if not equal to EOK an error occurred.
io_op_t op
The operation that was performed.
void * data
Private data from the submission entry.
Shared ring control structure.
The kernel-side ring context structure.
ioring_t ring
The kernel-side ring structure.
irp_pool_t * irps
Pool of preallocated IRPs.
wait_queue_t waitQueue
Wait queue for completions.
size_t pageAmount
Amount of pages mapped for the ring.
void * kernelAddr
Kernel address of the ring.
void * userAddr
Userspace address of the ring.
ioring_ctrl_t * ctrl
Pointer to the shared control structure.
sqe_t * squeue
Pointer to the submission queue.
size_t smask
Bitmask for submission queue (sentries - 1).
cqe_t * cqueue
Pointer to the completion queue.
size_t cmask
Bitmask for completion queue (centries - 1).
size_t sentries
Number of entries in the submission queue.
size_t centries
Number of entries in the completion queue.
ioring_id_t id
The ID of the ring.
process_t * process
Holds a reference only while there is at least one active IRP.
I/O Request Packet structure.
atomic_size_t used
Number of used elements.
ioring_ctx_t rings[CONFIG_MAX_RINGS]
Asynchronous submission queue entry (SQE).