Blame view
tools/io_uring/liburing.h
4.21 KB
21b4aa5d2
|
1 2 |
#ifndef LIB_URING_H #define LIB_URING_H |
004d564f9
|
3 4 5 |
#ifdef __cplusplus extern "C" { #endif |
21b4aa5d2
|
6 7 8 9 |
#include <sys/uio.h> #include <signal.h> #include <string.h> #include "../../include/uapi/linux/io_uring.h" |
004d564f9
|
10 11 |
#include <inttypes.h> #include "barrier.h" |
21b4aa5d2
|
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 |
/*
 * Library interface to io_uring
 */
struct io_uring_sq {
	/*
	 * 'k'-prefixed pointers appear to reference ring state shared with
	 * the kernel (set up via io_uring_queue_mmap()) — NOTE(review):
	 * confirm against the .c implementation.
	 */
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *kdropped;
	unsigned *array;
	struct io_uring_sqe *sqes;	/* submission entries */

	/* library-side head/tail into sqes[] */
	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;			/* size of the ring, in bytes */
};

struct io_uring_cq {
	/* see note on 'k'-prefixed fields in struct io_uring_sq */
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *koverflow;
	struct io_uring_cqe *cqes;	/* completion entries */

	size_t ring_sz;			/* size of the ring, in bytes */
};

/* One io_uring instance: SQ ring, CQ ring, and the ring file descriptor. */
struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	int ring_fd;
};

/*
 * System calls
 */
/* Create an io_uring instance for 'entries' requests; returns the ring fd. */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
004d564f9
|
53 |
/*
 * Submit up to 'to_submit' requests and/or wait for at least
 * 'min_complete' completions on ring 'fd'; 'sig' is an optional signal
 * mask applied while waiting.
 */
extern int io_uring_enter(int fd, unsigned to_submit,
	unsigned min_complete, unsigned flags, sigset_t *sig);
/* Register/unregister resources (per 'opcode') with ring 'fd'. */
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
	unsigned int nr_args);

/*
 * Library interface
 */
/* Set up a ring with 'entries' entries; fills in '*ring' on success. */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
	unsigned flags);
/* Map an already-created ring 'fd' (params from io_uring_setup()). */
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
	struct io_uring *ring);
/* Tear down a ring previously set up by the two calls above. */
extern void io_uring_queue_exit(struct io_uring *ring);
004d564f9
|
66 |
/* Check for an available completion; fills in '*cqe_ptr' if one exists. */
extern int io_uring_peek_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
/* Wait until a completion is available, then fill in '*cqe_ptr'. */
extern int io_uring_wait_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
/* Tell the kernel about queued-but-unsubmitted sqes. */
extern int io_uring_submit(struct io_uring *ring);
/* Get the next free sqe to prepare, or NULL if the SQ ring is full. */
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);

/*
 * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
 * been processed by the application.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe) {
		struct io_uring_cq *cq = &ring->cq;

		(*cq->khead)++;
		/*
		 * Ensure that the kernel sees our new head, the kernel has
		 * the matching read barrier.
		 */
		write_barrier();
	}
}

/*
 * Command prep helpers
 */

/*
 * Attach caller data to an sqe; read it back from the matching cqe with
 * io_uring_cqe_get_data().
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
	/*
	 * Go through uintptr_t rather than unsigned long: unsigned long is
	 * only 32 bits on LLP64 targets and would truncate the pointer,
	 * and io_uring_cqe_get_data() reads this back via uintptr_t.
	 */
	sqe->user_data = (uintptr_t) data;
}
004d564f9
|
99 100 101 102 |
/*
 * Recover the caller data that io_uring_sqe_set_data() attached to the
 * request that produced this cqe.
 */
static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
{
	uintptr_t data = (uintptr_t) cqe->user_data;

	return (void *) data;
}
21b4aa5d2
|
103 |
static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd, |
004d564f9
|
104 105 |
const void *addr, unsigned len, off_t offset) |
21b4aa5d2
|
106 107 108 109 110 111 112 113 114 115 |
{ memset(sqe, 0, sizeof(*sqe)); sqe->opcode = op; sqe->fd = fd; sqe->off = offset; sqe->addr = (unsigned long) addr; sqe->len = len; } static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd, |
004d564f9
|
116 117 |
const struct iovec *iovecs, unsigned nr_vecs, off_t offset) |
21b4aa5d2
|
118 119 120 121 122 123 124 125 126 127 128 129 |
{ io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset); } static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd, void *buf, unsigned nbytes, off_t offset) { io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset); } static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd, |
004d564f9
|
130 131 |
const struct iovec *iovecs, unsigned nr_vecs, off_t offset) |
21b4aa5d2
|
132 133 134 135 136 |
{ io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset); } static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd, |
004d564f9
|
137 |
const void *buf, unsigned nbytes, |
21b4aa5d2
|
138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 |
off_t offset) { io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset); } static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd, short poll_mask) { memset(sqe, 0, sizeof(*sqe)); sqe->opcode = IORING_OP_POLL_ADD; sqe->fd = fd; sqe->poll_events = poll_mask; } static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe, void *user_data) { memset(sqe, 0, sizeof(*sqe)); sqe->opcode = IORING_OP_POLL_REMOVE; sqe->addr = (unsigned long) user_data; } static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd, |
004d564f9
|
161 |
unsigned fsync_flags) |
21b4aa5d2
|
162 163 164 165 |
{ memset(sqe, 0, sizeof(*sqe)); sqe->opcode = IORING_OP_FSYNC; sqe->fd = fd; |
004d564f9
|
166 167 168 169 170 171 172 173 174 175 |
sqe->fsync_flags = fsync_flags; } static inline void io_uring_prep_nop(struct io_uring_sqe *sqe) { memset(sqe, 0, sizeof(*sqe)); sqe->opcode = IORING_OP_NOP; } #ifdef __cplusplus |
21b4aa5d2
|
176 |
} |
004d564f9
|
177 |
#endif |
21b4aa5d2
|
178 179 |
#endif |