tools/io_uring/queue.c

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

#include "liburing.h"
#include "barrier.h"

static int __io_uring_get_cqe(struct io_uring *ring,
			      struct io_uring_cqe **cqe_ptr, int wait)
{
	struct io_uring_cq *cq = &ring->cq;
	const unsigned mask = *cq->kring_mask;
	unsigned head;
	int ret;

	*cqe_ptr = NULL;
	head = *cq->khead;
	do {
		/*
		 * It's necessary to use a read_barrier() before reading
		 * the CQ tail, since the kernel updates it locklessly. The
		 * kernel has the matching store barrier for the update. The
		 * kernel also ensures that previous stores to CQEs are
		 * ordered with the tail update.
		 */
		read_barrier();
		if (head != *cq->ktail) {
			*cqe_ptr = &cq->cqes[head & mask];
			break;
		}
		if (!wait)
			break;
		ret = io_uring_enter(ring->ring_fd, 0, 1,
				     IORING_ENTER_GETEVENTS, NULL);
		if (ret < 0)
			return -errno;
	} while (1);

	return 0;
}

/*
 * Return an IO completion, if one is readily available. Returns 0 with
 * cqe_ptr filled in on success, -errno on failure.
 */
int io_uring_peek_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr) |
{ |
	return __io_uring_get_cqe(ring, cqe_ptr, 0);
}

/*
 * Return an IO completion, waiting for it if necessary. Returns 0 with
 * cqe_ptr filled in on success, -errno on failure.
 */
int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr) |
{ |
	return __io_uring_get_cqe(ring, cqe_ptr, 1);
}
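
/*
 * Illustrative sketch, not part of the original file: one plausible way
 * for an application to reap a single completion using the two helpers
 * above. The example_* name is made up, and it assumes the
 * io_uring_cqe_seen() helper from liburing.h to return the CQE slot to
 * the kernel once the result has been consumed.
 */
static int example_reap_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret;

	/* Non-blocking check; 0 with a NULL cqe means nothing was ready */
	ret = io_uring_peek_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	if (!cqe) {
		/* Nothing pending, so block until a completion arrives */
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
	}

	ret = cqe->res;	/* per-request result, negated errno on error */

	/* Advance the CQ head so the kernel can reuse this CQE slot */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}
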
/*
 * Submit sqes acquired from io_uring_get_sqe() to the kernel.
 *
 * Returns number of sqes submitted
 */
int io_uring_submit(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;
	const unsigned mask = *sq->kring_mask;
	unsigned ktail, ktail_next, submitted, to_submit;
	int ret;

	/*
	 * If we have pending IO in the kring, submit it first. We need a
	 * read barrier here to match the kernel's store barrier when
	 * updating the SQ head.
	 */
	read_barrier();
	if (*sq->khead != *sq->ktail) {
		submitted = *sq->kring_entries;
		goto submit;
	}

	if (sq->sqe_head == sq->sqe_tail)
		return 0;

	/*
	 * Fill in sqes that we have queued up, adding them to the kernel ring
	 */
	submitted = 0;
	ktail = ktail_next = *sq->ktail;
	to_submit = sq->sqe_tail - sq->sqe_head;
	while (to_submit--) {
		ktail_next++;
		read_barrier();

		sq->array[ktail & mask] = sq->sqe_head & mask;
		ktail = ktail_next;

		sq->sqe_head++;
		submitted++;
	}

	if (!submitted)
		return 0;

	if (*sq->ktail != ktail) {
		/*
		 * First write barrier ensures that the SQE stores are
		 * updated with the tail update. This is needed so that
		 * the kernel will never see a tail update without the
		 * preceding SQE stores being done.
		 */
		write_barrier();
		*sq->ktail = ktail;
		/*
		 * The kernel has the matching read barrier for reading the
		 * SQ tail.
		 */
		write_barrier();
	}

submit:
	ret = io_uring_enter(ring->ring_fd, submitted, 0,
			     IORING_ENTER_GETEVENTS, NULL);
	if (ret < 0)
		return -errno;

	return ret;
}
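
/*
 * Illustrative sketch, not part of the original file: queue a single
 * no-op request and hand it to the kernel with io_uring_submit().
 * IORING_OP_NOP comes from the io_uring ABI; the example_* name and the
 * -EBUSY convention for a full ring are assumptions for illustration.
 */
static int example_submit_nop(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;	/* no vacant sqe in the ring right now */

	/* get_sqe() returns a raw slot, so clear it before filling it in */
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;

	/* On success this reports 1, the number of sqes submitted */
	return io_uring_submit(ring);
}
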
/*
 * Return an sqe to fill. Application must later call io_uring_submit()
 * when it's ready to tell the kernel about it. The caller may call this
 * function multiple times before calling io_uring_submit().
 *
 * Returns a vacant sqe, or NULL if we're full.
 */
struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;
	unsigned next = sq->sqe_tail + 1;
	struct io_uring_sqe *sqe;

	/*
	 * All sqes are used
	 */
	if (next - sq->sqe_head > *sq->kring_entries)
		return NULL;

	sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
	sq->sqe_tail = next;

	return sqe;
}
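
/*
 * Illustrative sketch, not part of the original file: queue a batch of
 * no-op requests, using io_uring_submit() to drain the ring whenever
 * io_uring_get_sqe() reports it is full. The example_* name is made up
 * for illustration.
 */
static int example_queue_batch(struct io_uring *ring, unsigned count)
{
	struct io_uring_sqe *sqe;
	unsigned queued = 0;

	while (queued < count) {
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			/* Ring is full: submit what we have to make room */
			int ret = io_uring_submit(ring);

			if (ret < 0)
				return ret;
			continue;
		}
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_NOP;
		queued++;
	}

	return io_uring_submit(ring);
}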