Blame view
net/rds/ib.h
14.3 KB
b24413180 License cleanup: ... |
1 |
/* SPDX-License-Identifier: GPL-2.0 */ |
ec16227e1 RDS/IB: Infiniban... |
2 3 4 5 6 |
#ifndef _RDS_IB_H #define _RDS_IB_H #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> |
a6b7a4078 net: remove inter... |
7 |
#include <linux/interrupt.h> |
e4c52c98e RDS/IB: add _to_n... |
8 9 |
#include <linux/pci.h> #include <linux/slab.h> |
ec16227e1 RDS/IB: Infiniban... |
10 11 |
#include "rds.h" #include "rdma_transport.h" |
ec16227e1 RDS/IB: Infiniban... |
12 13 14 15 16 |
#define RDS_IB_MAX_SGE 8 #define RDS_IB_RECV_SGE 2 #define RDS_IB_DEFAULT_RECV_WR 1024 #define RDS_IB_DEFAULT_SEND_WR 256 |
a55207884 Revert "RDS: IB: ... |
17 |
#define RDS_IB_DEFAULT_FR_WR 512 |
ec16227e1 RDS/IB: Infiniban... |
18 |
|
fab8688d7 RDS: IB: make the... |
19 |
#define RDS_IB_DEFAULT_RETRY_COUNT 1 |
3ba23ade4 RDS: Set retry_co... |
20 |
|
ec16227e1 RDS/IB: Infiniban... |
21 |
#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */ |
332441258 RDS/IB: Add cachi... |
22 |
#define RDS_IB_RECYCLE_BATCH_COUNT 32 |
f4f943c95 RDS: IB: ack more... |
23 |
#define RDS_IB_WC_MAX 32 |
ea819867b RDS/IB: protect t... |
24 |
extern struct rw_semaphore rds_ib_devices_lock; |
ec16227e1 RDS/IB: Infiniban... |
25 26 27 28 29 30 31 |
extern struct list_head rds_ib_devices; /* * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to * try and minimize the amount of memory tied up both the device and * socket receive queues. */ |
ec16227e1 RDS/IB: Infiniban... |
32 33 |
/* One receive fragment: a single scatterlist entry plus list linkage.
 * f_item and f_cache_entry are separate so a frag can sit on a message's
 * fragment list and (presumably) on a refill-cache list independently —
 * TODO(review): confirm against ib_recv.c usage. */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};

/* An in-flight incoming message: its list of fragments plus the generic
 * RDS incoming-message state embedded at the end. */
struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};
332441258 RDS/IB: Add cachi... |
43 44 45 46 47 48 |
/* Head of one per-CPU cache list: the first entry and an element count. */
struct rds_ib_cache_head {
	struct list_head	*first;
	unsigned long		count;
};

/* Recycle cache for incs/frags (see i_cache_incs/i_cache_frags):
 * per-CPU heads feed xfer/ready lists. Exact transfer protocol lives in
 * ib_recv.c — not visible here. */
struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head	*xfer;
	struct list_head	*ready;
};
eee2fa6ab rds: Changing IP ... |
53 54 55 56 57 58 59 60 61 62 |
/* This is the common structure for the IB private data exchange in setting up
 * an RDS connection.  The exchange is different for IPv4 and IPv6 connections.
 * The reason is that the address size is different and the addresses
 * exchanged are in the beginning of the structure.  Hence it is not possible
 * for interoperability if same structure is used.
 */
struct rds_ib_conn_priv_cmn {
	u8			ricpc_protocol_major;
	u8			ricpc_protocol_minor;
	__be16			ricpc_protocol_minor_mask;	/* bitmask */
	u8			ricpc_dp_toss;
	/* NOTE(review): the "ripc_" prefix below is inconsistent with the
	 * "ricpc_" prefix on the other members; this is on-the-wire layout,
	 * so renaming is left alone. */
	u8			ripc_reserved1;
	__be16			ripc_reserved2;
	__be64			ricpc_ack_seq;
	__be32			ricpc_credit;	/* non-zero enables flow ctl */
};
ec16227e1 RDS/IB: Infiniban... |
69 70 |
/* IPv4 flavour of the connection-setup private data. */
struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32				dp_saddr;
	__be32				dp_daddr;
	struct rds_ib_conn_priv_cmn	dp_cmn;
};

/* IPv6 flavour of the connection-setup private data. */
struct rds6_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	struct in6_addr			dp_saddr;
	struct in6_addr			dp_daddr;
	struct rds_ib_conn_priv_cmn	dp_cmn;
};

/* Shorthand accessors for the common fields shared by both flavours. */
#define dp_protocol_major	dp_cmn.ricpc_protocol_major
#define dp_protocol_minor	dp_cmn.ricpc_protocol_minor
#define dp_protocol_minor_mask	dp_cmn.ricpc_protocol_minor_mask
#define dp_ack_seq		dp_cmn.ricpc_ack_seq
#define dp_credit		dp_cmn.ricpc_credit

union rds_ib_conn_priv {
	struct rds_ib_connect_private	ricp_v4;
	struct rds6_ib_connect_private	ricp_v6;
};

/* Per-slot state for the send ring: the posted WR (one of three kinds),
 * its SGEs, the operation it carries and when it was queued. */
struct rds_ib_send_work {
	void				*s_op;
	union {
		struct ib_send_wr	s_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_atomic_wr	s_atomic_wr;
	};
	struct ib_sge			s_sge[RDS_IB_MAX_SGE];
	unsigned long			s_queued;
};

/* Per-slot state for the recv ring; two SGEs (cf. RDS_IB_RECV_SGE). */
struct rds_ib_recv_work {
	struct rds_ib_incoming		*r_ibinc;
	struct rds_page_frag		*r_frag;
	struct ib_recv_wr		r_wr;
	struct ib_sge			r_sge[2];
};

/* Alloc/free ring bookkeeping shared by the send and recv paths. */
struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};
f4f943c95 RDS: IB: ack more... |
119 120 121 122 123 124 125 126 127 128 129 130 |
/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
/* Accumulated ACK state handed between the recv CQE handler and the
 * ACK-sending path; the *_valid bits say which fields carry data. */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};
ec16227e1 RDS/IB: Infiniban... |
131 132 133 134 135 136 137 138 139 140 141 |
struct rds_ib_device;

/* IB-transport private state for one RDS connection. */
struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;
	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];

	/* To control the number of wrs from fastreg */
	atomic_t		i_fastreg_wrs;
	atomic_t		i_fastreg_inuse_count;

	/* interrupt handling */
	struct tasklet_struct	i_send_tasklet;
	struct tasklet_struct	i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	/* i_send_hdrs[n] pairs with i_send_hdrs_dma[n] (DMA address). */
	struct rds_header	**i_send_hdrs;
	dma_addr_t		*i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	**i_recv_hdrs;
	dma_addr_t		*i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;
	atomic_t		i_cache_allocs;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	dma_addr_t		i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one posted
	 * recv credits we need to transfer to remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;

	/* Endpoint role in connection */
	bool			i_active_side;
	atomic_t		i_cq_quiesce;

	/* Send/Recv vectors */
	int			i_scq_vector;
	int			i_rcq_vector;
	u8			i_sl;
};

/* This assumes that atomic_t is at least 32 bits */
/* Both halves of i_credits: low 16 bits = send credits,
 * high 16 bits = posted recv credits to advertise. */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)

/* One IPv4 address owned by an rds_ib_device; freed via RCU. */
struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
	struct rcu_head		rcu;
};

/* Indexes of the two MR pool sizes (see mr_8k_pool/mr_1m_pool). */
enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};
ec16227e1 RDS/IB: Infiniban... |
230 231 232 233 234 235 |
/* Per-HCA state shared by all RDS connections on one IB device. */
struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct dma_pool		*rid_hdrs_pool; /* RDS headers DMA pool */
	u8			odp_capable:1;

	unsigned int		max_mrs;
	struct rds_ib_mr_pool	*mr_1m_pool;
	struct rds_ib_mr_pool	*mr_8k_pool;
	unsigned int		max_8k_mrs;
	unsigned int		max_1m_mrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	refcount_t		refcount;
	struct work_struct	free_work;
	/* per-CQ-vector load counters — presumably used to spread
	 * i_scq_vector/i_rcq_vector assignments; confirm in ib_cm.c. */
	int			*vector_load;
};
33cf601da net/rds: NULL poi... |
253 254 255 256 257 258 259 |
/* Return the NUMA node of the device backing @ibdev, or NUMA_NO_NODE
 * when the ib_device has no parent device set. */
static inline int ibdev_to_node(struct ib_device *ibdev)
{
	struct device *parent;

	parent = ibdev->dev.parent;
	return parent ? dev_to_node(parent) : NUMA_NO_NODE;
}

/* Same, taking a struct rds_ib_device *.
 * Fix: parenthesize the macro argument so expression arguments
 * (e.g. a ternary) expand with the intended precedence. */
#define rdsibdev_to_node(rdsibdev) ibdev_to_node((rdsibdev)->dev)
ec16227e1 RDS/IB: Infiniban... |
261 262 263 264 265 266 267 268 269 270 |
/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)

/* Counters kept per CPU (see DECLARE_PER_CPU of rds_ib_stats below)
 * and summed by rds_ib_stats_info_copy(). */
struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_evt_handler_call;
	uint64_t	s_ib_tasklet_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_total_frags;
	uint64_t	s_ib_rx_total_incs;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_8k_alloc;
	uint64_t	s_ib_rdma_mr_8k_free;
	uint64_t	s_ib_rdma_mr_8k_used;
	uint64_t	s_ib_rdma_mr_8k_pool_flush;
	uint64_t	s_ib_rdma_mr_8k_pool_wait;
	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
	uint64_t	s_ib_rdma_mr_1m_alloc;
	uint64_t	s_ib_rdma_mr_1m_free;
	uint64_t	s_ib_rdma_mr_1m_used;
	uint64_t	s_ib_rdma_mr_1m_pool_flush;
	uint64_t	s_ib_rdma_mr_1m_pool_wait;
	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
	uint64_t	s_ib_rdma_mr_8k_reused;
	uint64_t	s_ib_rdma_mr_1m_reused;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
	uint64_t	s_ib_recv_added_to_cache;
	uint64_t	s_ib_recv_removed_from_cache;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
/* Sync every DMA-mapped entry of @sglist for CPU access.
 * NOTE(review): the parameter named sg_dma_len shadows the kernel's
 * sg_dma_len() accessor; the parenthesized use below still resolves to
 * the per-entry accessor. Confusing, but preserved from upstream. */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
					   sg_dma_len(sg), direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

/* Sync every DMA-mapped entry of @sglist for device access.
 * Same sg_dma_len shadowing caveat as the _for_cpu variant above. */
static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev, sg_dma_address(sg),
					      sg_dma_len(sg), direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device

/* ib.c */
extern struct rds_transport rds_ib_transport;
3e0249f9c RDS/IB: add refco... |
349 350 |
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device); void rds_ib_dev_put(struct rds_ib_device *rds_ibdev); |
ec16227e1 RDS/IB: Infiniban... |
351 |
extern struct ib_client rds_ib_client; |
3ba23ade4 RDS: Set retry_co... |
352 |
extern unsigned int rds_ib_retry_count; |
ec16227e1 RDS/IB: Infiniban... |
353 354 355 356 357 358 359 |
extern spinlock_t ib_nodev_conns_lock; extern struct list_head ib_nodev_conns; /* ib_cm.c */ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp); void rds_ib_conn_free(void *arg); |
b04e8554f RDS: TCP: Hooks t... |
360 |
int rds_ib_conn_path_connect(struct rds_conn_path *cp); |
226f7a7d9 RDS: Rework path ... |
361 |
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp); |
ec16227e1 RDS/IB: Infiniban... |
362 |
void rds_ib_state_change(struct sock *sk); |
ef87b7ea3 RDS: remove __ini... |
363 |
int rds_ib_listen_init(void); |
ec16227e1 RDS/IB: Infiniban... |
364 |
void rds_ib_listen_stop(void); |
6cdaf03f8 RDS: add __printf... |
365 |
__printf(2, 3) |
ec16227e1 RDS/IB: Infiniban... |
366 367 |
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...); int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, |
eee2fa6ab rds: Changing IP ... |
368 369 |
struct rdma_cm_event *event, bool isv6); int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6); |
ec16227e1 RDS/IB: Infiniban... |
370 371 |
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event); |
9b17f5884 net/rds: Use DMA ... |
372 373 374 375 376 |
struct rds_header **rds_dma_hdrs_alloc(struct ib_device *ibdev, struct dma_pool *pool, dma_addr_t **dma_addrs, u32 num_hdrs); void rds_dma_hdrs_free(struct dma_pool *pool, struct rds_header **hdrs, dma_addr_t *dma_addrs, u32 num_hdrs); |
ec16227e1 RDS/IB: Infiniban... |
377 378 379 380 381 |
#define rds_ib_conn_error(conn, fmt...) \ __rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt) /* ib_rdma.c */ |
eee2fa6ab rds: Changing IP ... |
382 383 |
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, struct in6_addr *ipaddr); |
745cbccac RDS: Rewrite conn... |
384 385 |
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); |
8aeb1ba66 RDS/IB: destroy c... |
386 |
void rds_ib_destroy_nodev_conns(void); |
1659185fb RDS: IB: Support ... |
387 |
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc); |
ec16227e1 RDS/IB: Infiniban... |
388 389 |
/* ib_recv.c */ |
ef87b7ea3 RDS: remove __ini... |
390 |
int rds_ib_recv_init(void); |
ec16227e1 RDS/IB: Infiniban... |
391 |
void rds_ib_recv_exit(void); |
2da43c4a1 RDS: TCP: make re... |
392 |
int rds_ib_recv_path(struct rds_conn_path *conn); |
f394ad28f rds: rds_ib_recv_... |
393 |
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp); |
332441258 RDS/IB: Add cachi... |
394 |
void rds_ib_recv_free_caches(struct rds_ib_connection *ic); |
73ce4317b RDS: make sure we... |
395 |
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp); |
ec16227e1 RDS/IB: Infiniban... |
396 |
void rds_ib_inc_free(struct rds_incoming *inc); |
c310e72c8 rds: switch ->inc... |
397 |
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); |
f4f943c95 RDS: IB: ack more... |
398 399 |
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc, struct rds_ib_ack_state *state); |
d521b63b2 RDS/IB+IW: Move r... |
400 |
void rds_ib_recv_tasklet_fn(unsigned long data); |
ec16227e1 RDS/IB: Infiniban... |
401 402 403 404 405 406 |
void rds_ib_recv_init_ring(struct rds_ib_connection *ic); void rds_ib_recv_clear_ring(struct rds_ib_connection *ic); void rds_ib_recv_init_ack(struct rds_ib_connection *ic); void rds_ib_attempt_ack(struct rds_ib_connection *ic); void rds_ib_ack_send_complete(struct rds_ib_connection *ic); u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic); |
f4f943c95 RDS: IB: ack more... |
407 |
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required); |
ec16227e1 RDS/IB: Infiniban... |
408 409 410 411 412 413 414 415 416 417 418 419 420 421 |
/* ib_ring.c */ void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr); void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr); u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos); void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val); void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val); int rds_ib_ring_empty(struct rds_ib_work_ring *ring); int rds_ib_ring_low(struct rds_ib_work_ring *ring); u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring); u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest); extern wait_queue_head_t rds_ib_ring_empty_wait; /* ib_send.c */ |
226f7a7d9 RDS: Rework path ... |
422 |
void rds_ib_xmit_path_complete(struct rds_conn_path *cp); |
ec16227e1 RDS/IB: Infiniban... |
423 424 |
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off); |
0c28c0450 RDS: IB: split se... |
425 |
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc); |
ec16227e1 RDS/IB: Infiniban... |
426 427 |
void rds_ib_send_init_ring(struct rds_ib_connection *ic); void rds_ib_send_clear_ring(struct rds_ib_connection *ic); |
f8b3aaf2b RDS: Remove struc... |
428 |
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op); |
ec16227e1 RDS/IB: Infiniban... |
429 430 431 |
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits); void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted); int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, |
7b70d0336 RDS/IW+IB: Allow ... |
432 |
u32 *adv_credits, int need_posted, int max_posted); |
ff3d7d361 RDS: Perform unma... |
433 |
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op); |
ec16227e1 RDS/IB: Infiniban... |
434 435 |
/* ib_stats.c */ |
16fdf8ba9 rds: Fix build re... |
436 |
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats); |
ec16227e1 RDS/IB: Infiniban... |
437 |
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member) |
09b2b8f52 RDS: IB: add few ... |
438 439 |
#define rds_ib_stats_add(member, count) \ rds_stats_add_which(rds_ib_stats, member, count) |
ec16227e1 RDS/IB: Infiniban... |
440 441 442 443 |
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter, unsigned int avail); /* ib_sysctl.c */ |
ef87b7ea3 RDS: remove __ini... |
444 |
int rds_ib_sysctl_init(void); |
ec16227e1 RDS/IB: Infiniban... |
445 446 447 448 449 450 451 |
void rds_ib_sysctl_exit(void); extern unsigned long rds_ib_sysctl_max_send_wr; extern unsigned long rds_ib_sysctl_max_recv_wr; extern unsigned long rds_ib_sysctl_max_unsig_wrs; extern unsigned long rds_ib_sysctl_max_unsig_bytes; extern unsigned long rds_ib_sysctl_max_recv_allocation; extern unsigned int rds_ib_sysctl_flow_control; |
ec16227e1 RDS/IB: Infiniban... |
452 |
|
ec16227e1 RDS/IB: Infiniban... |
453 |
#endif |