Blame view
net/rds/ib.h
11.4 KB
ec16227e1 RDS/IB: Infiniban... |
1 2 3 4 5 |
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

/* pages per FMR mapping — presumably the fmr_attr max_pages; confirm in ib_rdma.c */
#define RDS_FMR_SIZE			256
/* FMRs kept per pool — NOTE(review): sizing rationale lives in ib_rdma.c; confirm there */
#define RDS_FMR_POOL_SIZE		8192

/* scatter/gather limits per work request */
#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2	/* header + data fragment */

/* default work-request ring depths (runtime-tunable via ib_sysctl.c) */
#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256

/* QP retry count used at connection setup (see rds_ib_retry_count) */
#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

/* number of recv frags/incs moved between recycle-cache lists at a time */
#define RDS_IB_RECYCLE_BATCH_COUNT	32

/* protects rds_ib_devices; see ib.c for the definition */
extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
/*
 * One receive-buffer page fragment.  Lives either on an incoming
 * message's fragment list or, when idle, on a per-connection
 * recycle cache (i_cache_frags).
 */
struct rds_page_frag {
	struct list_head	f_item;		/* link on rds_ib_incoming ii_frags */
	struct list_head	f_cache_entry;	/* link on the frag recycle cache */
	struct scatterlist	f_sg;		/* the page backing this fragment */
};

/*
 * An RDS message being reassembled from received fragments.
 * Idle instances are parked on the i_cache_incs recycle cache.
 */
struct rds_ib_incoming {
	struct list_head	ii_frags;	/* list of rds_page_frag */
	struct list_head	ii_cache_entry;	/* link on the inc recycle cache */
	struct rds_incoming	ii_inc;		/* generic RDS incoming state */
};
/* Head of one per-CPU free list inside a refill cache. */
struct rds_ib_cache_head {
	struct list_head	*first;		/* first entry on this CPU's list */
	unsigned long		count;		/* entries on this CPU's list */
};

/*
 * Recycle cache for receive structures (incs and frags): per-CPU
 * free lists, plus xfer/ready lists used to hand batches from the
 * freeing path to the refill path.
 */
struct rds_ib_refill_cache {
	struct rds_ib_cache_head	*percpu;	/* alloc_percpu'd heads */
	struct list_head		*xfer;		/* batches in transit */
	struct list_head		*ready;		/* batches ready to reuse */
};
/*
 * Private data carried in the RDMA CM connect request/response.
 * This is wire format shared with the peer:
 * Add new fields at the end, and don't permute existing fields.
 */
struct rds_ib_connect_private {
	__be32			dp_saddr;	/* sender's IPv4 address */
	__be32			dp_daddr;	/* receiver's IPv4 address */
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};

/* State tracked for each posted send work request. */
struct rds_ib_send_work {
	void			*s_op;		/* owning op, for unmap at completion — confirm in ib_send.c */
	struct ib_send_wr	s_wr;
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;	/* when the WR was queued — presumably jiffies */
};

/* State tracked for each posted receive work request. */
struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;	/* message this recv feeds */
	struct rds_page_frag	*r_frag;	/* fragment receiving the data */
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];	/* RDS_IB_RECV_SGE entries */
};

/* Allocation book-keeping for a ring of send or recv work requests. */
struct rds_ib_work_ring {
	u32			w_nr;		/* ring size */
	u32			w_alloc_ptr;
	u32			w_alloc_ctr;
	u32			w_free_ptr;
	atomic_t		w_free_ctr;	/* atomic: freed from completion context */
};

struct rds_ib_device;

/* IB transport state hung off a struct rds_connection. */
struct rds_ib_connection {

	struct list_head	ib_node;	/* on device conn_list or ib_nodev_conns */
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_mr		*i_mr;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;	/* data op currently being transmitted */
	struct rds_header	*i_send_hdrs;	/* DMA-able header array */
	u64			i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t		i_signaled_sends; /* signaled WRs still outstanding */

	/* rx */
	struct tasklet_struct	i_recv_tasklet;
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;	/* message currently being reassembled */
	u32			i_recv_data_rem; /* bytes still expected for i_ibinc */
	struct rds_header	*i_recv_hdrs;	/* DMA-able header array */
	u64			i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;

	/* sending acks */
	unsigned long		i_ack_flags;	/* IB_ACK_* bits */
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;		/* DMA-able header for the ACK WR */
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one posted
	 * recv credits we need to transfer to remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg
	 */
	atomic_t		i_credits;	/* packed; see IB_{GET,SET}_*_CREDITS */

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs; /* WRs posted since last signaled one */
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)

/* An IPv4 address known to belong to an IB device. */
struct rds_ib_ipaddr {
	struct list_head	list;		/* on rds_ib_device ipaddr_list */
	__be32			ipaddr;
};

/* Per IB device state, shared by every connection using that device. */
struct rds_ib_device {
	struct list_head	list;		/* on rds_ib_devices */
	struct list_head	ipaddr_list;	/* of rds_ib_ipaddr */
	struct list_head	conn_list;	/* of rds_ib_connection */
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct ib_mr		*mr;
	struct rds_ib_mr_pool	*mr_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_fmrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	atomic_t		refcount;	/* dropped via rds_ib_dev_put() */
	struct work_struct	free_work;	/* deferred teardown at last put */
};
/* Map an RDS IB device to its NUMA node (via the underlying PCI bus)
 * so allocations can be made node-local to the HCA. */
#define pcidev_to_node(pcidev)	pcibus_to_node(pcidev->bus)
#define ibdev_to_node(ibdev)	pcidev_to_node(to_pci_dev(ibdev->dma_device))
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)

/* Event counters exported through rds-info; see ib_stats.c. */
struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_tx_cq_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_call;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_alloc;
	uint64_t	s_ib_rdma_mr_free;
	uint64_t	s_ib_rdma_mr_used;
	uint64_t	s_ib_rdma_mr_pool_flush;
	uint64_t	s_ib_rdma_mr_pool_wait;
	uint64_t	s_ib_rdma_mr_pool_depleted;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sg,
					      unsigned int sg_dma_len,
					      int direction)
{
	unsigned int i;

	/* sync each mapped entry individually */
	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sg,
						 unsigned int sg_dma_len,
						 int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device


/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int fmr_message_size;
extern unsigned int rds_ib_retry_count;

/* protects ib_nodev_conns: connections not yet bound to a device */
extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

/* report a connection error with a consistently tagged message */
#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
			struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
char *rds_ib_wc_status_str(enum ib_wc_status status);
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif