net/rds/ib_recv.c

/*
 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_ibinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IB_RECV_SGE;

		sge = &recv->r_sge[0];
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		sge = &recv->r_sge[1];
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = ic->i_pd->local_dma_lkey;
	}
}

/*
 * The entire 'from' list, including the from element itself, is put on
 * to the tail of the 'to' list.
 */
static void list_splice_entire_tail(struct list_head *from,
				    struct list_head *to)
{
	struct list_head *from_last = from->prev;

	list_splice_tail(from_last, to);
	list_add_tail(from_last, to);
}

static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
	struct list_head *tmp;

	tmp = xchg(&cache->xfer, NULL);
	if (tmp) {
		if (cache->ready)
			list_splice_entire_tail(tmp, cache->ready);
		else
			cache->ready = tmp;
	}
}
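
/* Allocate one refill cache: per-cpu heads (first/count) that feed the
 * shared lockless 'xfer' list, which is drained into 'ready' by
 * rds_ib_cache_xfer_to_ready() above.
 */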
static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
{
	struct rds_ib_cache_head *head;
	int cpu;

	cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
	if (!cache->percpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		head->first = NULL;
		head->count = 0;
	}
	cache->xfer = NULL;
	cache->ready = NULL;

	return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
{
	int ret;

	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
	if (!ret) {
		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
		if (ret)
			free_percpu(ic->i_cache_incs.percpu);
	}

	return ret;
}

static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
					  struct list_head *caller_list)
{
	struct rds_ib_cache_head *head;
	int cpu;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		if (head->first) {
			list_splice_entire_tail(head->first, caller_list);
			head->first = NULL;
		}
	}

	if (cache->ready) {
		list_splice_entire_tail(cache->ready, caller_list);
		cache->ready = NULL;
	}
}

void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
	struct rds_ib_incoming *inc;
	struct rds_ib_incoming *inc_tmp;
	struct rds_page_frag *frag;
	struct rds_page_frag *frag_tmp;
	LIST_HEAD(list);

	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
	free_percpu(ic->i_cache_incs.percpu);

	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
		list_del(&inc->ii_cache_entry);
		WARN_ON(!list_empty(&inc->ii_frags));
		kmem_cache_free(rds_ib_incoming_slab, inc);
	}

	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
	free_percpu(ic->i_cache_frags.percpu);

	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
		list_del(&frag->f_cache_entry);
		WARN_ON(!list_empty(&frag->f_item));
		kmem_cache_free(rds_ib_frag_slab, frag);
	}
}

/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);

/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
			     struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

	rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
	atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
	rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
}

/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;
	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

	/* Free attached frags */
	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_ib_frag_free(ic, frag);
	}
	BUG_ON(!list_empty(&ibinc->ii_frags));

	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}

static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
				  struct rds_ib_recv_work *recv)
{
	if (recv->r_ibinc) {
		rds_inc_put(&recv->r_ibinc->ii_inc);
		recv->r_ibinc = NULL;
	}
	if (recv->r_frag) {
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
	}
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}
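
/* Get an inc for a new message, preferably from the cache; otherwise
 * allocate one from the slab, bounded by rds_ib_sysctl_max_recv_allocation.
 */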
static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
						     gfp_t slab_mask)
{
	struct rds_ib_incoming *ibinc;
	struct list_head *cache_item;
	int avail_allocs;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
	if (cache_item) {
		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
	} else {
		avail_allocs = atomic_add_unless(&rds_ib_allocation,
						 1, rds_ib_sysctl_max_recv_allocation);
		if (!avail_allocs) {
			rds_ib_stats_inc(s_ib_rx_alloc_limit);
			return NULL;
		}
		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
		if (!ibinc) {
			atomic_dec(&rds_ib_allocation);
			return NULL;
		}
		rds_ib_stats_inc(s_ib_rx_total_incs);
	}
	INIT_LIST_HEAD(&ibinc->ii_frags);
	rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);

	return ibinc;
}

static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
						    gfp_t slab_mask, gfp_t page_mask)
{
	struct rds_page_frag *frag;
	struct list_head *cache_item;
	int ret;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
	if (cache_item) {
		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
		atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
		rds_ib_stats_add(s_ib_recv_removed_from_cache, RDS_FRAG_SIZE);
	} else {
		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
		if (!frag)
			return NULL;

		sg_init_table(&frag->f_sg, 1);
		ret = rds_page_remainder_alloc(&frag->f_sg,
					       RDS_FRAG_SIZE, page_mask);
		if (ret) {
			kmem_cache_free(rds_ib_frag_slab, frag);
			return NULL;
		}
		rds_ib_stats_inc(s_ib_rx_total_frags);
	}

	INIT_LIST_HEAD(&frag->f_item);

	return frag;
}
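
/* Attach an inc (for a message start) and a DMA-mapped data frag to one
 * ring entry.  The slab and page GFP masks are derived from the caller's
 * gfp: GFP_NOWAIT unless direct reclaim is allowed.
 */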
static int rds_ib_recv_refill_one(struct rds_connection *conn,
				  struct rds_ib_recv_work *recv, gfp_t gfp)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_sge *sge;
	int ret = -ENOMEM;
	gfp_t slab_mask = GFP_NOWAIT;
	gfp_t page_mask = GFP_NOWAIT;

	if (gfp & __GFP_DIRECT_RECLAIM) {
		slab_mask = GFP_KERNEL;
		page_mask = GFP_HIGHUSER;
	}

	if (!ic->i_cache_incs.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	if (!ic->i_cache_frags.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

	/*
	 * ibinc was taken from recv if recv contained the start of a message.
	 * recvs that were continuations will still have this allocated.
	 */
	if (!recv->r_ibinc) {
		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
		if (!recv->r_ibinc)
			goto out;
	}

	WARN_ON(recv->r_frag); /* leak! */
	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
	if (!recv->r_frag)
		goto out;

	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
			    1, DMA_FROM_DEVICE);
	WARN_ON(ret != 1);

	sge = &recv->r_sge[0];
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	sge = &recv->r_sge[1];
	sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
	sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);

	ret = 0;
out:
	return ret;
}
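
/* Only one thread refills per connection at a time: RDS_RECV_REFILL in
 * c_flags is used as a bit lock, and release_refill() wakes any waiters
 * on c_waitq once the refiller is done.
 */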
static int acquire_refill(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
}

static void release_refill(struct rds_connection *conn)
{
	clear_bit(RDS_RECV_REFILL, &conn->c_flags);

	/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.
 */
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_recv_work *recv;
	unsigned int posted = 0;
	int ret = 0;
	bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
	u32 pos;

	/* the goal here is to just make sure that someone, somewhere
	 * is posting buffers.  If we can't get the refill lock,
	 * let them do their thing
	 */
	if (!acquire_refill(conn))
		return;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
			       pos);
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_ib_recv_refill_one(conn, recv, gfp);
		if (ret) {
			break;
		}

		rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
			 (long) ib_sg_dma_address(
				ic->i_cm_id->device,
				&recv->r_frag->f_sg));

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
		if (ret) {
			rds_ib_conn_error(conn, "recv post on "
			       "%pI6c returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_ib_advertise_credits(conn, posted);

	if (ret)
		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);

	release_refill(conn);

	/* if we're called from the softirq handler, we'll be GFP_NOWAIT.
	 * in this case the ring being low is going to lead to more interrupts
	 * and we can safely let the softirq code take care of it unless the
	 * ring is completely empty.
	 *
	 * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
	 * we might have raced with the softirq code while we had the refill
	 * lock held. Use rds_ib_ring_low() instead of ring_empty to decide
	 * if we should requeue.
	 */
	if (rds_conn_up(conn) &&
	    ((can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
	    rds_ib_ring_empty(&ic->i_recv_ring))) {
		queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
	}
}

/*
 * We want to recycle several types of recv allocations, like incs and frags.
 * To use this, the *_free() function passes in the ptr to a list_head within
 * the recyclee, as well as the cache to put it on.
 *
 * First, we put the memory on a percpu list. When this reaches a certain size,
 * we move it to an intermediate non-percpu list in a lockless manner, with some
 * xchg/cmpxchg wizardry.
 *
 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 * list_empty() will return true when one element is actually present.
 */
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache)
{
	unsigned long flags;
	struct list_head *old, *chpfirst;

	local_irq_save(flags);

	chpfirst = __this_cpu_read(cache->percpu->first);
	if (!chpfirst)
		INIT_LIST_HEAD(new_item);
	else /* put on front */
		list_add_tail(new_item, chpfirst);

	__this_cpu_write(cache->percpu->first, new_item);
	__this_cpu_inc(cache->percpu->count);

	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
		goto end;

	/*
	 * Return our per-cpu first list to the cache's xfer by atomically
	 * grabbing the current xfer list, appending it to our per-cpu list,
	 * and then atomically returning that entire list back to the
	 * cache's xfer list as long as it's still empty.
	 */
	do {
		old = xchg(&cache->xfer, NULL);
		if (old)
			list_splice_entire_tail(old, chpfirst);
		old = cmpxchg(&cache->xfer, NULL, chpfirst);
	} while (old);

	__this_cpu_write(cache->percpu->first, NULL);
	__this_cpu_write(cache->percpu->count, 0);
end:
	local_irq_restore(flags);
}
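
/* Pop one entry off the cache's 'ready' list.  Since the anchor is a bare
 * pointer rather than a list_head, the returned entry is the head itself
 * and the following entry, if any, becomes the new head.
 */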
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
	struct list_head *head = cache->ready;

	if (head) {
		if (!list_empty(head)) {
			cache->ready = head->next;
			list_del_init(head);
		} else
			cache->ready = NULL;
	}

	return head;
}
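
/* Copy received data to userspace, walking the inc's fragment list and
 * feeding the iov_iter at most RDS_FRAG_SIZE bytes at a time.
 */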
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (iov_iter_count(to) && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		to_copy = min_t(unsigned long, iov_iter_count(to),
				RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		/* XXX needs + offset for multiple recvs per page */
		rds_stats_add(s_copy_to_user, to_copy);
		ret = copy_page_to_iter(sg_page(&frag->f_sg),
					frag->f_sg.offset + frag_off,
					to_copy,
					to);
		if (ret != to_copy)
			return -EFAULT;

		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_pd->local_dma_lkey;

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IB_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_atomic();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_atomic();

	return atomic64_read(&ic->i_ack_next);
}
#endif
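
/* Build and post the single long-lived ACK WR described above,
 * piggybacking whatever send credits we are advertising to the peer.
 */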
static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	u64 seq;
	int ret;

	seq = rds_ib_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_ib_stats_inc(s_ib_ack_send_failure);

		rds_ib_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_ib_stats_inc(s_ib_ack_sent);
}
} else rds_ib_stats_inc(s_ib_ack_sent); } /* * There are 3 ways of getting acknowledgements to the peer: * 1. We call rds_ib_attempt_ack from the recv completion handler * to send an ACK-only frame. * However, there can be only one such frame in the send queue * at any time, so we may have to postpone it. * 2. When another (data) packet is transmitted while there's * an ACK in the queue, we piggyback the ACK sequence number * on the data packet. * 3. If the ACK WR is done sending, we get called from the * send queue completion handler, and check whether there's * another ACK pending (postponed because the WR was on the * queue). If so, we transmit it. * * We maintain 2 variables: * - i_ack_flags, which keeps track of whether the ACK WR * is currently in the send queue or not (IB_ACK_IN_FLIGHT) * - i_ack_next, which is the last sequence number we received * * Potentially, send queue and receive queue handlers can run concurrently. |
8cbd9606a RDS: Use spinlock... |
674 675 676 677 |
* It would be nice to not have to use a spinlock to synchronize things, * but the one problem that rules this out is that 64bit updates are * not atomic on all platforms. Things would be a lot simpler if * we had atomic64 or maybe cmpxchg64 everywhere. |
1e23b3ee0 RDS/IB: Receive d... |
678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 |
* * Reconnecting complicates this picture just slightly. When we * reconnect, we may be seeing duplicate packets. The peer * is retransmitting them, because it hasn't seen an ACK for * them. It is important that we ACK these. * * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with * this flag set *MUST* be acknowledged immediately. */ /* * When we get here, we're called from the recv queue handler. * Check whether we ought to transmit an ACK. */ void rds_ib_attempt_ack(struct rds_ib_connection *ic) { unsigned int adv_credits; if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags)) return; if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) { rds_ib_stats_inc(s_ib_ack_send_delayed); return; } /* Can we get a send credit? */ |
7b70d0336 RDS/IW+IB: Allow ... |
705 |
if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) { |
1e23b3ee0 RDS/IB: Receive d... |
706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 |
rds_ib_stats_inc(s_ib_tx_throttle); clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); return; } clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); rds_ib_send_ack(ic, adv_credits); } /* * We get here from the send completion handler, when the * adapter tells us the ACK frame was sent. */ void rds_ib_ack_send_complete(struct rds_ib_connection *ic) { clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); rds_ib_attempt_ack(ic); } /* * This is called by the regular xmit code when it wants to piggyback * an ACK on an outgoing frame. */ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic) { if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags)) rds_ib_stats_inc(s_ib_ack_send_piggybacked); return rds_ib_get_ack(ic); } /* * It's kind of lame that we're copying from the posted receive pages into * long-lived bitmaps. We could have posted the bitmaps and rdma written into * them. But receiving new congestion bitmaps should be a *rare* event, so * hopefully we won't need to invest that complexity in making it more * efficient. By copying we can share a simpler core with TCP which has to * copy. */ static void rds_ib_cong_recv(struct rds_connection *conn, struct rds_ib_incoming *ibinc) { struct rds_cong_map *map; unsigned int map_off; unsigned int map_page; struct rds_page_frag *frag; unsigned long frag_off; unsigned long to_copy; unsigned long copied; uint64_t uncongested = 0; void *addr; /* catch completely corrupt packets */ if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES) return; map = conn->c_fcong; map_page = 0; map_off = 0; frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item); frag_off = 0; copied = 0; while (copied < RDS_CONG_MAP_BYTES) { uint64_t *src, *dst; unsigned int k; to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off); BUG_ON(to_copy & 7); /* Must be 64bit aligned. */ |

		addr = kmap_atomic(sg_page(&frag->f_sg));

		src = addr + frag->f_sg.offset + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);
	rds_cong_map_updated(map, uncongested);
}
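
/* Process one completed recv: validate the header, absorb the piggybacked
 * ACK and credit updates, then either consume an ACK-only frame or chain
 * the data fragment onto the in-progress inc, delivering the inc upward
 * once its last fragment has arrived.
 */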
static void rds_ib_process_recv(struct rds_connection *conn,
				struct rds_ib_recv_work *recv, u32 data_len,
				struct rds_ib_ack_state *state)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_incoming *ibinc = ic->i_ibinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
		 data_len);

	if (data_len < sizeof(struct rds_header)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI6c didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	data_len -= sizeof(struct rds_header);

	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI6c has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_ib_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet. The fact that it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_ib_stats_inc(s_ib_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.. copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!ibinc) {
		ibinc = recv->r_ibinc;
		recv->r_ibinc = NULL;
		ic->i_ibinc = ibinc;

		hdr = &ibinc->ii_inc.i_hdr;
		ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
				local_clock();
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
		ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
				local_clock();

		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &ibinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_ib_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_ibinc = NULL;

		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) {
			rds_ib_cong_recv(conn, ibinc);
		} else {
			rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr,
					  &ibinc->ii_inc, GFP_ATOMIC);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&ibinc->ii_inc);
	}
}

void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
			     struct ib_wc *wc,
			     struct rds_ib_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct rds_ib_recv_work *recv;

	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
		 (unsigned long long)wc->wr_id, wc->status,
		 ib_wc_status_msg(wc->status), wc->byte_len,
		 be32_to_cpu(wc->ex.imm_data));

	rds_ib_stats_inc(s_ib_rx_cq_event);
	recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
	ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
			DMA_FROM_DEVICE);

	/* Also process recvs in connecting state because it is possible
	 * to get a recv completion _before_ the rdmacm ESTABLISHED
	 * event is processed.
	 */
	if (wc->status == IB_WC_SUCCESS) {
		rds_ib_process_recv(conn, recv, wc->byte_len, state);
	} else {
		/* We expect errors as the qp is drained during shutdown */
		if (rds_conn_up(conn) || rds_conn_connecting(conn))
			rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n",
					  &conn->c_laddr, &conn->c_faddr,
					  wc->status,
					  ib_wc_status_msg(wc->status));
	}

	/* rds_ib_process_recv() doesn't always consume the frag, and
	 * we might not have called it at all if the wc didn't indicate
	 * success. We already unmapped the frag's pages, though, and
	 * the following rds_ib_ring_free() call tells the refill path
	 * that it will not find an allocated frag here. Make sure we
	 * keep that promise by freeing a frag that's still on the ring.
	 */
	if (recv->r_frag) {
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
	}
	rds_ib_ring_free(&ic->i_recv_ring, 1);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	if (rds_ib_ring_low(&ic->i_recv_ring)) {
		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
		rds_ib_stats_inc(s_ib_rx_refill_from_cq);
	}
}
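
/* The transport's recv path, run from the RDS worker thread whenever
 * c_recv_w is queued (e.g. by the completion handler above when the
 * receive ring runs low).
 */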
int rds_ib_recv_path(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p\n", conn);
	if (rds_conn_up(conn)) {
		rds_ib_attempt_ack(ic);
		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
	}

	return 0;
}
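
/* Module init/exit: size the global receive-allocation limit from total
 * RAM and create (or destroy) the slabs backing incs and frags.
 */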
int rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
					sizeof(struct rds_ib_incoming),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_incoming_slab)
		goto out;

	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
					sizeof(struct rds_page_frag),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_frag_slab) {
		kmem_cache_destroy(rds_ib_incoming_slab);
		rds_ib_incoming_slab = NULL;
	} else
		ret = 0;
out:
	return ret;
}

void rds_ib_recv_exit(void)
{
	kmem_cache_destroy(rds_ib_incoming_slab);
	kmem_cache_destroy(rds_ib_frag_slab);
}