Commit ed8ada393388ef7ccfcfb3a88d8718f7df4b3165
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband updates from Roland Dreier:
 "Last batch of IB changes for 3.12: many mlx5 hardware driver fixes
  plus one trivial semicolon cleanup"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB: Remove unnecessary semicolons
  IB/mlx5: Ensure proper synchronization accessing memory
  IB/mlx5: Fix alignment of reg umr gather buffers
  IB/mlx5: Fix eq names to display nicely in /proc/interrupts
  mlx5: Fix error code translation from firmware to driver
  IB/mlx5: Fix opt param mask according to firmware spec
  mlx5: Fix opt param mask for sq err to rts transition
  IB/mlx5: Disable atomic operations
  mlx5: Fix layout of struct mlx5_init_seg
  mlx5: Keep polling to reclaim pages while any returned
  IB/mlx5: Avoid async events on invalid port number
  IB/mlx5: Decrease memory consumption of mr caches
  mlx5: Remove checksum on command interface commands
  IB/mlx5: Fix memory leak in mlx5_ib_create_srq
  IB/mlx5: Flush cache workqueue before destroying it
  IB/mlx5: Fix send work queue size calculation
Showing 15 changed files (unified diff)
- drivers/infiniband/hw/amso1100/c2_ae.c
- drivers/infiniband/hw/mlx5/main.c
- drivers/infiniband/hw/mlx5/mr.c
- drivers/infiniband/hw/mlx5/qp.c
- drivers/infiniband/hw/mlx5/srq.c
- drivers/infiniband/hw/mthca/mthca_eq.c
- drivers/infiniband/hw/ocrdma/ocrdma_hw.c
- drivers/infiniband/hw/ocrdma/ocrdma_main.c
- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
- drivers/net/ethernet/mellanox/mlx5/core/cmd.c
- drivers/net/ethernet/mellanox/mlx5/core/eq.c
- drivers/net/ethernet/mellanox/mlx5/core/main.c
- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
- include/linux/mlx5/device.h
- include/linux/mlx5/driver.h
drivers/infiniband/hw/amso1100/c2_ae.c
drivers/infiniband/hw/mlx5/main.c
@@ -164,6 +164,7 @@
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+	char name[MLX5_MAX_EQ_NAME];
 	struct mlx5_eq *eq, *n;
 	int ncomp_vec;
 	int nent;
 
@@ -180,11 +181,10 @@
 			goto clean;
 		}
 
-		snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+		snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
 		err = mlx5_create_map_eq(&dev->mdev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-					 eq->name,
-					 &dev->mdev.priv.uuari.uars[0]);
+					 name, &dev->mdev.priv.uuari.uars[0]);
 		if (err) {
 			kfree(eq);
 			goto clean;
@@ -301,9 +301,8 @@
 	props->max_srq_sge = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
 	props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
-	props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-	props->masked_atomic_cap = IB_ATOMIC_HCA;
+	props->atomic_cap = IB_ATOMIC_NONE;
+	props->masked_atomic_cap = IB_ATOMIC_NONE;
 	props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
 	props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
 	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1005,6 +1004,11 @@
 
 	ibev.device = &ibdev->ib_dev;
 	ibev.element.port_num = port;
+
+	if (port < 1 || port > ibdev->num_ports) {
+		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+		return;
+	}
 
 	if (ibdev->ib_active)
 		ib_dispatch_event(&ibev);
drivers/infiniband/hw/mlx5/mr.c
@@ -42,6 +42,10 @@
 	DEF_CACHE_SIZE	= 10,
 };
 
+enum {
+	MLX5_UMR_ALIGN	= 2048
+};
+
 static __be64 *mr_align(__be64 *ptr, int align)
 {
 	unsigned long mask = align - 1;
 
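The mask trick in mr_align() relies on the alignment being a power of two: adding align - 1 and clearing the low bits rounds a pointer up to the next boundary, which is why the caller over-allocates by align - 1 bytes. A minimal userspace sketch of the same arithmetic (buffer and sizes here are illustrative, not driver values):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round ptr up to the next 'align' boundary; align must be a power of two. */
static void *align_up(void *ptr, uintptr_t align)
{
	uintptr_t mask = align - 1;

	return (void *)(((uintptr_t)ptr + mask) & ~mask);
}

int main(void)
{
	/* Over-allocate by align - 1 so an aligned region of the requested
	 * size always fits, mirroring kmalloc(size + MLX5_UMR_ALIGN - 1). */
	size_t size = 4096, align = 2048;
	void *raw = malloc(size + align - 1);
	void *aligned = align_up(raw, align);

	printf("raw=%p aligned=%p\n", raw, aligned);
	free(raw);
	return 0;
}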
@@ -61,13 +65,11 @@
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_ib_mr *mr;
 	int npages = 1 << ent->order;
-	int size = sizeof(u64) * npages;
 	int err = 0;
 	int i;
 
@@ -83,21 +85,6 @@
 		}
 		mr->order = ent->order;
 		mr->umred = 1;
-		mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
-		if (!mr->pas) {
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-		mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
-					 DMA_TO_DEVICE);
-		if (dma_mapping_error(ddev, mr->dma)) {
-			kfree(mr->pas);
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-
 		in->seg.status = 1 << 6;
 		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
 		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@
 					    sizeof(*in));
 		if (err) {
 			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
 			kfree(mr);
 			goto out;
 		}
 
@@ -129,11 +114,9 @@
 
 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 	int i;
 
@@ -149,14 +132,10 @@
 		ent->size--;
 		spin_unlock(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
@@ -408,13 +387,12 @@
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 
+	cancel_delayed_work(&ent->dwork);
 	while (1) {
 		spin_lock(&ent->lock);
 		if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@
 		ent->size--;
 		spin_unlock(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
@@ -540,13 +514,15 @@
 	int i;
 
 	dev->cache.stopped = 1;
-	destroy_workqueue(dev->cache.wq);
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mr_cache_debugfs_cleanup(dev);
 
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
 		clean_keys(dev, i);
 
+	destroy_workqueue(dev->cache.wq);
+
 	return 0;
 }
 
@@ -675,10 +651,12 @@
 				  int page_shift, int order, int access_flags)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct device *ddev = dev->ib_dev.dma_device;
 	struct umr_common *umrc = &dev->umrc;
 	struct ib_send_wr wr, *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
+	int size = sizeof(u64) * npages;
 	int err;
 	int i;
 
@@ -697,8 +675,23 @@
 	if (!mr)
 		return ERR_PTR(-EAGAIN);
 
-	mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+	mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+	if (!mr->pas) {
+		err = -ENOMEM;
+		goto error;
+	}
 
+	mlx5_ib_populate_pas(dev, umem, page_shift,
+			     mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+
+	mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(ddev, mr->dma)) {
+		kfree(mr->pas);
+		err = -ENOMEM;
+		goto error;
+	}
+
 	memset(&wr, 0, sizeof(wr));
 	wr.wr_id = (u64)(unsigned long)mr;
 	prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
@@ -717,6 +710,9 @@
 	}
 	wait_for_completion(&mr->done);
 	up(&umrc->sem);
+
+	dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+	kfree(mr->pas);
 
 	if (mr->status != IB_WC_SUCCESS) {
 		mlx5_ib_warn(dev, "reg umr failed\n");
drivers/infiniband/hw/mlx5/qp.c
@@ -203,7 +203,7 @@
 
 	switch (qp_type) {
 	case IB_QPT_XRC_INI:
-		size = sizeof(struct mlx5_wqe_xrc_seg);
+		size += sizeof(struct mlx5_wqe_xrc_seg);
 		/* fall through */
 	case IB_QPT_RC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@
 			sizeof(struct mlx5_wqe_raddr_seg);
 		break;
 
+	case IB_QPT_XRC_TGT:
+		return 0;
+
 	case IB_QPT_UC:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_raddr_seg);
 		break;
 
 	case IB_QPT_UD:
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_datagram_seg);
 		break;
 
 	case MLX5_IB_QPT_REG_UMR:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
 			sizeof(struct mlx5_mkey_seg);
 		break;
@@ -270,7 +273,8 @@
 		return wqe_size;
 
 	if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
-		mlx5_ib_dbg(dev, "\n");
+		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+			    wqe_size, dev->mdev.caps.max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -280,9 +284,15 @@
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+			    qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+		return -ENOMEM;
+	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
 	qp->sq.max_gs = attr->cap.max_send_sge;
-	qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+	qp->sq.max_post = wq_size / wqe_size;
+	attr->cap.max_send_wr = qp->sq.max_post;
 
 	return wq_size;
 }
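The max_post fix matters because the old expression 1 << ilog2(wq_size / wqe_size) rounded the quotient down to a power of two, silently wasting queue capacity whenever the quotient was not already a power of two; the new code uses the exact quotient. A standalone C sketch of the arithmetic with illustrative sizes:

#include <stdio.h>

/* Integer log2: index of the highest set bit, like the kernel's ilog2(). */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int wq_size = 4096, wqe_size = 192;	/* illustrative */
	unsigned int quotient = wq_size / wqe_size;	/* 21 WQEs fit */

	/* Old formula rounded down to a power of two: reports 16. */
	printf("old max_post = %u\n", 1u << ilog2_u32(quotient));
	/* New formula uses the full capacity: reports 21. */
	printf("new max_post = %u\n", quotient);
	return 0;
}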
@@ -1280,6 +1290,11 @@
 					  MLX5_QP_OPTPAR_Q_KEY,
 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
 					   MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+					   MLX5_QP_OPTPAR_RRE |
+					   MLX5_QP_OPTPAR_RAE |
+					   MLX5_QP_OPTPAR_RWE |
+					   MLX5_QP_OPTPAR_PKEY_INDEX,
 		},
 	},
 	[MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@
 		[MLX5_QP_STATE_RTS] = {
 			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
+			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+					   MLX5_QP_OPTPAR_RWE |
+					   MLX5_QP_OPTPAR_RAE |
+					   MLX5_QP_OPTPAR_RRE,
 		},
 	},
 };
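The driver keeps a lookup table indexed by (from-state, to-state, service type) whose entries are OR'd bitmasks of the QP attributes that may optionally change on that transition; a missing entry means no optional parameters are accepted, which is why the XRC and sq-err-to-RTS entries had to be added to match the firmware spec. A reduced C sketch of the pattern (the states, types, and flag names below are illustrative, not the driver's):

#include <stdio.h>

enum state { ST_INIT, ST_RTR, ST_RTS, ST_MAX };
enum svc   { SVC_RC, SVC_UC, SVC_MAX };

/* Illustrative optional-parameter flag bits. */
#define OPT_PKEY_INDEX	(1u << 0)
#define OPT_RWE		(1u << 1)
#define OPT_RNR_TIMEOUT	(1u << 2)

/* opt_mask[from][to][type]: attributes that may optionally change. */
static const unsigned int opt_mask[ST_MAX][ST_MAX][SVC_MAX] = {
	[ST_INIT][ST_RTR] = {
		[SVC_RC] = OPT_PKEY_INDEX | OPT_RWE,
		[SVC_UC] = OPT_PKEY_INDEX,
	},
	[ST_RTR][ST_RTS] = {
		[SVC_RC] = OPT_RNR_TIMEOUT | OPT_RWE,
		[SVC_UC] = OPT_RWE,
	},
	/* entries left zero accept no optional attributes */
};

int main(void)
{
	printf("RC INIT->RTR mask: 0x%x\n", opt_mask[ST_INIT][ST_RTR][SVC_RC]);
	return 0;
}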
@@ -1651,29 +1671,6 @@
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = 0;
-	}
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
-{
-	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
 
@@ -2063,28 +2060,11 @@
 
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-			break;
-
 		case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_masked_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-			break;
+			mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+			err = -ENOSYS;
+			*bad_wr = wr;
+			goto out;
 
 		case IB_WR_LOCAL_INV:
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
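When ib_post_send() fails partway through a chain of work requests, the verbs contract is to report the first unposted request through *bad_wr so the caller knows where to resume, which is what the rejection path above does. A self-contained C sketch of that contract (the struct and opcode names are simplified stand-ins, not the verbs API):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct send_wr {
	int op;			/* illustrative opcode */
	struct send_wr *next;
};

enum { OP_SEND, OP_ATOMIC };	/* illustrative */

/* Post a chain of WRs; on failure report the offending WR via *bad. */
static int post_send(struct send_wr *wr, struct send_wr **bad)
{
	for (; wr; wr = wr->next) {
		if (wr->op == OP_ATOMIC) {	/* unsupported, as in the patch */
			*bad = wr;
			return -ENOSYS;
		}
		/* ... build and ring the WQE here ... */
	}
	*bad = NULL;	/* cleared on success, for clarity in this sketch */
	return 0;
}

int main(void)
{
	struct send_wr w2 = { OP_ATOMIC, NULL };
	struct send_wr w1 = { OP_SEND, &w2 };
	struct send_wr *bad;
	int err = post_send(&w1, &bad);

	printf("err=%d, bad is %s\n", err, bad == &w2 ? "second WR" : "none");
	return 0;
}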
drivers/infiniband/hw/mlx5/srq.c
@@ -295,7 +295,7 @@
 	mlx5_vfree(in);
 	if (err) {
 		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
-		goto err_srq;
+		goto err_usr_kern_srq;
 	}
 
 	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
@@ -316,6 +316,8 @@
 
 err_core:
 	mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+
+err_usr_kern_srq:
 	if (pd->uobject)
 		destroy_srq_user(pd, srq);
 	else
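The leak existed because the early failure path jumped past the user/kernel SRQ teardown; the new label lets the error path unwind everything allocated so far, in reverse order. A self-contained C illustration of the goto-unwind pattern (the resource names are made up):

#include <errno.h>
#include <stdlib.h>

/* Hypothetical two-stage setup showing reverse-order goto unwinding. */
static int create_thing(void)
{
	char *buf;
	int err;

	buf = malloc(64);		/* stage 1 */
	if (!buf)
		return -ENOMEM;

	err = -EIO;			/* pretend stage 2 failed */
	if (err)
		goto err_buf;		/* must free stage 1, not skip it */

	return 0;

err_buf:
	free(buf);			/* unwind in reverse order */
	return err;
}

int main(void)
{
	return create_thing() ? 1 : 0;
}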
drivers/infiniband/hw/mthca/mthca_eq.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -150,7 +150,7 @@
 		return IB_QPS_SQE;
 	case OCRDMA_QPS_ERR:
 		return IB_QPS_ERR;
-	};
+	}
 	return IB_QPS_ERR;
 }
 
@@ -171,7 +171,7 @@
 		return OCRDMA_QPS_SQE;
 	case IB_QPS_ERR:
 		return OCRDMA_QPS_ERR;
-	};
+	}
 	return OCRDMA_QPS_ERR;
 }
 
@@ -1982,7 +1982,7 @@
 		break;
 	default:
 		return -EINVAL;
-	};
+	}
 
 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
 	if (!cmd)
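The stray `};` after a switch block is a null statement: legal C, so these compiled cleanly, but it is flagged by tools such as the kernel's checkpatch.pl, and the same habit is genuinely dangerous in other positions, for example after an if condition. A short C illustration:

#include <stdio.h>

int main(void)
{
	int x = 0;

	switch (x) {
	case 0:
		break;
	};	/* extra ';' is a harmless but sloppy null statement */

	if (x == 1);	/* here a stray ';' silently eats the branch */
		printf("runs regardless of x\n");

	return 0;
}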
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -141,7 +141,7 @@
 		/* Unsupported */
 		*ib_speed = IB_SPEED_SDR;
 		*ib_width = IB_WIDTH_1X;
-	};
+	}
 }
 
@@ -2331,7 +2331,7 @@
 	default:
 		ibwc_status = IB_WC_GENERAL_ERR;
 		break;
-	};
+	}
 	return ibwc_status;
 }
 
@@ -2370,7 +2370,7 @@
 		pr_err("%s() invalid opcode received = 0x%x\n",
 		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
 		break;
-	};
+	}
 }
 
 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -180,28 +180,32 @@
 	return 0;
 }
 
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+			   int csum)
 {
 	block->token = token;
-	block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
-	block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+	if (csum) {
+		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+					    sizeof(block->data) - 2);
+		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+	}
 }
 
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
 {
 	struct mlx5_cmd_mailbox *next = msg->next;
 
 	while (next) {
-		calc_block_sig(next->buf, token);
+		calc_block_sig(next->buf, token, csum);
 		next = next->next;
 	}
 }
 
-static void set_signature(struct mlx5_cmd_work_ent *ent)
+static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 {
 	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
-	calc_chain_sig(ent->in, ent->token);
-	calc_chain_sig(ent->out, ent->token);
+	calc_chain_sig(ent->in, ent->token, csum);
+	calc_chain_sig(ent->out, ent->token, csum);
 }
 
 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -539,8 +543,7 @@
 	lay->type = MLX5_PCI_CMD_XPORT;
 	lay->token = ent->token;
 	lay->status_own = CMD_OWNER_HW;
-	if (!cmd->checksum_disabled)
-		set_signature(ent);
+	set_signature(ent, !cmd->checksum_disabled);
 	dump_command(dev, ent, 1);
 	ktime_get_ts(&ent->ts1);
 
@@ -773,8 +776,6 @@
 
 		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
 		block = next->buf;
-		if (xor8_buf(block, sizeof(*block)) != 0xff)
-			return -EINVAL;
 
 		memcpy(to, block->data, copy);
 		to += copy;
@@ -1361,6 +1362,7 @@
 		goto err_map;
 	}
 
+	cmd->checksum_disabled = 1;
 	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
 	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
 
@@ -1510,7 +1512,7 @@
 	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:	return -EIO;
 	case MLX5_CMD_STAT_BAD_RES_ERR:		return -EINVAL;
 	case MLX5_CMD_STAT_RES_BUSY:		return -EBUSY;
-	case MLX5_CMD_STAT_LIM_ERR:		return -EINVAL;
+	case MLX5_CMD_STAT_LIM_ERR:		return -ENOMEM;
 	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:	return -EINVAL;
 	case MLX5_CMD_STAT_IX_ERR:		return -EINVAL;
 	case MLX5_CMD_STAT_NO_RES_ERR:		return -EAGAIN;
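The command-interface signature is an 8-bit XOR checksum: XOR all bytes together and store the complement, so that XOR-ing the whole signed buffer back yields 0xff, which is exactly the test the removed verification code performed. A standalone C sketch of the scheme:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* XOR all bytes together, as the driver's xor8_buf() does. */
static uint8_t xor8_buf(const void *buf, int len)
{
	const uint8_t *p = buf;
	uint8_t sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= p[i];
	return sum;
}

int main(void)
{
	uint8_t block[16];

	memset(block, 0xab, sizeof(block));
	/* Sign: store the complement of the XOR over all other bytes. */
	block[15] = ~xor8_buf(block, sizeof(block) - 1);
	/* Verify: XOR over the whole signed block must give 0xff. */
	printf("check = 0x%02x (expect 0xff)\n",
	       xor8_buf(block, sizeof(block)));
	return 0;
}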
drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -366,9 +366,11 @@
 		goto err_in;
 	}
 
+	snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+		 name, pci_name(dev->pdev));
 	eq->eqn = out.eq_number;
 	err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
-			  name, eq);
+			  eq->name, eq);
 	if (err)
 		goto err_eq;
 
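Names like "mlx5_comp0@pci:0000:04:00.0" need more than the old 20-byte MLX5_MAX_EQ_NAME, hence the bump to 32 in driver.h; snprintf truncates silently, so sizing for the worst case is what makes the names display whole in /proc/interrupts. A small C check of the arithmetic (the PCI address below is an example):

#include <stdio.h>

#define MLX5_MAX_EQ_NAME 32	/* new size from driver.h */

int main(void)
{
	char name[MLX5_MAX_EQ_NAME];
	/* "mlx5_comp%d" plus "@pci:" plus a 12-char PCI address. */
	int n = snprintf(name, sizeof(name), "%s@pci:%s",
			 "mlx5_comp0", "0000:04:00.0");

	/* n >= sizeof(name) would mean the name was truncated. */
	printf("\"%s\" needs %d bytes, buffer has %zu\n",
	       name, n + 1, sizeof(name));
	return 0;
}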
drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -165,9 +165,7 @@
 	struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
 	struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
 	struct mlx5_cmd_set_hca_cap_mbox_out set_out;
-	struct mlx5_profile *prof = dev->profile;
 	u64 flags;
-	int csum = 1;
 	int err;
 
 	memset(&query_ctx, 0, sizeof(query_ctx));
 
@@ -197,20 +195,14 @@
 	memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
 	       sizeof(set_ctx->hca_cap));
 
-	if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
-		csum = !!prof->cmdif_csum;
-		flags = be64_to_cpu(set_ctx->hca_cap.flags);
-		if (csum)
-			flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-		else
-			flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-
-		set_ctx->hca_cap.flags = cpu_to_be64(flags);
-	}
-
 	if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
 		set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
 
+	flags = be64_to_cpu(query_out->hca_cap.flags);
+	/* disable checksum */
+	flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+
+	set_ctx->hca_cap.flags = cpu_to_be64(flags);
 	memset(&set_out, 0, sizeof(set_out));
 	set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
 	set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
@@ -224,9 +216,6 @@
 	err = mlx5_cmd_status_to_err(&set_out.hdr);
 	if (err)
 		goto query_ex;
-
-	if (!csum)
-		dev->cmd.checksum_disabled = 1;
 
 query_ex:
 	kfree(query_out);
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -90,6 +90,10 @@
 	__be64		pas[0];
 };
 
+enum {
+	MAX_RECLAIM_TIME_MSECS	= 5000,
+};
+
 static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
 {
 	struct rb_root *root = &dev->priv.page_root;
@@ -279,6 +283,9 @@
 	int err;
 	int i;
 
+	if (nclaimed)
+		*nclaimed = 0;
+
 	memset(&in, 0, sizeof(in));
 	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
 	out = mlx5_vzalloc(outlen);
 
@@ -388,20 +395,25 @@
 
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
 {
-	unsigned long end = jiffies + msecs_to_jiffies(5000);
+	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
 	struct fw_page *fwp;
 	struct rb_node *p;
+	int nclaimed = 0;
 	int err;
 
 	do {
 		p = rb_first(&dev->priv.page_root);
 		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
-			err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL);
+			err = reclaim_pages(dev, fwp->func_id,
+					    optimal_reclaimed_pages(),
+					    &nclaimed);
 			if (err) {
 				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
 				return err;
 			}
+			if (nclaimed)
+				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
 		}
 		if (time_after(jiffies, end)) {
 			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
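The reclaim loop now renews its 5-second deadline whenever the firmware returns at least one page, so it only gives up after 5 seconds with no progress rather than 5 seconds total. A userspace C sketch of the same polling pattern (poll_once is a stand-in for a reclaim round):

#include <stdio.h>
#include <time.h>

#define MAX_RECLAIM_TIME_SECS 5

/* Stand-in for one reclaim round; returns how many items it recovered. */
static int poll_once(int *remaining)
{
	if (*remaining > 0) {
		(*remaining)--;
		return 1;
	}
	return 0;
}

int main(void)
{
	int remaining = 3;
	time_t end = time(NULL) + MAX_RECLAIM_TIME_SECS;

	while (remaining > 0) {
		int nclaimed = poll_once(&remaining);

		/* Any progress renews the deadline, as in the patch. */
		if (nclaimed)
			end = time(NULL) + MAX_RECLAIM_TIME_SECS;
		if (time(NULL) > end) {
			fprintf(stderr, "no progress, giving up\n");
			return 1;
		}
	}
	printf("all items reclaimed\n");
	return 0;
}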
include/linux/mlx5/device.h
@@ -181,7 +181,7 @@
 	MLX5_DEV_CAP_FLAG_TLP_HINTS	= 1LL << 39,
 	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
 	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 41,
-	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 1LL << 46,
+	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
 };
 
 enum {
@@ -417,7 +417,7 @@
 	struct health_buffer	health;
 	__be32			rsvd2[884];
 	__be32			health_counter;
-	__be32			rsvd3[1023];
+	__be32			rsvd3[1019];
 	__be64			ieee1588_clk;
 	__be32			ieee1588_clk_type;
 	__be32			clr_intx;
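Two layout fixes here: CMDIF_CSUM becomes a two-bit field (3LL << 46 covers bits 46-47), and rsvd3 shrinks by four 32-bit words, exactly the 16 bytes that ieee1588_clk (8) plus ieee1588_clk_type and clr_intx (4 each) occupy, so the fields after the reserve sit at the offsets the hardware expects. Mistakes like this are cheapest to catch at compile time; a C sketch of that idea using a simplified mock struct (the real layout lives in include/linux/mlx5/device.h, and the offsets below are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Simplified mock of a memory-mapped init segment. */
struct mock_init_seg {
	uint32_t rsvd[4];
	uint32_t health_counter;
	uint32_t rsvd3[3];	/* shrink this when adding fields below */
	uint64_t ieee1588_clk;
	uint32_t ieee1588_clk_type;
	uint32_t clr_intx;
};

/* Pin the offsets the hardware expects; a wrong rsvd3 size breaks these. */
_Static_assert(offsetof(struct mock_init_seg, ieee1588_clk) == 32,
	       "ieee1588_clk misplaced");
_Static_assert(sizeof(struct mock_init_seg) == 48, "layout size drifted");

int main(void) { return 0; }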
include/linux/mlx5/driver.h
@@ -82,7 +82,7 @@
 };
 
 enum {
-	MLX5_MAX_EQ_NAME	= 20
+	MLX5_MAX_EQ_NAME	= 32
 };
 
 enum {
@@ -747,8 +747,7 @@
 
 enum {
 	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
-	MLX5_PROF_MASK_CMDIF_CSUM	= (u64)1 << 1,
-	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 2,
+	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
 };
 
 enum {
@@ -758,7 +757,6 @@
 struct mlx5_profile {
 	u64	mask;
 	u32	log_max_qp;
-	int	cmdif_csum;
 	struct {
 		int	size;
 		int	limit;