Commit 4c171acc20794af16a27da25e11ec4e9cad5d9fa
Exists in master and in 39 other branches
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cma: Save PID of ID's owner
  RDMA/cma: Add support for netlink statistics export
  RDMA/cma: Pass QP type into rdma_create_id()
  RDMA: Update exported headers list
  RDMA/cma: Export enum cma_state in <rdma/rdma_cm.h>
  RDMA/nes: Add a check for strict_strtoul()
  RDMA/cxgb3: Don't post zero-byte read if endpoint is going away
  RDMA/cxgb4: Use completion objects for event blocking
  IB/srp: Fix integer -> pointer cast warnings
  IB: Add devnode methods to cm_class and umad_class
  IB/mad: Return EPROTONOSUPPORT when an RDMA device lacks the QP required
  IB/uverbs: Add devnode method to set path/mode
  RDMA/ucma: Add .nodename/.mode to tell userspace where to create device node
  RDMA: Add netlink infrastructure
  RDMA: Add error handling to ib_core_init()
Showing 31 changed files (side-by-side diff)
- drivers/infiniband/Kconfig
- drivers/infiniband/core/Makefile
- drivers/infiniband/core/cm.c
- drivers/infiniband/core/cma.c
- drivers/infiniband/core/device.c
- drivers/infiniband/core/mad.c
- drivers/infiniband/core/netlink.c
- drivers/infiniband/core/ucma.c
- drivers/infiniband/core/user_mad.c
- drivers/infiniband/core/uverbs_main.c
- drivers/infiniband/hw/cxgb3/iwch_cm.c
- drivers/infiniband/hw/cxgb3/iwch_provider.h
- drivers/infiniband/hw/cxgb3/iwch_qp.c
- drivers/infiniband/hw/cxgb4/iw_cxgb4.h
- drivers/infiniband/hw/nes/nes.c
- drivers/infiniband/hw/qib/Kconfig
- drivers/infiniband/ulp/iser/iser_verbs.c
- drivers/infiniband/ulp/srp/ib_srp.c
- include/linux/netlink.h
- include/rdma/Kbuild
- include/rdma/ib_user_cm.h
- include/rdma/rdma_cm.h
- include/rdma/rdma_netlink.h
- net/9p/trans_rdma.c
- net/rds/ib.c
- net/rds/ib_cm.c
- net/rds/iw.c
- net/rds/iw_cm.c
- net/rds/rdma_transport.c
- net/sunrpc/xprtrdma/svc_rdma_transport.c
- net/sunrpc/xprtrdma/verbs.c
drivers/infiniband/Kconfig
drivers/infiniband/core/Makefile
drivers/infiniband/core/cm.c
... | ... | @@ -3639,8 +3639,16 @@ |
3639 | 3639 | .release = cm_release_port_obj |
3640 | 3640 | }; |
3641 | 3641 | |
3642 | +static char *cm_devnode(struct device *dev, mode_t *mode) | |
3643 | +{ | |
3644 | + *mode = 0666; | |
3645 | + return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); | |
3646 | +} | |
3647 | + | |
3642 | 3648 | struct class cm_class = { |
3649 | + .owner = THIS_MODULE, | |
3643 | 3650 | .name = "infiniband_cm", |
3651 | + .devnode = cm_devnode, | |
3644 | 3652 | }; |
3645 | 3653 | EXPORT_SYMBOL(cm_class); |
3646 | 3654 |
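The cm.c hunk above is what relocates ib_ucm's device nodes: a class devnode method lets the driver core choose the node's path (here under /dev/infiniband/) and its permissions (0666) when devtmpfs/udev creates it. A minimal userspace sketch of the effect; the node name ucm0 is hypothetical and exists only once ib_ucm has registered a device in cm_class:

    /* Sketch only: the 0666 mode set in cm_devnode() is what lets an
     * unprivileged process open the node directly. */
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            int fd = open("/dev/infiniband/ucm0", O_RDWR); /* hypothetical node */

            if (fd < 0)
                    perror("open /dev/infiniband/ucm0");
            return fd < 0;
    }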
drivers/infiniband/core/cma.c
... | ... | @@ -47,6 +47,7 @@ |
47 | 47 | |
48 | 48 | #include <rdma/rdma_cm.h> |
49 | 49 | #include <rdma/rdma_cm_ib.h> |
50 | +#include <rdma/rdma_netlink.h> | |
50 | 51 | #include <rdma/ib_cache.h> |
51 | 52 | #include <rdma/ib_cm.h> |
52 | 53 | #include <rdma/ib_sa.h> |
... | ... | @@ -89,20 +90,6 @@ |
89 | 90 | struct list_head id_list; |
90 | 91 | }; |
91 | 92 | |
92 | -enum cma_state { | |
93 | - CMA_IDLE, | |
94 | - CMA_ADDR_QUERY, | |
95 | - CMA_ADDR_RESOLVED, | |
96 | - CMA_ROUTE_QUERY, | |
97 | - CMA_ROUTE_RESOLVED, | |
98 | - CMA_CONNECT, | |
99 | - CMA_DISCONNECT, | |
100 | - CMA_ADDR_BOUND, | |
101 | - CMA_LISTEN, | |
102 | - CMA_DEVICE_REMOVAL, | |
103 | - CMA_DESTROYING | |
104 | -}; | |
105 | - | |
106 | 93 | struct rdma_bind_list { |
107 | 94 | struct idr *ps; |
108 | 95 | struct hlist_head owners; |
... | ... | @@ -126,7 +113,7 @@ |
126 | 113 | struct list_head mc_list; |
127 | 114 | |
128 | 115 | int internal_id; |
129 | - enum cma_state state; | |
116 | + enum rdma_cm_state state; | |
130 | 117 | spinlock_t lock; |
131 | 118 | struct mutex qp_mutex; |
132 | 119 | |
... | ... | @@ -146,6 +133,7 @@ |
146 | 133 | u32 seq_num; |
147 | 134 | u32 qkey; |
148 | 135 | u32 qp_num; |
136 | + pid_t owner; | |
149 | 137 | u8 srq; |
150 | 138 | u8 tos; |
151 | 139 | u8 reuseaddr; |
... | ... | @@ -165,8 +153,8 @@ |
165 | 153 | struct cma_work { |
166 | 154 | struct work_struct work; |
167 | 155 | struct rdma_id_private *id; |
168 | - enum cma_state old_state; | |
169 | - enum cma_state new_state; | |
156 | + enum rdma_cm_state old_state; | |
157 | + enum rdma_cm_state new_state; | |
170 | 158 | struct rdma_cm_event event; |
171 | 159 | }; |
172 | 160 | |
... | ... | @@ -217,7 +205,7 @@ |
217 | 205 | #define CMA_VERSION 0x00 |
218 | 206 | #define SDP_MAJ_VERSION 0x2 |
219 | 207 | |
220 | -static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp) | |
208 | +static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) | |
221 | 209 | { |
222 | 210 | unsigned long flags; |
223 | 211 | int ret; |
... | ... | @@ -229,7 +217,7 @@ |
229 | 217 | } |
230 | 218 | |
231 | 219 | static int cma_comp_exch(struct rdma_id_private *id_priv, |
232 | - enum cma_state comp, enum cma_state exch) | |
220 | + enum rdma_cm_state comp, enum rdma_cm_state exch) | |
233 | 221 | { |
234 | 222 | unsigned long flags; |
235 | 223 | int ret; |
236 | 224 | |
... | ... | @@ -241,11 +229,11 @@ |
241 | 229 | return ret; |
242 | 230 | } |
243 | 231 | |
244 | -static enum cma_state cma_exch(struct rdma_id_private *id_priv, | |
245 | - enum cma_state exch) | |
232 | +static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, | |
233 | + enum rdma_cm_state exch) | |
246 | 234 | { |
247 | 235 | unsigned long flags; |
248 | - enum cma_state old; | |
236 | + enum rdma_cm_state old; | |
249 | 237 | |
250 | 238 | spin_lock_irqsave(&id_priv->lock, flags); |
251 | 239 | old = id_priv->state; |
... | ... | @@ -279,11 +267,6 @@ |
279 | 267 | hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF); |
280 | 268 | } |
281 | 269 | |
282 | -static inline int cma_is_ud_ps(enum rdma_port_space ps) | |
283 | -{ | |
284 | - return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB); | |
285 | -} | |
286 | - | |
287 | 270 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, |
288 | 271 | struct cma_device *cma_dev) |
289 | 272 | { |
... | ... | @@ -413,7 +396,7 @@ |
413 | 396 | } |
414 | 397 | |
415 | 398 | static int cma_disable_callback(struct rdma_id_private *id_priv, |
416 | - enum cma_state state) | |
399 | + enum rdma_cm_state state) | |
417 | 400 | { |
418 | 401 | mutex_lock(&id_priv->handler_mutex); |
419 | 402 | if (id_priv->state != state) { |
... | ... | @@ -429,7 +412,8 @@ |
429 | 412 | } |
430 | 413 | |
431 | 414 | struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, |
432 | - void *context, enum rdma_port_space ps) | |
415 | + void *context, enum rdma_port_space ps, | |
416 | + enum ib_qp_type qp_type) | |
433 | 417 | { |
434 | 418 | struct rdma_id_private *id_priv; |
435 | 419 | |
436 | 420 | |
... | ... | @@ -437,10 +421,12 @@ |
437 | 421 | if (!id_priv) |
438 | 422 | return ERR_PTR(-ENOMEM); |
439 | 423 | |
440 | - id_priv->state = CMA_IDLE; | |
424 | + id_priv->owner = task_pid_nr(current); | |
425 | + id_priv->state = RDMA_CM_IDLE; | |
441 | 426 | id_priv->id.context = context; |
442 | 427 | id_priv->id.event_handler = event_handler; |
443 | 428 | id_priv->id.ps = ps; |
429 | + id_priv->id.qp_type = qp_type; | |
444 | 430 | spin_lock_init(&id_priv->lock); |
445 | 431 | mutex_init(&id_priv->qp_mutex); |
446 | 432 | init_completion(&id_priv->comp); |
... | ... | @@ -508,7 +494,7 @@ |
508 | 494 | if (IS_ERR(qp)) |
509 | 495 | return PTR_ERR(qp); |
510 | 496 | |
511 | - if (cma_is_ud_ps(id_priv->id.ps)) | |
497 | + if (id->qp_type == IB_QPT_UD) | |
512 | 498 | ret = cma_init_ud_qp(id_priv, qp); |
513 | 499 | else |
514 | 500 | ret = cma_init_conn_qp(id_priv, qp); |
... | ... | @@ -636,7 +622,7 @@ |
636 | 622 | qp_attr->port_num = id_priv->id.port_num; |
637 | 623 | *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; |
638 | 624 | |
639 | - if (cma_is_ud_ps(id_priv->id.ps)) { | |
625 | + if (id_priv->id.qp_type == IB_QPT_UD) { | |
640 | 626 | ret = cma_set_qkey(id_priv); |
641 | 627 | if (ret) |
642 | 628 | return ret; |
... | ... | @@ -659,7 +645,7 @@ |
659 | 645 | id_priv = container_of(id, struct rdma_id_private, id); |
660 | 646 | switch (rdma_node_get_transport(id_priv->id.device->node_type)) { |
661 | 647 | case RDMA_TRANSPORT_IB: |
662 | - if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps)) | |
648 | + if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) | |
663 | 649 | ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); |
664 | 650 | else |
665 | 651 | ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, |
666 | 652 | |
667 | 653 | |
668 | 654 | |
... | ... | @@ -858,16 +844,16 @@ |
858 | 844 | } |
859 | 845 | |
860 | 846 | static void cma_cancel_operation(struct rdma_id_private *id_priv, |
861 | - enum cma_state state) | |
847 | + enum rdma_cm_state state) | |
862 | 848 | { |
863 | 849 | switch (state) { |
864 | - case CMA_ADDR_QUERY: | |
850 | + case RDMA_CM_ADDR_QUERY: | |
865 | 851 | rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); |
866 | 852 | break; |
867 | - case CMA_ROUTE_QUERY: | |
853 | + case RDMA_CM_ROUTE_QUERY: | |
868 | 854 | cma_cancel_route(id_priv); |
869 | 855 | break; |
870 | - case CMA_LISTEN: | |
856 | + case RDMA_CM_LISTEN: | |
871 | 857 | if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr) |
872 | 858 | && !id_priv->cma_dev) |
873 | 859 | cma_cancel_listens(id_priv); |
874 | 860 | |
... | ... | @@ -918,10 +904,10 @@ |
918 | 904 | void rdma_destroy_id(struct rdma_cm_id *id) |
919 | 905 | { |
920 | 906 | struct rdma_id_private *id_priv; |
921 | - enum cma_state state; | |
907 | + enum rdma_cm_state state; | |
922 | 908 | |
923 | 909 | id_priv = container_of(id, struct rdma_id_private, id); |
924 | - state = cma_exch(id_priv, CMA_DESTROYING); | |
910 | + state = cma_exch(id_priv, RDMA_CM_DESTROYING); | |
925 | 911 | cma_cancel_operation(id_priv, state); |
926 | 912 | |
927 | 913 | /* |
928 | 914 | |
... | ... | @@ -1015,9 +1001,9 @@ |
1015 | 1001 | int ret = 0; |
1016 | 1002 | |
1017 | 1003 | if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && |
1018 | - cma_disable_callback(id_priv, CMA_CONNECT)) || | |
1004 | + cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || | |
1019 | 1005 | (ib_event->event == IB_CM_TIMEWAIT_EXIT && |
1020 | - cma_disable_callback(id_priv, CMA_DISCONNECT))) | |
1006 | + cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) | |
1021 | 1007 | return 0; |
1022 | 1008 | |
1023 | 1009 | memset(&event, 0, sizeof event); |
... | ... | @@ -1048,7 +1034,8 @@ |
1048 | 1034 | event.status = -ETIMEDOUT; /* fall through */ |
1049 | 1035 | case IB_CM_DREQ_RECEIVED: |
1050 | 1036 | case IB_CM_DREP_RECEIVED: |
1051 | - if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT)) | |
1037 | + if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, | |
1038 | + RDMA_CM_DISCONNECT)) | |
1052 | 1039 | goto out; |
1053 | 1040 | event.event = RDMA_CM_EVENT_DISCONNECTED; |
1054 | 1041 | break; |
... | ... | @@ -1075,7 +1062,7 @@ |
1075 | 1062 | if (ret) { |
1076 | 1063 | /* Destroy the CM ID by returning a non-zero value. */ |
1077 | 1064 | id_priv->cm_id.ib = NULL; |
1078 | - cma_exch(id_priv, CMA_DESTROYING); | |
1065 | + cma_exch(id_priv, RDMA_CM_DESTROYING); | |
1079 | 1066 | mutex_unlock(&id_priv->handler_mutex); |
1080 | 1067 | rdma_destroy_id(&id_priv->id); |
1081 | 1068 | return ret; |
... | ... | @@ -1101,7 +1088,7 @@ |
1101 | 1088 | goto err; |
1102 | 1089 | |
1103 | 1090 | id = rdma_create_id(listen_id->event_handler, listen_id->context, |
1104 | - listen_id->ps); | |
1091 | + listen_id->ps, ib_event->param.req_rcvd.qp_type); | |
1105 | 1092 | if (IS_ERR(id)) |
1106 | 1093 | goto err; |
1107 | 1094 | |
... | ... | @@ -1132,7 +1119,7 @@ |
1132 | 1119 | rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); |
1133 | 1120 | |
1134 | 1121 | id_priv = container_of(id, struct rdma_id_private, id); |
1135 | - id_priv->state = CMA_CONNECT; | |
1122 | + id_priv->state = RDMA_CM_CONNECT; | |
1136 | 1123 | return id_priv; |
1137 | 1124 | |
1138 | 1125 | destroy_id: |
... | ... | @@ -1152,7 +1139,7 @@ |
1152 | 1139 | int ret; |
1153 | 1140 | |
1154 | 1141 | id = rdma_create_id(listen_id->event_handler, listen_id->context, |
1155 | - listen_id->ps); | |
1142 | + listen_id->ps, IB_QPT_UD); | |
1156 | 1143 | if (IS_ERR(id)) |
1157 | 1144 | return NULL; |
1158 | 1145 | |
... | ... | @@ -1172,7 +1159,7 @@ |
1172 | 1159 | } |
1173 | 1160 | |
1174 | 1161 | id_priv = container_of(id, struct rdma_id_private, id); |
1175 | - id_priv->state = CMA_CONNECT; | |
1162 | + id_priv->state = RDMA_CM_CONNECT; | |
1176 | 1163 | return id_priv; |
1177 | 1164 | err: |
1178 | 1165 | rdma_destroy_id(id); |
1179 | 1166 | |
... | ... | @@ -1201,13 +1188,13 @@ |
1201 | 1188 | int offset, ret; |
1202 | 1189 | |
1203 | 1190 | listen_id = cm_id->context; |
1204 | - if (cma_disable_callback(listen_id, CMA_LISTEN)) | |
1191 | + if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) | |
1205 | 1192 | return -ECONNABORTED; |
1206 | 1193 | |
1207 | 1194 | memset(&event, 0, sizeof event); |
1208 | 1195 | offset = cma_user_data_offset(listen_id->id.ps); |
1209 | 1196 | event.event = RDMA_CM_EVENT_CONNECT_REQUEST; |
1210 | - if (cma_is_ud_ps(listen_id->id.ps)) { | |
1197 | + if (listen_id->id.qp_type == IB_QPT_UD) { | |
1211 | 1198 | conn_id = cma_new_udp_id(&listen_id->id, ib_event); |
1212 | 1199 | event.param.ud.private_data = ib_event->private_data + offset; |
1213 | 1200 | event.param.ud.private_data_len = |
... | ... | @@ -1243,8 +1230,7 @@ |
1243 | 1230 | * while we're accessing the cm_id. |
1244 | 1231 | */ |
1245 | 1232 | mutex_lock(&lock); |
1246 | - if (cma_comp(conn_id, CMA_CONNECT) && | |
1247 | - !cma_is_ud_ps(conn_id->id.ps)) | |
1233 | + if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD)) | |
1248 | 1234 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); |
1249 | 1235 | mutex_unlock(&lock); |
1250 | 1236 | mutex_unlock(&conn_id->handler_mutex); |
... | ... | @@ -1257,7 +1243,7 @@ |
1257 | 1243 | conn_id->cm_id.ib = NULL; |
1258 | 1244 | |
1259 | 1245 | release_conn_id: |
1260 | - cma_exch(conn_id, CMA_DESTROYING); | |
1246 | + cma_exch(conn_id, RDMA_CM_DESTROYING); | |
1261 | 1247 | mutex_unlock(&conn_id->handler_mutex); |
1262 | 1248 | rdma_destroy_id(&conn_id->id); |
1263 | 1249 | |
... | ... | @@ -1328,7 +1314,7 @@ |
1328 | 1314 | struct sockaddr_in *sin; |
1329 | 1315 | int ret = 0; |
1330 | 1316 | |
1331 | - if (cma_disable_callback(id_priv, CMA_CONNECT)) | |
1317 | + if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) | |
1332 | 1318 | return 0; |
1333 | 1319 | |
1334 | 1320 | memset(&event, 0, sizeof event); |
... | ... | @@ -1371,7 +1357,7 @@ |
1371 | 1357 | if (ret) { |
1372 | 1358 | /* Destroy the CM ID by returning a non-zero value. */ |
1373 | 1359 | id_priv->cm_id.iw = NULL; |
1374 | - cma_exch(id_priv, CMA_DESTROYING); | |
1360 | + cma_exch(id_priv, RDMA_CM_DESTROYING); | |
1375 | 1361 | mutex_unlock(&id_priv->handler_mutex); |
1376 | 1362 | rdma_destroy_id(&id_priv->id); |
1377 | 1363 | return ret; |
1378 | 1364 | |
1379 | 1365 | |
... | ... | @@ -1393,20 +1379,20 @@ |
1393 | 1379 | struct ib_device_attr attr; |
1394 | 1380 | |
1395 | 1381 | listen_id = cm_id->context; |
1396 | - if (cma_disable_callback(listen_id, CMA_LISTEN)) | |
1382 | + if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) | |
1397 | 1383 | return -ECONNABORTED; |
1398 | 1384 | |
1399 | 1385 | /* Create a new RDMA id for the new IW CM ID */ |
1400 | 1386 | new_cm_id = rdma_create_id(listen_id->id.event_handler, |
1401 | 1387 | listen_id->id.context, |
1402 | - RDMA_PS_TCP); | |
1388 | + RDMA_PS_TCP, IB_QPT_RC); | |
1403 | 1389 | if (IS_ERR(new_cm_id)) { |
1404 | 1390 | ret = -ENOMEM; |
1405 | 1391 | goto out; |
1406 | 1392 | } |
1407 | 1393 | conn_id = container_of(new_cm_id, struct rdma_id_private, id); |
1408 | 1394 | mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); |
1409 | - conn_id->state = CMA_CONNECT; | |
1395 | + conn_id->state = RDMA_CM_CONNECT; | |
1410 | 1396 | |
1411 | 1397 | dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr); |
1412 | 1398 | if (!dev) { |
... | ... | @@ -1461,7 +1447,7 @@ |
1461 | 1447 | if (ret) { |
1462 | 1448 | /* User wants to destroy the CM ID */ |
1463 | 1449 | conn_id->cm_id.iw = NULL; |
1464 | - cma_exch(conn_id, CMA_DESTROYING); | |
1450 | + cma_exch(conn_id, RDMA_CM_DESTROYING); | |
1465 | 1451 | mutex_unlock(&conn_id->handler_mutex); |
1466 | 1452 | cma_deref_id(conn_id); |
1467 | 1453 | rdma_destroy_id(&conn_id->id); |
1468 | 1454 | |
... | ... | @@ -1548,13 +1534,14 @@ |
1548 | 1534 | struct rdma_cm_id *id; |
1549 | 1535 | int ret; |
1550 | 1536 | |
1551 | - id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps); | |
1537 | + id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps, | |
1538 | + id_priv->id.qp_type); | |
1552 | 1539 | if (IS_ERR(id)) |
1553 | 1540 | return; |
1554 | 1541 | |
1555 | 1542 | dev_id_priv = container_of(id, struct rdma_id_private, id); |
1556 | 1543 | |
1557 | - dev_id_priv->state = CMA_ADDR_BOUND; | |
1544 | + dev_id_priv->state = RDMA_CM_ADDR_BOUND; | |
1558 | 1545 | memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr, |
1559 | 1546 | ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr)); |
1560 | 1547 | |
... | ... | @@ -1601,8 +1588,8 @@ |
1601 | 1588 | route->num_paths = 1; |
1602 | 1589 | *route->path_rec = *path_rec; |
1603 | 1590 | } else { |
1604 | - work->old_state = CMA_ROUTE_QUERY; | |
1605 | - work->new_state = CMA_ADDR_RESOLVED; | |
1591 | + work->old_state = RDMA_CM_ROUTE_QUERY; | |
1592 | + work->new_state = RDMA_CM_ADDR_RESOLVED; | |
1606 | 1593 | work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; |
1607 | 1594 | work->event.status = status; |
1608 | 1595 | } |
... | ... | @@ -1660,7 +1647,7 @@ |
1660 | 1647 | goto out; |
1661 | 1648 | |
1662 | 1649 | if (id_priv->id.event_handler(&id_priv->id, &work->event)) { |
1663 | - cma_exch(id_priv, CMA_DESTROYING); | |
1650 | + cma_exch(id_priv, RDMA_CM_DESTROYING); | |
1664 | 1651 | destroy = 1; |
1665 | 1652 | } |
1666 | 1653 | out: |
1667 | 1654 | |
... | ... | @@ -1678,12 +1665,12 @@ |
1678 | 1665 | int destroy = 0; |
1679 | 1666 | |
1680 | 1667 | mutex_lock(&id_priv->handler_mutex); |
1681 | - if (id_priv->state == CMA_DESTROYING || | |
1682 | - id_priv->state == CMA_DEVICE_REMOVAL) | |
1668 | + if (id_priv->state == RDMA_CM_DESTROYING || | |
1669 | + id_priv->state == RDMA_CM_DEVICE_REMOVAL) | |
1683 | 1670 | goto out; |
1684 | 1671 | |
1685 | 1672 | if (id_priv->id.event_handler(&id_priv->id, &work->event)) { |
1686 | - cma_exch(id_priv, CMA_DESTROYING); | |
1673 | + cma_exch(id_priv, RDMA_CM_DESTROYING); | |
1687 | 1674 | destroy = 1; |
1688 | 1675 | } |
1689 | 1676 | |
... | ... | @@ -1707,8 +1694,8 @@ |
1707 | 1694 | |
1708 | 1695 | work->id = id_priv; |
1709 | 1696 | INIT_WORK(&work->work, cma_work_handler); |
1710 | - work->old_state = CMA_ROUTE_QUERY; | |
1711 | - work->new_state = CMA_ROUTE_RESOLVED; | |
1697 | + work->old_state = RDMA_CM_ROUTE_QUERY; | |
1698 | + work->new_state = RDMA_CM_ROUTE_RESOLVED; | |
1712 | 1699 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
1713 | 1700 | |
1714 | 1701 | route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); |
... | ... | @@ -1737,7 +1724,8 @@ |
1737 | 1724 | int ret; |
1738 | 1725 | |
1739 | 1726 | id_priv = container_of(id, struct rdma_id_private, id); |
1740 | - if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED)) | |
1727 | + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, | |
1728 | + RDMA_CM_ROUTE_RESOLVED)) | |
1741 | 1729 | return -EINVAL; |
1742 | 1730 | |
1743 | 1731 | id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, |
... | ... | @@ -1750,7 +1738,7 @@ |
1750 | 1738 | id->route.num_paths = num_paths; |
1751 | 1739 | return 0; |
1752 | 1740 | err: |
1753 | - cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); | |
1741 | + cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); | |
1754 | 1742 | return ret; |
1755 | 1743 | } |
1756 | 1744 | EXPORT_SYMBOL(rdma_set_ib_paths); |
... | ... | @@ -1765,8 +1753,8 @@ |
1765 | 1753 | |
1766 | 1754 | work->id = id_priv; |
1767 | 1755 | INIT_WORK(&work->work, cma_work_handler); |
1768 | - work->old_state = CMA_ROUTE_QUERY; | |
1769 | - work->new_state = CMA_ROUTE_RESOLVED; | |
1756 | + work->old_state = RDMA_CM_ROUTE_QUERY; | |
1757 | + work->new_state = RDMA_CM_ROUTE_RESOLVED; | |
1770 | 1758 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
1771 | 1759 | queue_work(cma_wq, &work->work); |
1772 | 1760 | return 0; |
... | ... | @@ -1830,8 +1818,8 @@ |
1830 | 1818 | goto err2; |
1831 | 1819 | } |
1832 | 1820 | |
1833 | - work->old_state = CMA_ROUTE_QUERY; | |
1834 | - work->new_state = CMA_ROUTE_RESOLVED; | |
1821 | + work->old_state = RDMA_CM_ROUTE_QUERY; | |
1822 | + work->new_state = RDMA_CM_ROUTE_RESOLVED; | |
1835 | 1823 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
1836 | 1824 | work->event.status = 0; |
1837 | 1825 | |
... | ... | @@ -1853,7 +1841,7 @@ |
1853 | 1841 | int ret; |
1854 | 1842 | |
1855 | 1843 | id_priv = container_of(id, struct rdma_id_private, id); |
1856 | - if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY)) | |
1844 | + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) | |
1857 | 1845 | return -EINVAL; |
1858 | 1846 | |
1859 | 1847 | atomic_inc(&id_priv->refcount); |
... | ... | @@ -1882,7 +1870,7 @@ |
1882 | 1870 | |
1883 | 1871 | return 0; |
1884 | 1872 | err: |
1885 | - cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED); | |
1873 | + cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); | |
1886 | 1874 | cma_deref_id(id_priv); |
1887 | 1875 | return ret; |
1888 | 1876 | } |
1889 | 1877 | |
... | ... | @@ -1941,14 +1929,16 @@ |
1941 | 1929 | |
1942 | 1930 | memset(&event, 0, sizeof event); |
1943 | 1931 | mutex_lock(&id_priv->handler_mutex); |
1944 | - if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) | |
1932 | + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, | |
1933 | + RDMA_CM_ADDR_RESOLVED)) | |
1945 | 1934 | goto out; |
1946 | 1935 | |
1947 | 1936 | if (!status && !id_priv->cma_dev) |
1948 | 1937 | status = cma_acquire_dev(id_priv); |
1949 | 1938 | |
1950 | 1939 | if (status) { |
1951 | - if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) | |
1940 | + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, | |
1941 | + RDMA_CM_ADDR_BOUND)) | |
1952 | 1942 | goto out; |
1953 | 1943 | event.event = RDMA_CM_EVENT_ADDR_ERROR; |
1954 | 1944 | event.status = status; |
... | ... | @@ -1959,7 +1949,7 @@ |
1959 | 1949 | } |
1960 | 1950 | |
1961 | 1951 | if (id_priv->id.event_handler(&id_priv->id, &event)) { |
1962 | - cma_exch(id_priv, CMA_DESTROYING); | |
1952 | + cma_exch(id_priv, RDMA_CM_DESTROYING); | |
1963 | 1953 | mutex_unlock(&id_priv->handler_mutex); |
1964 | 1954 | cma_deref_id(id_priv); |
1965 | 1955 | rdma_destroy_id(&id_priv->id); |
... | ... | @@ -2004,8 +1994,8 @@ |
2004 | 1994 | |
2005 | 1995 | work->id = id_priv; |
2006 | 1996 | INIT_WORK(&work->work, cma_work_handler); |
2007 | - work->old_state = CMA_ADDR_QUERY; | |
2008 | - work->new_state = CMA_ADDR_RESOLVED; | |
1997 | + work->old_state = RDMA_CM_ADDR_QUERY; | |
1998 | + work->new_state = RDMA_CM_ADDR_RESOLVED; | |
2009 | 1999 | work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; |
2010 | 2000 | queue_work(cma_wq, &work->work); |
2011 | 2001 | return 0; |
2012 | 2002 | |
... | ... | @@ -2034,13 +2024,13 @@ |
2034 | 2024 | int ret; |
2035 | 2025 | |
2036 | 2026 | id_priv = container_of(id, struct rdma_id_private, id); |
2037 | - if (id_priv->state == CMA_IDLE) { | |
2027 | + if (id_priv->state == RDMA_CM_IDLE) { | |
2038 | 2028 | ret = cma_bind_addr(id, src_addr, dst_addr); |
2039 | 2029 | if (ret) |
2040 | 2030 | return ret; |
2041 | 2031 | } |
2042 | 2032 | |
2043 | - if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY)) | |
2033 | + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) | |
2044 | 2034 | return -EINVAL; |
2045 | 2035 | |
2046 | 2036 | atomic_inc(&id_priv->refcount); |
... | ... | @@ -2056,7 +2046,7 @@ |
2056 | 2046 | |
2057 | 2047 | return 0; |
2058 | 2048 | err: |
2059 | - cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND); | |
2049 | + cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); | |
2060 | 2050 | cma_deref_id(id_priv); |
2061 | 2051 | return ret; |
2062 | 2052 | } |
... | ... | @@ -2070,7 +2060,7 @@ |
2070 | 2060 | |
2071 | 2061 | id_priv = container_of(id, struct rdma_id_private, id); |
2072 | 2062 | spin_lock_irqsave(&id_priv->lock, flags); |
2073 | - if (id_priv->state == CMA_IDLE) { | |
2063 | + if (id_priv->state == RDMA_CM_IDLE) { | |
2074 | 2064 | id_priv->reuseaddr = reuse; |
2075 | 2065 | ret = 0; |
2076 | 2066 | } else { |
... | ... | @@ -2177,7 +2167,7 @@ |
2177 | 2167 | if (id_priv == cur_id) |
2178 | 2168 | continue; |
2179 | 2169 | |
2180 | - if ((cur_id->state == CMA_LISTEN) || | |
2170 | + if ((cur_id->state == RDMA_CM_LISTEN) || | |
2181 | 2171 | !reuseaddr || !cur_id->reuseaddr) { |
2182 | 2172 | cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; |
2183 | 2173 | if (cma_any_addr(cur_addr)) |
2184 | 2174 | |
... | ... | @@ -2280,14 +2270,14 @@ |
2280 | 2270 | int ret; |
2281 | 2271 | |
2282 | 2272 | id_priv = container_of(id, struct rdma_id_private, id); |
2283 | - if (id_priv->state == CMA_IDLE) { | |
2273 | + if (id_priv->state == RDMA_CM_IDLE) { | |
2284 | 2274 | ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; |
2285 | 2275 | ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); |
2286 | 2276 | if (ret) |
2287 | 2277 | return ret; |
2288 | 2278 | } |
2289 | 2279 | |
2290 | - if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) | |
2280 | + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) | |
2291 | 2281 | return -EINVAL; |
2292 | 2282 | |
2293 | 2283 | if (id_priv->reuseaddr) { |
... | ... | @@ -2319,7 +2309,7 @@ |
2319 | 2309 | return 0; |
2320 | 2310 | err: |
2321 | 2311 | id_priv->backlog = 0; |
2322 | - cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); | |
2312 | + cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); | |
2323 | 2313 | return ret; |
2324 | 2314 | } |
2325 | 2315 | EXPORT_SYMBOL(rdma_listen); |
... | ... | @@ -2333,7 +2323,7 @@ |
2333 | 2323 | return -EAFNOSUPPORT; |
2334 | 2324 | |
2335 | 2325 | id_priv = container_of(id, struct rdma_id_private, id); |
2336 | - if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND)) | |
2326 | + if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) | |
2337 | 2327 | return -EINVAL; |
2338 | 2328 | |
2339 | 2329 | ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); |
... | ... | @@ -2360,7 +2350,7 @@ |
2360 | 2350 | if (id_priv->cma_dev) |
2361 | 2351 | cma_release_dev(id_priv); |
2362 | 2352 | err1: |
2363 | - cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE); | |
2353 | + cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); | |
2364 | 2354 | return ret; |
2365 | 2355 | } |
2366 | 2356 | EXPORT_SYMBOL(rdma_bind_addr); |
... | ... | @@ -2433,7 +2423,7 @@ |
2433 | 2423 | struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; |
2434 | 2424 | int ret = 0; |
2435 | 2425 | |
2436 | - if (cma_disable_callback(id_priv, CMA_CONNECT)) | |
2426 | + if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) | |
2437 | 2427 | return 0; |
2438 | 2428 | |
2439 | 2429 | memset(&event, 0, sizeof event); |
... | ... | @@ -2479,7 +2469,7 @@ |
2479 | 2469 | if (ret) { |
2480 | 2470 | /* Destroy the CM ID by returning a non-zero value. */ |
2481 | 2471 | id_priv->cm_id.ib = NULL; |
2482 | - cma_exch(id_priv, CMA_DESTROYING); | |
2472 | + cma_exch(id_priv, RDMA_CM_DESTROYING); | |
2483 | 2473 | mutex_unlock(&id_priv->handler_mutex); |
2484 | 2474 | rdma_destroy_id(&id_priv->id); |
2485 | 2475 | return ret; |
... | ... | @@ -2645,7 +2635,7 @@ |
2645 | 2635 | int ret; |
2646 | 2636 | |
2647 | 2637 | id_priv = container_of(id, struct rdma_id_private, id); |
2648 | - if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT)) | |
2638 | + if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) | |
2649 | 2639 | return -EINVAL; |
2650 | 2640 | |
2651 | 2641 | if (!id->qp) { |
... | ... | @@ -2655,7 +2645,7 @@ |
2655 | 2645 | |
2656 | 2646 | switch (rdma_node_get_transport(id->device->node_type)) { |
2657 | 2647 | case RDMA_TRANSPORT_IB: |
2658 | - if (cma_is_ud_ps(id->ps)) | |
2648 | + if (id->qp_type == IB_QPT_UD) | |
2659 | 2649 | ret = cma_resolve_ib_udp(id_priv, conn_param); |
2660 | 2650 | else |
2661 | 2651 | ret = cma_connect_ib(id_priv, conn_param); |
... | ... | @@ -2672,7 +2662,7 @@ |
2672 | 2662 | |
2673 | 2663 | return 0; |
2674 | 2664 | err: |
2675 | - cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED); | |
2665 | + cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); | |
2676 | 2666 | return ret; |
2677 | 2667 | } |
2678 | 2668 | EXPORT_SYMBOL(rdma_connect); |
... | ... | @@ -2758,7 +2748,10 @@ |
2758 | 2748 | int ret; |
2759 | 2749 | |
2760 | 2750 | id_priv = container_of(id, struct rdma_id_private, id); |
2761 | - if (!cma_comp(id_priv, CMA_CONNECT)) | |
2751 | + | |
2752 | + id_priv->owner = task_pid_nr(current); | |
2753 | + | |
2754 | + if (!cma_comp(id_priv, RDMA_CM_CONNECT)) | |
2762 | 2755 | return -EINVAL; |
2763 | 2756 | |
2764 | 2757 | if (!id->qp && conn_param) { |
... | ... | @@ -2768,7 +2761,7 @@ |
2768 | 2761 | |
2769 | 2762 | switch (rdma_node_get_transport(id->device->node_type)) { |
2770 | 2763 | case RDMA_TRANSPORT_IB: |
2771 | - if (cma_is_ud_ps(id->ps)) | |
2764 | + if (id->qp_type == IB_QPT_UD) | |
2772 | 2765 | ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, |
2773 | 2766 | conn_param->private_data, |
2774 | 2767 | conn_param->private_data_len); |
... | ... | @@ -2829,7 +2822,7 @@ |
2829 | 2822 | |
2830 | 2823 | switch (rdma_node_get_transport(id->device->node_type)) { |
2831 | 2824 | case RDMA_TRANSPORT_IB: |
2832 | - if (cma_is_ud_ps(id->ps)) | |
2825 | + if (id->qp_type == IB_QPT_UD) | |
2833 | 2826 | ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, |
2834 | 2827 | private_data, private_data_len); |
2835 | 2828 | else |
... | ... | @@ -2887,8 +2880,8 @@ |
2887 | 2880 | int ret; |
2888 | 2881 | |
2889 | 2882 | id_priv = mc->id_priv; |
2890 | - if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) && | |
2891 | - cma_disable_callback(id_priv, CMA_ADDR_RESOLVED)) | |
2883 | + if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && | |
2884 | + cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) | |
2892 | 2885 | return 0; |
2893 | 2886 | |
2894 | 2887 | mutex_lock(&id_priv->qp_mutex); |
... | ... | @@ -2912,7 +2905,7 @@ |
2912 | 2905 | |
2913 | 2906 | ret = id_priv->id.event_handler(&id_priv->id, &event); |
2914 | 2907 | if (ret) { |
2915 | - cma_exch(id_priv, CMA_DESTROYING); | |
2908 | + cma_exch(id_priv, RDMA_CM_DESTROYING); | |
2916 | 2909 | mutex_unlock(&id_priv->handler_mutex); |
2917 | 2910 | rdma_destroy_id(&id_priv->id); |
2918 | 2911 | return 0; |
... | ... | @@ -3095,8 +3088,8 @@ |
3095 | 3088 | int ret; |
3096 | 3089 | |
3097 | 3090 | id_priv = container_of(id, struct rdma_id_private, id); |
3098 | - if (!cma_comp(id_priv, CMA_ADDR_BOUND) && | |
3099 | - !cma_comp(id_priv, CMA_ADDR_RESOLVED)) | |
3091 | + if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && | |
3092 | + !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) | |
3100 | 3093 | return -EINVAL; |
3101 | 3094 | |
3102 | 3095 | mc = kmalloc(sizeof *mc, GFP_KERNEL); |
3103 | 3096 | |
3104 | 3097 | |
... | ... | @@ -3261,19 +3254,19 @@ |
3261 | 3254 | static int cma_remove_id_dev(struct rdma_id_private *id_priv) |
3262 | 3255 | { |
3263 | 3256 | struct rdma_cm_event event; |
3264 | - enum cma_state state; | |
3257 | + enum rdma_cm_state state; | |
3265 | 3258 | int ret = 0; |
3266 | 3259 | |
3267 | 3260 | /* Record that we want to remove the device */ |
3268 | - state = cma_exch(id_priv, CMA_DEVICE_REMOVAL); | |
3269 | - if (state == CMA_DESTROYING) | |
3261 | + state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); | |
3262 | + if (state == RDMA_CM_DESTROYING) | |
3270 | 3263 | return 0; |
3271 | 3264 | |
3272 | 3265 | cma_cancel_operation(id_priv, state); |
3273 | 3266 | mutex_lock(&id_priv->handler_mutex); |
3274 | 3267 | |
3275 | 3268 | /* Check for destruction from another callback. */ |
3276 | - if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) | |
3269 | + if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) | |
3277 | 3270 | goto out; |
3278 | 3271 | |
3279 | 3272 | memset(&event, 0, sizeof event); |
... | ... | @@ -3328,6 +3321,100 @@ |
3328 | 3321 | kfree(cma_dev); |
3329 | 3322 | } |
3330 | 3323 | |
3324 | +static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) | |
3325 | +{ | |
3326 | + struct nlmsghdr *nlh; | |
3327 | + struct rdma_cm_id_stats *id_stats; | |
3328 | + struct rdma_id_private *id_priv; | |
3329 | + struct rdma_cm_id *id = NULL; | |
3330 | + struct cma_device *cma_dev; | |
3331 | + int i_dev = 0, i_id = 0; | |
3332 | + | |
3333 | + /* | |
3334 | + * We export all of the IDs as a sequence of messages. Each | |
3335 | + * ID gets its own netlink message. | |
3336 | + */ | |
3337 | + mutex_lock(&lock); | |
3338 | + | |
3339 | + list_for_each_entry(cma_dev, &dev_list, list) { | |
3340 | + if (i_dev < cb->args[0]) { | |
3341 | + i_dev++; | |
3342 | + continue; | |
3343 | + } | |
3344 | + | |
3345 | + i_id = 0; | |
3346 | + list_for_each_entry(id_priv, &cma_dev->id_list, list) { | |
3347 | + if (i_id < cb->args[1]) { | |
3348 | + i_id++; | |
3349 | + continue; | |
3350 | + } | |
3351 | + | |
3352 | + id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq, | |
3353 | + sizeof *id_stats, RDMA_NL_RDMA_CM, | |
3354 | + RDMA_NL_RDMA_CM_ID_STATS); | |
3355 | + if (!id_stats) | |
3356 | + goto out; | |
3357 | + | |
3358 | + memset(id_stats, 0, sizeof *id_stats); | |
3359 | + id = &id_priv->id; | |
3360 | + id_stats->node_type = id->route.addr.dev_addr.dev_type; | |
3361 | + id_stats->port_num = id->port_num; | |
3362 | + id_stats->bound_dev_if = | |
3363 | + id->route.addr.dev_addr.bound_dev_if; | |
3364 | + | |
3365 | + if (id->route.addr.src_addr.ss_family == AF_INET) { | |
3366 | + if (ibnl_put_attr(skb, nlh, | |
3367 | + sizeof(struct sockaddr_in), | |
3368 | + &id->route.addr.src_addr, | |
3369 | + RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) { | |
3370 | + goto out; | |
3371 | + } | |
3372 | + if (ibnl_put_attr(skb, nlh, | |
3373 | + sizeof(struct sockaddr_in), | |
3374 | + &id->route.addr.dst_addr, | |
3375 | + RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) { | |
3376 | + goto out; | |
3377 | + } | |
3378 | + } else if (id->route.addr.src_addr.ss_family == AF_INET6) { | |
3379 | + if (ibnl_put_attr(skb, nlh, | |
3380 | + sizeof(struct sockaddr_in6), | |
3381 | + &id->route.addr.src_addr, | |
3382 | + RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) { | |
3383 | + goto out; | |
3384 | + } | |
3385 | + if (ibnl_put_attr(skb, nlh, | |
3386 | + sizeof(struct sockaddr_in6), | |
3387 | + &id->route.addr.dst_addr, | |
3388 | + RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) { | |
3389 | + goto out; | |
3390 | + } | |
3391 | + } | |
3392 | + | |
3393 | + id_stats->pid = id_priv->owner; | |
3394 | + id_stats->port_space = id->ps; | |
3395 | + id_stats->cm_state = id_priv->state; | |
3396 | + id_stats->qp_num = id_priv->qp_num; | |
3397 | + id_stats->qp_type = id->qp_type; | |
3398 | + | |
3399 | + i_id++; | |
3400 | + } | |
3401 | + | |
3402 | + cb->args[1] = 0; | |
3403 | + i_dev++; | |
3404 | + } | |
3405 | + | |
3406 | +out: | |
3407 | + mutex_unlock(&lock); | |
3408 | + cb->args[0] = i_dev; | |
3409 | + cb->args[1] = i_id; | |
3410 | + | |
3411 | + return skb->len; | |
3412 | +} | |
3413 | + | |
3414 | +static const struct ibnl_client_cbs cma_cb_table[] = { | |
3415 | + [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats }, | |
3416 | +}; | |
3417 | + | |
3331 | 3418 | static int __init cma_init(void) |
3332 | 3419 | { |
3333 | 3420 | int ret; |
... | ... | @@ -3343,6 +3430,10 @@ |
3343 | 3430 | ret = ib_register_client(&cma_client); |
3344 | 3431 | if (ret) |
3345 | 3432 | goto err; |
3433 | + | |
3434 | + if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table)) | |
3435 | + printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n"); | |
3436 | + | |
3346 | 3437 | return 0; |
3347 | 3438 | |
3348 | 3439 | err: |
... | ... | @@ -3355,6 +3446,7 @@ |
3355 | 3446 | |
3356 | 3447 | static void __exit cma_cleanup(void) |
3357 | 3448 | { |
3449 | + ibnl_remove_client(RDMA_NL_RDMA_CM); | |
3358 | 3450 | ib_unregister_client(&cma_client); |
3359 | 3451 | unregister_netdevice_notifier(&cma_nb); |
3360 | 3452 | rdma_addr_unregister_client(&addr_client); |
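The cma_get_id_stats() dump above is driven from userspace through the new NETLINK_RDMA netlink family. Below is a minimal dump client, as a sketch: the NETLINK_RDMA value (20 in the merged tree) comes from the include/linux/netlink.h change that is collapsed in this view, the RDMA_NL_* constants mirror <rdma/rdma_netlink.h> at the end of this diff, and error handling is kept deliberately terse.

    #include <stdio.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>

    #define NETLINK_RDMA                 20  /* from include/linux/netlink.h */
    #define RDMA_NL_RDMA_CM              1
    #define RDMA_NL_RDMA_CM_ID_STATS     0
    #define RDMA_NL_GET_TYPE(client, op) (((client) << 10) + (op))

    int main(void)
    {
            struct sockaddr_nl sa = { .nl_family = AF_NETLINK }; /* pid 0: kernel */
            struct nlmsghdr req = {
                    .nlmsg_len   = NLMSG_LENGTH(0),
                    .nlmsg_type  = RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM,
                                                    RDMA_NL_RDMA_CM_ID_STATS),
                    .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
            };
            char buf[8192];
            int fd, len;

            fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
            if (fd < 0)
                    return 1;
            if (sendto(fd, &req, req.nlmsg_len, 0,
                       (struct sockaddr *) &sa, sizeof(sa)) < 0)
                    return 1;

            /* Each CM ID arrives as one NLM_F_MULTI message carrying a
             * struct rdma_cm_id_stats plus optional src/dst address
             * attributes; the dump ends with NLMSG_DONE. */
            while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
                    struct nlmsghdr *nlh = (struct nlmsghdr *) buf;

                    for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
                            if (nlh->nlmsg_type == NLMSG_DONE)
                                    return 0;
                            printf("rdma_cm_id_stats record, %u bytes\n",
                                   nlh->nlmsg_len);
                    }
            }
            return 0;
    }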
drivers/infiniband/core/device.c
... | ... | @@ -38,6 +38,7 @@ |
38 | 38 | #include <linux/slab.h> |
39 | 39 | #include <linux/init.h> |
40 | 40 | #include <linux/mutex.h> |
41 | +#include <rdma/rdma_netlink.h> | |
41 | 42 | |
42 | 43 | #include "core_priv.h" |
43 | 44 | |
44 | 45 | |
45 | 46 | |
46 | 47 | |
47 | 48 | |
48 | 49 | |
... | ... | @@ -725,22 +726,40 @@ |
725 | 726 | return -ENOMEM; |
726 | 727 | |
727 | 728 | ret = ib_sysfs_setup(); |
728 | - if (ret) | |
729 | + if (ret) { | |
729 | 730 | printk(KERN_WARNING "Couldn't create InfiniBand device class\n"); |
731 | + goto err; | |
732 | + } | |
730 | 733 | |
734 | + ret = ibnl_init(); | |
735 | + if (ret) { | |
736 | + printk(KERN_WARNING "Couldn't init IB netlink interface\n"); | |
737 | + goto err_sysfs; | |
738 | + } | |
739 | + | |
731 | 740 | ret = ib_cache_setup(); |
732 | 741 | if (ret) { |
733 | 742 | printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n"); |
734 | - ib_sysfs_cleanup(); | |
735 | - destroy_workqueue(ib_wq); | |
743 | + goto err_nl; | |
736 | 744 | } |
737 | 745 | |
746 | + return 0; | |
747 | + | |
748 | +err_nl: | |
749 | + ibnl_cleanup(); | |
750 | + | |
751 | +err_sysfs: | |
752 | + ib_sysfs_cleanup(); | |
753 | + | |
754 | +err: | |
755 | + destroy_workqueue(ib_wq); | |
738 | 756 | return ret; |
739 | 757 | } |
740 | 758 | |
741 | 759 | static void __exit ib_core_cleanup(void) |
742 | 760 | { |
743 | 761 | ib_cache_cleanup(); |
762 | + ibnl_cleanup(); | |
744 | 763 | ib_sysfs_cleanup(); |
745 | 764 | /* Make sure that any pending umem accounting work is done. */ |
746 | 765 | destroy_workqueue(ib_wq); |
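The device.c change is an unwind fix: previously ib_core_init() only warned when ib_sysfs_setup() failed and kept going, and the one failure path that did clean up had to repeat the teardown inline. The new code bails out through a chain of labels that undo completed steps in reverse order. A self-contained sketch of that shape (the stubbed setup/teardown functions are illustrative, not kernel APIs):

    #include <stdio.h>

    static int  setup_a(void)    { return 0; }          /* stub: succeeds */
    static int  setup_b(void)    { return -1; }         /* stub: fails */
    static void teardown_a(void) { puts("undo a"); }

    static int example_init(void)
    {
            int ret;

            ret = setup_a();
            if (ret)
                    goto err;           /* nothing to undo yet */
            ret = setup_b();
            if (ret)
                    goto err_a;         /* undo only what succeeded */
            return 0;

    err_a:
            teardown_a();
    err:
            return ret;
    }

    int main(void)
    {
            return example_init() ? 1 : 0;
    }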
drivers/infiniband/core/mad.c
... | ... | @@ -276,6 +276,13 @@ |
276 | 276 | goto error1; |
277 | 277 | } |
278 | 278 | |
279 | + /* Verify the QP requested is supported. For example, Ethernet devices | |
280 | + * will not have QP0 */ | |
281 | + if (!port_priv->qp_info[qpn].qp) { | |
282 | + ret = ERR_PTR(-EPROTONOSUPPORT); | |
283 | + goto error1; | |
284 | + } | |
285 | + | |
279 | 286 | /* Allocate structures */ |
280 | 287 | mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); |
281 | 288 | if (!mad_agent_priv) { |
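The mad.c check matters for RoCE: an Ethernet port has no QP0, so port_priv->qp_info[qpn].qp is NULL there, and registering an SMI agent must fail cleanly instead of oopsing later. A sketch of what a caller now sees; ib_register_mad_agent() is the real kernel API, while the surrounding fragment (agent, device, reg_req, the handlers) is illustrative:

    agent = ib_register_mad_agent(device, port_num, IB_QPT_SMI,
                                  &reg_req, 0, send_handler,
                                  recv_handler, context);
    if (IS_ERR(agent)) {
            if (PTR_ERR(agent) == -EPROTONOSUPPORT)
                    dev_warn(&device->dev,
                             "port has no QP0; SMI not supported\n");
            return PTR_ERR(agent);
    }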
drivers/infiniband/core/netlink.c
1 | +/* | |
2 | + * Copyright (c) 2010 Voltaire Inc. All rights reserved. | |
3 | + * | |
4 | + * This software is available to you under a choice of one of two | |
5 | + * licenses. You may choose to be licensed under the terms of the GNU | |
6 | + * General Public License (GPL) Version 2, available from the file | |
7 | + * COPYING in the main directory of this source tree, or the | |
8 | + * OpenIB.org BSD license below: | |
9 | + * | |
10 | + * Redistribution and use in source and binary forms, with or | |
11 | + * without modification, are permitted provided that the following | |
12 | + * conditions are met: | |
13 | + * | |
14 | + * - Redistributions of source code must retain the above | |
15 | + * copyright notice, this list of conditions and the following | |
16 | + * disclaimer. | |
17 | + * | |
18 | + * - Redistributions in binary form must reproduce the above | |
19 | + * copyright notice, this list of conditions and the following | |
20 | + * disclaimer in the documentation and/or other materials | |
21 | + * provided with the distribution. | |
22 | + * | |
23 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | + * SOFTWARE. | |
31 | + */ | |
32 | + | |
33 | +#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ | |
34 | + | |
35 | +#include <net/netlink.h> | |
36 | +#include <net/net_namespace.h> | |
37 | +#include <net/sock.h> | |
38 | +#include <rdma/rdma_netlink.h> | |
39 | + | |
40 | +struct ibnl_client { | |
41 | + struct list_head list; | |
42 | + int index; | |
43 | + int nops; | |
44 | + const struct ibnl_client_cbs *cb_table; | |
45 | +}; | |
46 | + | |
47 | +static DEFINE_MUTEX(ibnl_mutex); | |
48 | +static struct sock *nls; | |
49 | +static LIST_HEAD(client_list); | |
50 | + | |
51 | +int ibnl_add_client(int index, int nops, | |
52 | + const struct ibnl_client_cbs cb_table[]) | |
53 | +{ | |
54 | + struct ibnl_client *cur; | |
55 | + struct ibnl_client *nl_client; | |
56 | + | |
57 | + nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL); | |
58 | + if (!nl_client) | |
59 | + return -ENOMEM; | |
60 | + | |
61 | + nl_client->index = index; | |
62 | + nl_client->nops = nops; | |
63 | + nl_client->cb_table = cb_table; | |
64 | + | |
65 | + mutex_lock(&ibnl_mutex); | |
66 | + | |
67 | + list_for_each_entry(cur, &client_list, list) { | |
68 | + if (cur->index == index) { | |
69 | + pr_warn("Client for %d already exists\n", index); | |
70 | + mutex_unlock(&ibnl_mutex); | |
71 | + kfree(nl_client); | |
72 | + return -EINVAL; | |
73 | + } | |
74 | + } | |
75 | + | |
76 | + list_add_tail(&nl_client->list, &client_list); | |
77 | + | |
78 | + mutex_unlock(&ibnl_mutex); | |
79 | + | |
80 | + return 0; | |
81 | +} | |
82 | +EXPORT_SYMBOL(ibnl_add_client); | |
83 | + | |
84 | +int ibnl_remove_client(int index) | |
85 | +{ | |
86 | + struct ibnl_client *cur, *next; | |
87 | + | |
88 | + mutex_lock(&ibnl_mutex); | |
89 | + list_for_each_entry_safe(cur, next, &client_list, list) { | |
90 | + if (cur->index == index) { | |
91 | + list_del(&(cur->list)); | |
92 | + mutex_unlock(&ibnl_mutex); | |
93 | + kfree(cur); | |
94 | + return 0; | |
95 | + } | |
96 | + } | |
97 | + pr_warn("Can't remove callback for client idx %d. Not found\n", index); | |
98 | + mutex_unlock(&ibnl_mutex); | |
99 | + | |
100 | + return -EINVAL; | |
101 | +} | |
102 | +EXPORT_SYMBOL(ibnl_remove_client); | |
103 | + | |
104 | +void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, | |
105 | + int len, int client, int op) | |
106 | +{ | |
107 | + unsigned char *prev_tail; | |
108 | + | |
109 | + prev_tail = skb_tail_pointer(skb); | |
110 | + *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), | |
111 | + len, NLM_F_MULTI); | |
112 | + (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail; | |
113 | + return NLMSG_DATA(*nlh); | |
114 | + | |
115 | +nlmsg_failure: | |
116 | + nlmsg_trim(skb, prev_tail); | |
117 | + return NULL; | |
118 | +} | |
119 | +EXPORT_SYMBOL(ibnl_put_msg); | |
120 | + | |
121 | +int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, | |
122 | + int len, void *data, int type) | |
123 | +{ | |
124 | + unsigned char *prev_tail; | |
125 | + | |
126 | + prev_tail = skb_tail_pointer(skb); | |
127 | + NLA_PUT(skb, type, len, data); | |
128 | + nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail; | |
129 | + return 0; | |
130 | + | |
131 | +nla_put_failure: | |
132 | + nlmsg_trim(skb, prev_tail - nlh->nlmsg_len); | |
133 | + return -EMSGSIZE; | |
134 | +} | |
135 | +EXPORT_SYMBOL(ibnl_put_attr); | |
136 | + | |
137 | +static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |
138 | +{ | |
139 | + struct ibnl_client *client; | |
140 | + int type = nlh->nlmsg_type; | |
141 | + int index = RDMA_NL_GET_CLIENT(type); | |
142 | + int op = RDMA_NL_GET_OP(type); | |
143 | + | |
144 | + list_for_each_entry(client, &client_list, list) { | |
145 | + if (client->index == index) { | |
146 | + if (op < 0 || op >= client->nops || | |
147 | + !client->cb_table[RDMA_NL_GET_OP(op)].dump) | |
148 | + return -EINVAL; | |
149 | + return netlink_dump_start(nls, skb, nlh, | |
150 | + client->cb_table[op].dump, | |
151 | + NULL); | |
152 | + } | |
153 | + } | |
154 | + | |
155 | + pr_info("Index %d wasn't found in client list\n", index); | |
156 | + return -EINVAL; | |
157 | +} | |
158 | + | |
159 | +static void ibnl_rcv(struct sk_buff *skb) | |
160 | +{ | |
161 | + mutex_lock(&ibnl_mutex); | |
162 | + netlink_rcv_skb(skb, &ibnl_rcv_msg); | |
163 | + mutex_unlock(&ibnl_mutex); | |
164 | +} | |
165 | + | |
166 | +int __init ibnl_init(void) | |
167 | +{ | |
168 | + nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv, | |
169 | + NULL, THIS_MODULE); | |
170 | + if (!nls) { | |
171 | + pr_warn("Failed to create netlink socket\n"); | |
172 | + return -ENOMEM; | |
173 | + } | |
174 | + | |
175 | + return 0; | |
176 | +} | |
177 | + | |
178 | +void ibnl_cleanup(void) | |
179 | +{ | |
180 | + struct ibnl_client *cur, *next; | |
181 | + | |
182 | + mutex_lock(&ibnl_mutex); | |
183 | + list_for_each_entry_safe(cur, next, &client_list, list) { | |
184 | + list_del(&(cur->list)); | |
185 | + kfree(cur); | |
186 | + } | |
187 | + mutex_unlock(&ibnl_mutex); | |
188 | + | |
189 | + netlink_kernel_release(nls); | |
190 | +} |
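ibnl_rcv_msg() above splits nlmsg_type with the RDMA_NL_GET_CLIENT/OP macros defined in <rdma/rdma_netlink.h> (shown at the end of this diff): the client index occupies bits 10-15 and the op bits 0-9. A tiny self-contained check of the arithmetic:

    #include <assert.h>

    #define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10)
    #define RDMA_NL_GET_OP(type)     (type & ((1 << 10) - 1))
    #define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op)

    int main(void)
    {
            /* RDMA_NL_RDMA_CM = 1, RDMA_NL_RDMA_CM_ID_STATS = 0 */
            int type = RDMA_NL_GET_TYPE(1, 0);

            assert(type == 1024);                  /* 1 << 10 */
            assert(RDMA_NL_GET_CLIENT(type) == 1);
            assert(RDMA_NL_GET_OP(type) == 0);
            return 0;
    }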
drivers/infiniband/core/ucma.c
... | ... | @@ -367,13 +367,28 @@ |
367 | 367 | return ret; |
368 | 368 | } |
369 | 369 | |
370 | -static ssize_t ucma_create_id(struct ucma_file *file, | |
371 | - const char __user *inbuf, | |
372 | - int in_len, int out_len) | |
370 | +static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type) | |
373 | 371 | { |
372 | + switch (cmd->ps) { | |
373 | + case RDMA_PS_TCP: | |
374 | + *qp_type = IB_QPT_RC; | |
375 | + return 0; | |
376 | + case RDMA_PS_UDP: | |
377 | + case RDMA_PS_IPOIB: | |
378 | + *qp_type = IB_QPT_UD; | |
379 | + return 0; | |
380 | + default: | |
381 | + return -EINVAL; | |
382 | + } | |
383 | +} | |
384 | + | |
385 | +static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, | |
386 | + int in_len, int out_len) | |
387 | +{ | |
374 | 388 | struct rdma_ucm_create_id cmd; |
375 | 389 | struct rdma_ucm_create_id_resp resp; |
376 | 390 | struct ucma_context *ctx; |
391 | + enum ib_qp_type qp_type; | |
377 | 392 | int ret; |
378 | 393 | |
379 | 394 | if (out_len < sizeof(resp)) |
... | ... | @@ -382,6 +397,10 @@ |
382 | 397 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) |
383 | 398 | return -EFAULT; |
384 | 399 | |
400 | + ret = ucma_get_qp_type(&cmd, &qp_type); | |
401 | + if (ret) | |
402 | + return ret; | |
403 | + | |
385 | 404 | mutex_lock(&file->mut); |
386 | 405 | ctx = ucma_alloc_ctx(file); |
387 | 406 | mutex_unlock(&file->mut); |
... | ... | @@ -389,7 +408,7 @@ |
389 | 408 | return -ENOMEM; |
390 | 409 | |
391 | 410 | ctx->uid = cmd.uid; |
392 | - ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps); | |
411 | + ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type); | |
393 | 412 | if (IS_ERR(ctx->cm_id)) { |
394 | 413 | ret = PTR_ERR(ctx->cm_id); |
395 | 414 | goto err1; |
... | ... | @@ -1338,9 +1357,11 @@ |
1338 | 1357 | }; |
1339 | 1358 | |
1340 | 1359 | static struct miscdevice ucma_misc = { |
1341 | - .minor = MISC_DYNAMIC_MINOR, | |
1342 | - .name = "rdma_cm", | |
1343 | - .fops = &ucma_fops, | |
1360 | + .minor = MISC_DYNAMIC_MINOR, | |
1361 | + .name = "rdma_cm", | |
1362 | + .nodename = "infiniband/rdma_cm", | |
1363 | + .mode = 0666, | |
1364 | + .fops = &ucma_fops, | |
1344 | 1365 | }; |
1345 | 1366 | |
1346 | 1367 | static ssize_t show_abi_version(struct device *dev, |
drivers/infiniband/core/user_mad.c
... | ... | @@ -1176,6 +1176,11 @@ |
1176 | 1176 | kref_put(&umad_dev->ref, ib_umad_release_dev); |
1177 | 1177 | } |
1178 | 1178 | |
1179 | +static char *umad_devnode(struct device *dev, mode_t *mode) | |
1180 | +{ | |
1181 | + return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); | |
1182 | +} | |
1183 | + | |
1179 | 1184 | static int __init ib_umad_init(void) |
1180 | 1185 | { |
1181 | 1186 | int ret; |
... | ... | @@ -1193,6 +1198,8 @@ |
1193 | 1198 | printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n"); |
1194 | 1199 | goto out_chrdev; |
1195 | 1200 | } |
1201 | + | |
1202 | + umad_class->devnode = umad_devnode; | |
1196 | 1203 | |
1197 | 1204 | ret = class_create_file(umad_class, &class_attr_abi_version.attr); |
1198 | 1205 | if (ret) { |
drivers/infiniband/core/uverbs_main.c
... | ... | @@ -824,6 +824,12 @@ |
824 | 824 | kfree(uverbs_dev); |
825 | 825 | } |
826 | 826 | |
827 | +static char *uverbs_devnode(struct device *dev, mode_t *mode) | |
828 | +{ | |
829 | + *mode = 0666; | |
830 | + return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); | |
831 | +} | |
832 | + | |
827 | 833 | static int __init ib_uverbs_init(void) |
828 | 834 | { |
829 | 835 | int ret; |
... | ... | @@ -841,6 +847,8 @@ |
841 | 847 | printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n"); |
842 | 848 | goto out_chrdev; |
843 | 849 | } |
850 | + | |
851 | + uverbs_class->devnode = uverbs_devnode; | |
844 | 852 | |
845 | 853 | ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); |
846 | 854 | if (ret) { |
drivers/infiniband/hw/cxgb3/iwch_cm.c
... | ... | @@ -914,7 +914,7 @@ |
914 | 914 | goto err; |
915 | 915 | |
916 | 916 | if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { |
917 | - iwch_post_zb_read(ep->com.qp); | |
917 | + iwch_post_zb_read(ep); | |
918 | 918 | } |
919 | 919 | |
920 | 920 | goto out; |
... | ... | @@ -1078,6 +1078,8 @@ |
1078 | 1078 | struct iwch_ep *ep = ctx; |
1079 | 1079 | struct cpl_wr_ack *hdr = cplhdr(skb); |
1080 | 1080 | unsigned int credits = ntohs(hdr->credits); |
1081 | + unsigned long flags; | |
1082 | + int post_zb = 0; | |
1081 | 1083 | |
1082 | 1084 | PDBG("%s ep %p credits %u\n", __func__, ep, credits); |
1083 | 1085 | |
1084 | 1086 | |
1085 | 1087 | |
1086 | 1088 | |
1087 | 1089 | |
1088 | 1090 | |
... | ... | @@ -1087,28 +1089,34 @@ |
1087 | 1089 | return CPL_RET_BUF_DONE; |
1088 | 1090 | } |
1089 | 1091 | |
1092 | + spin_lock_irqsave(&ep->com.lock, flags); | |
1090 | 1093 | BUG_ON(credits != 1); |
1091 | 1094 | dst_confirm(ep->dst); |
1092 | 1095 | if (!ep->mpa_skb) { |
1093 | 1096 | PDBG("%s rdma_init wr_ack ep %p state %u\n", |
1094 | - __func__, ep, state_read(&ep->com)); | |
1097 | + __func__, ep, ep->com.state); | |
1095 | 1098 | if (ep->mpa_attr.initiator) { |
1096 | 1099 | PDBG("%s initiator ep %p state %u\n", |
1097 | - __func__, ep, state_read(&ep->com)); | |
1098 | - if (peer2peer) | |
1099 | - iwch_post_zb_read(ep->com.qp); | |
1100 | + __func__, ep, ep->com.state); | |
1101 | + if (peer2peer && ep->com.state == FPDU_MODE) | |
1102 | + post_zb = 1; | |
1100 | 1103 | } else { |
1101 | 1104 | PDBG("%s responder ep %p state %u\n", |
1102 | - __func__, ep, state_read(&ep->com)); | |
1103 | - ep->com.rpl_done = 1; | |
1104 | - wake_up(&ep->com.waitq); | |
1105 | + __func__, ep, ep->com.state); | |
1106 | + if (ep->com.state == MPA_REQ_RCVD) { | |
1107 | + ep->com.rpl_done = 1; | |
1108 | + wake_up(&ep->com.waitq); | |
1109 | + } | |
1105 | 1110 | } |
1106 | 1111 | } else { |
1107 | 1112 | PDBG("%s lsm ack ep %p state %u freeing skb\n", |
1108 | - __func__, ep, state_read(&ep->com)); | |
1113 | + __func__, ep, ep->com.state); | |
1109 | 1114 | kfree_skb(ep->mpa_skb); |
1110 | 1115 | ep->mpa_skb = NULL; |
1111 | 1116 | } |
1117 | + spin_unlock_irqrestore(&ep->com.lock, flags); | |
1118 | + if (post_zb) | |
1119 | + iwch_post_zb_read(ep); | |
1112 | 1120 | return CPL_RET_BUF_DONE; |
1113 | 1121 | } |
1114 | 1122 |
drivers/infiniband/hw/cxgb3/iwch_provider.h
... | ... | @@ -332,7 +332,7 @@ |
332 | 332 | struct ib_mw_bind *mw_bind); |
333 | 333 | int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); |
334 | 334 | int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); |
335 | -int iwch_post_zb_read(struct iwch_qp *qhp); | |
335 | +int iwch_post_zb_read(struct iwch_ep *ep); | |
336 | 336 | int iwch_register_device(struct iwch_dev *dev); |
337 | 337 | void iwch_unregister_device(struct iwch_dev *dev); |
338 | 338 | void stop_read_rep_timer(struct iwch_qp *qhp); |
drivers/infiniband/hw/cxgb3/iwch_qp.c
... | ... | @@ -738,7 +738,7 @@ |
738 | 738 | } |
739 | 739 | } |
740 | 740 | |
741 | -int iwch_post_zb_read(struct iwch_qp *qhp) | |
741 | +int iwch_post_zb_read(struct iwch_ep *ep) | |
742 | 742 | { |
743 | 743 | union t3_wr *wqe; |
744 | 744 | struct sk_buff *skb; |
745 | 745 | |
... | ... | @@ -761,10 +761,10 @@ |
761 | 761 | wqe->read.local_len = cpu_to_be32(0); |
762 | 762 | wqe->read.local_to = cpu_to_be64(1); |
763 | 763 | wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ)); |
764 | - wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)| | |
764 | + wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)| | |
765 | 765 | V_FW_RIWR_LEN(flit_cnt)); |
766 | 766 | skb->priority = CPL_PRIORITY_DATA; |
767 | - return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); | |
767 | + return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb); | |
768 | 768 | } |
769 | 769 | |
770 | 770 | /* |
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
... | ... | @@ -35,7 +35,7 @@ |
35 | 35 | #include <linux/list.h> |
36 | 36 | #include <linux/spinlock.h> |
37 | 37 | #include <linux/idr.h> |
38 | -#include <linux/workqueue.h> | |
38 | +#include <linux/completion.h> | |
39 | 39 | #include <linux/netdevice.h> |
40 | 40 | #include <linux/sched.h> |
41 | 41 | #include <linux/pci.h> |
42 | 42 | |
43 | 43 | |
44 | 44 | |
... | ... | @@ -131,28 +131,21 @@ |
131 | 131 | |
132 | 132 | #define C4IW_WR_TO (10*HZ) |
133 | 133 | |
134 | -enum { | |
135 | - REPLY_READY = 0, | |
136 | -}; | |
137 | - | |
138 | 134 | struct c4iw_wr_wait { |
139 | - wait_queue_head_t wait; | |
140 | - unsigned long status; | |
135 | + struct completion completion; | |
141 | 136 | int ret; |
142 | 137 | }; |
143 | 138 | |
144 | 139 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) |
145 | 140 | { |
146 | 141 | wr_waitp->ret = 0; |
147 | - wr_waitp->status = 0; | |
148 | - init_waitqueue_head(&wr_waitp->wait); | |
142 | + init_completion(&wr_waitp->completion); | |
149 | 143 | } |
150 | 144 | |
151 | 145 | static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) |
152 | 146 | { |
153 | 147 | wr_waitp->ret = ret; |
154 | - set_bit(REPLY_READY, &wr_waitp->status); | |
155 | - wake_up(&wr_waitp->wait); | |
148 | + complete(&wr_waitp->completion); | |
156 | 149 | } |
157 | 150 | |
158 | 151 | static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, |
... | ... | @@ -164,8 +157,7 @@ |
164 | 157 | int ret; |
165 | 158 | |
166 | 159 | do { |
167 | - ret = wait_event_timeout(wr_waitp->wait, | |
168 | - test_and_clear_bit(REPLY_READY, &wr_waitp->status), to); | |
160 | + ret = wait_for_completion_timeout(&wr_waitp->completion, to); | |
169 | 161 | if (!ret) { |
170 | 162 | printk(KERN_ERR MOD "%s - Device %s not responding - " |
171 | 163 | "tid %u qpid %u\n", func, |
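The iw_cxgb4.h change swaps an open-coded waitqueue plus REPLY_READY bit for a struct completion, which packages the same done-flag-and-waitqueue pair behind one API: a complete() issued before the waiter arrives is remembered, and wait_for_completion_timeout() returns 0 only when the timeout elapses. A minimal kernel-style sketch of the idiom (function names are illustrative):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static struct completion reply_done;

    /* Runs in the firmware-reply path (sketch). */
    static void reply_arrived(void)
    {
            complete(&reply_done);  /* recorded even if no one waits yet */
    }

    static int wait_for_reply(void)
    {
            init_completion(&reply_done);
            /* ... post the work request that ends in reply_arrived() ... */
            if (!wait_for_completion_timeout(&reply_done, 10 * HZ))
                    return -ETIMEDOUT;  /* 0 means the timeout elapsed */
            return 0;
    }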
drivers/infiniband/hw/nes/nes.c
... | ... | @@ -1138,7 +1138,9 @@ |
1138 | 1138 | u32 i = 0; |
1139 | 1139 | struct nes_device *nesdev; |
1140 | 1140 | |
1141 | - strict_strtoul(buf, 0, &wqm_quanta_value); | |
1141 | + if (kstrtoul(buf, 0, &wqm_quanta_value) < 0) | |
1142 | + return -EINVAL; | |
1143 | + | |
1142 | 1144 | list_for_each_entry(nesdev, &nes_dev_list, list) { |
1143 | 1145 | if (i == ee_flsh_adapter) { |
1144 | 1146 | nesdev->nesadapter->wqm_quanta = wqm_quanta_value; |
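The nes fix addresses an ignored conversion result: strict_strtoul() could fail and leave wqm_quanta_value unset, yet the old code used the value anyway (kstrtoul(), which the fix adopts, is the interface that superseded strict_strtoul()). A userspace analogue of the checked-parse pattern, as a sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Succeed only if the whole string converts cleanly. */
    static int parse_ulong(const char *s, unsigned long *out)
    {
            char *end;

            errno = 0;
            *out = strtoul(s, &end, 0);
            if (errno || end == s || *end != '\0')
                    return -EINVAL;
            return 0;
    }

    int main(void)
    {
            unsigned long v;

            if (parse_ulong("0x10", &v) == 0)
                    printf("%lu\n", v);     /* prints 16 */
            return 0;
    }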
drivers/infiniband/hw/qib/Kconfig
drivers/infiniband/ulp/iser/iser_verbs.c
... | ... | @@ -548,7 +548,7 @@ |
548 | 548 | iser_conn_get(ib_conn); /* ref ib conn's cma id */ |
549 | 549 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, |
550 | 550 | (void *)ib_conn, |
551 | - RDMA_PS_TCP); | |
551 | + RDMA_PS_TCP, IB_QPT_RC); | |
552 | 552 | if (IS_ERR(ib_conn->cma_id)) { |
553 | 553 | err = PTR_ERR(ib_conn->cma_id); |
554 | 554 | iser_err("rdma_create_id failed: %d\n", err); |
drivers/infiniband/ulp/srp/ib_srp.c
... | ... | @@ -1147,7 +1147,7 @@ |
1147 | 1147 | static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) |
1148 | 1148 | { |
1149 | 1149 | struct ib_device *dev = target->srp_host->srp_dev->dev; |
1150 | - struct srp_iu *iu = (struct srp_iu *) wc->wr_id; | |
1150 | + struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; | |
1151 | 1151 | int res; |
1152 | 1152 | u8 opcode; |
1153 | 1153 | |
... | ... | @@ -1231,7 +1231,7 @@ |
1231 | 1231 | break; |
1232 | 1232 | } |
1233 | 1233 | |
1234 | - iu = (struct srp_iu *) wc.wr_id; | |
1234 | + iu = (struct srp_iu *) (uintptr_t) wc.wr_id; | |
1235 | 1235 | list_add(&iu->list, &target->free_tx); |
1236 | 1236 | } |
1237 | 1237 | } |
include/linux/netlink.h
include/rdma/Kbuild
include/rdma/ib_user_cm.h
include/rdma/rdma_cm.h
... | ... | @@ -111,6 +111,20 @@ |
111 | 111 | } param; |
112 | 112 | }; |
113 | 113 | |
114 | +enum rdma_cm_state { | |
115 | + RDMA_CM_IDLE, | |
116 | + RDMA_CM_ADDR_QUERY, | |
117 | + RDMA_CM_ADDR_RESOLVED, | |
118 | + RDMA_CM_ROUTE_QUERY, | |
119 | + RDMA_CM_ROUTE_RESOLVED, | |
120 | + RDMA_CM_CONNECT, | |
121 | + RDMA_CM_DISCONNECT, | |
122 | + RDMA_CM_ADDR_BOUND, | |
123 | + RDMA_CM_LISTEN, | |
124 | + RDMA_CM_DEVICE_REMOVAL, | |
125 | + RDMA_CM_DESTROYING | |
126 | +}; | |
127 | + | |
114 | 128 | struct rdma_cm_id; |
115 | 129 | |
116 | 130 | /** |
... | ... | @@ -130,6 +144,7 @@ |
130 | 144 | rdma_cm_event_handler event_handler; |
131 | 145 | struct rdma_route route; |
132 | 146 | enum rdma_port_space ps; |
147 | + enum ib_qp_type qp_type; | |
133 | 148 | u8 port_num; |
134 | 149 | }; |
135 | 150 | |
136 | 151 | |
... | ... | @@ -140,9 +155,11 @@ |
140 | 155 | * returned rdma_id. |
141 | 156 | * @context: User specified context associated with the id. |
142 | 157 | * @ps: RDMA port space. |
158 | +  * @qp_type: Type of queue pair associated with the id. | |
143 | 159 | */ |
144 | 160 | struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, |
145 | - void *context, enum rdma_port_space ps); | |
161 | + void *context, enum rdma_port_space ps, | |
162 | + enum ib_qp_type qp_type); | |
146 | 163 | |
147 | 164 | /** |
148 | 165 | * rdma_destroy_id - Destroys an RDMA identifier. |
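Every rdma_create_id() caller must now state its QP type up front; as the call sites later in this diff show, all the RDMA_PS_TCP users pass IB_QPT_RC. A minimal sketch of the updated call, where my_handler and my_ctx are the consumer's own callback and context rather than kernel symbols:

#include <linux/err.h>
#include <rdma/rdma_cm.h>

static int demo_create_id(rdma_cm_event_handler my_handler, void *my_ctx)
{
	struct rdma_cm_id *id;

	/* A connected service (TCP port space) pairs with a reliable
	 * connected QP; datagram port spaces would pass IB_QPT_UD. */
	id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	rdma_destroy_id(id);
	return 0;
}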
include/rdma/rdma_netlink.h
1 | +#ifndef _RDMA_NETLINK_H | |
2 | +#define _RDMA_NETLINK_H | |
3 | + | |
4 | +#include <linux/types.h> | |
5 | + | |
6 | +enum { | |
7 | + RDMA_NL_RDMA_CM = 1 | |
8 | +}; | |
9 | + | |
10 | +#define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10) | |
11 | +#define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1)) | |
12 | +#define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op) | |
13 | + | |
14 | +enum { | |
15 | + RDMA_NL_RDMA_CM_ID_STATS = 0, | |
16 | + RDMA_NL_RDMA_CM_NUM_OPS | |
17 | +}; | |
18 | + | |
19 | +enum { | |
20 | + RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1, | |
21 | + RDMA_NL_RDMA_CM_ATTR_DST_ADDR, | |
22 | + RDMA_NL_RDMA_CM_NUM_ATTR, | |
23 | +}; | |
24 | + | |
25 | +struct rdma_cm_id_stats { | |
26 | + __u32 qp_num; | |
27 | + __u32 bound_dev_if; | |
28 | + __u32 port_space; | |
29 | + __s32 pid; | |
30 | + __u8 cm_state; | |
31 | + __u8 node_type; | |
32 | + __u8 port_num; | |
33 | + __u8 qp_type; | |
34 | +}; | |
35 | + | |
36 | +#ifdef __KERNEL__ | |
37 | + | |
38 | +#include <linux/netlink.h> | |
39 | + | |
40 | +struct ibnl_client_cbs { | |
41 | + int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb); | |
42 | +}; | |
43 | + | |
44 | +int ibnl_init(void); | |
45 | +void ibnl_cleanup(void); | |
46 | + | |
47 | +/** | |
48 | +  * Add a client to the list of IB netlink exporters. | |
49 | + * @index: Index of the added client | |
50 | +  * @nops: Number of ops supported by the added client. | |
51 | + * @cb_table: A table for op->callback | |
52 | + * | |
53 | + * Returns 0 on success or a negative error code. | |
54 | + */ | |
55 | +int ibnl_add_client(int index, int nops, | |
56 | + const struct ibnl_client_cbs cb_table[]); | |
57 | + | |
58 | +/** | |
59 | + * Remove a client from IB netlink. | |
60 | + * @index: Index of the removed IB client. | |
61 | + * | |
62 | + * Returns 0 on success or a negative error code. | |
63 | + */ | |
64 | +int ibnl_remove_client(int index); | |
65 | + | |
66 | +/** | |
67 | + * Put a new message in a supplied skb. | |
68 | + * @skb: The netlink skb. | |
69 | + * @nlh: Pointer to put the header of the new netlink message. | |
70 | + * @seq: The message sequence number. | |
71 | + * @len: The requested message length to allocate. | |
72 | + * @client: Calling IB netlink client. | |
73 | + * @op: message content op. | |
74 | + * Returns the allocated buffer on success and NULL on failure. | |
75 | + */ | |
76 | +void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, | |
77 | + int len, int client, int op); | |
78 | +/** | |
79 | + * Put a new attribute in a supplied skb. | |
80 | + * @skb: The netlink skb. | |
81 | + * @nlh: Header of the netlink message to append the attribute to. | |
82 | + * @len: The length of the attribute data. | |
83 | + * @data: The attribute data to put. | |
84 | + * @type: The attribute type. | |
85 | +  * Returns 0 on success and a negative error code on failure. | |
86 | + */ | |
87 | +int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, | |
88 | + int len, void *data, int type); | |
89 | + | |
90 | +#endif /* __KERNEL__ */ | |
91 | + | |
92 | +#endif /* _RDMA_NETLINK_H */ |
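The type macros above split a 16-bit netlink message type into a client index (bits 10-15) and an op (bits 0-9), so RDMA_NL_GET_CLIENT() and RDMA_NL_GET_OP() invert RDMA_NL_GET_TYPE(). A hedged sketch of how a client might register a dump callback with this infrastructure; demo_stats_dump and demo_cb_table are hypothetical, and only the ibnl_* calls and RDMA_NL_* constants come from the header:

#include <rdma/rdma_netlink.h>

/* Hypothetical dump callback; a real one would fill @skb with
 * RDMA_NL_RDMA_CM_ATTR_* attributes via ibnl_put_msg()/ibnl_put_attr(). */
static int demo_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return 0;
}

static const struct ibnl_client_cbs demo_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = demo_stats_dump },
};

static int demo_register(void)
{
	/* Userspace addresses this client with message type
	 * RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_ID_STATS). */
	return ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS,
			       demo_cb_table);
}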
net/9p/trans_rdma.c
... | ... | @@ -589,7 +589,8 @@ |
589 | 589 | return -ENOMEM; |
590 | 590 | |
591 | 591 | /* Create the RDMA CM ID */ |
592 | - rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP); | |
592 | + rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP, | |
593 | + IB_QPT_RC); | |
593 | 594 | if (IS_ERR(rdma->cm_id)) |
594 | 595 | goto error; |
595 | 596 |
net/rds/ib.c
... | ... | @@ -325,7 +325,7 @@ |
325 | 325 | /* Create a CMA ID and try to bind it. This catches both |
326 | 326 | * IB and iWARP capable NICs. |
327 | 327 | */ |
328 | - cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); | |
328 | + cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); | |
329 | 329 | if (IS_ERR(cm_id)) |
330 | 330 | return PTR_ERR(cm_id); |
331 | 331 |
net/rds/ib_cm.c
... | ... | @@ -587,7 +587,7 @@ |
587 | 587 | /* XXX I wonder what affect the port space has */ |
588 | 588 | /* delegate cm event handler to rdma_transport */ |
589 | 589 | ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, |
590 | - RDMA_PS_TCP); | |
590 | + RDMA_PS_TCP, IB_QPT_RC); | |
591 | 591 | if (IS_ERR(ic->i_cm_id)) { |
592 | 592 | ret = PTR_ERR(ic->i_cm_id); |
593 | 593 | ic->i_cm_id = NULL; |
net/rds/iw.c
... | ... | @@ -226,7 +226,7 @@ |
226 | 226 | /* Create a CMA ID and try to bind it. This catches both |
227 | 227 | * IB and iWARP capable NICs. |
228 | 228 | */ |
229 | - cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); | |
229 | + cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); | |
230 | 230 | if (IS_ERR(cm_id)) |
231 | 231 | return PTR_ERR(cm_id); |
232 | 232 |
net/rds/iw_cm.c
... | ... | @@ -522,7 +522,7 @@ |
522 | 522 | /* XXX I wonder what affect the port space has */ |
523 | 523 | /* delegate cm event handler to rdma_transport */ |
524 | 524 | ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, |
525 | - RDMA_PS_TCP); | |
525 | + RDMA_PS_TCP, IB_QPT_RC); | |
526 | 526 | if (IS_ERR(ic->i_cm_id)) { |
527 | 527 | ret = PTR_ERR(ic->i_cm_id); |
528 | 528 | ic->i_cm_id = NULL; |
net/rds/rdma_transport.c
... | ... | @@ -158,7 +158,8 @@ |
158 | 158 | struct rdma_cm_id *cm_id; |
159 | 159 | int ret; |
160 | 160 | |
161 | - cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP); | |
161 | + cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP, | |
162 | + IB_QPT_RC); | |
162 | 163 | if (IS_ERR(cm_id)) { |
163 | 164 | ret = PTR_ERR(cm_id); |
164 | 165 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " |
net/sunrpc/xprtrdma/svc_rdma_transport.c
... | ... | @@ -695,7 +695,8 @@ |
695 | 695 | return ERR_PTR(-ENOMEM); |
696 | 696 | xprt = &cma_xprt->sc_xprt; |
697 | 697 | |
698 | - listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP); | |
698 | + listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP, | |
699 | + IB_QPT_RC); | |
699 | 700 | if (IS_ERR(listen_id)) { |
700 | 701 | ret = PTR_ERR(listen_id); |
701 | 702 | dprintk("svcrdma: rdma_create_id failed = %d\n", ret); |
net/sunrpc/xprtrdma/verbs.c
... | ... | @@ -387,7 +387,7 @@ |
387 | 387 | |
388 | 388 | init_completion(&ia->ri_done); |
389 | 389 | |
390 | - id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP); | |
390 | + id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC); | |
391 | 391 | if (IS_ERR(id)) { |
392 | 392 | rc = PTR_ERR(id); |
393 | 393 | dprintk("RPC: %s: rdma_create_id() failed %i\n", |