Commit 3f44675439b136d51179d31eb5a498383cb38624

Authored by Roland Dreier
1 parent 6e86841d05

RDMA/cma: Remove padding arrays by using struct sockaddr_storage

There are a few places where the RDMA CM code handles IPv6 by doing

	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];

This is fragile and ugly; handle this in a better way with just

	struct sockaddr_storage	addr;

[ Also roll in patch from Aleksey Senin <alekseys@voltaire.com> to
  switch to struct sockaddr_storage and get rid of padding arrays in
  struct rdma_addr. ]
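
As a purely illustrative userspace sketch (not part of this patch; the struct
and helper names below are made up), this is the pattern the change moves to:
a single sockaddr_storage field that is large and aligned enough for any
address family, with a cast to the specific sockaddr type at the point of use:

	/* Illustrative only: sockaddr_storage replaces the manual pad[] trick. */
	#include <string.h>
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	struct example_id {
		struct sockaddr_storage addr;	/* holds IPv4 or IPv6 */
	};

	static void example_set_ipv6(struct example_id *id,
				     const struct in6_addr *ip6,
				     unsigned short port)
	{
		/* Cast the storage to the concrete family when filling it in. */
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &id->addr;

		memset(sin6, 0, sizeof *sin6);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr   = *ip6;
		sin6->sin6_port   = htons(port);
	}

	static int example_family(struct example_id *id)
	{
		/* Generic code inspects sa_family via a sockaddr cast, mirroring
		 * the (struct sockaddr *) casts added throughout this patch. */
		return ((struct sockaddr *) &id->addr)->sa_family;
	}

The same (struct sockaddr *) casts appear in the diff below wherever generic
helpers such as cma_any_addr() and rdma_translate_ip() take the address.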

Signed-off-by: Roland Dreier <rolandd@cisco.com>

Showing 3 changed files with 26 additions and 33 deletions

drivers/infiniband/core/cma.c
1 /* 1 /*
2 * Copyright (c) 2005 Voltaire Inc. All rights reserved. 2 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. 3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved. 4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
6 * 6 *
7 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file 9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the 10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below: 11 * OpenIB.org BSD license below:
12 * 12 *
13 * Redistribution and use in source and binary forms, with or 13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following 14 * without modification, are permitted provided that the following
15 * conditions are met: 15 * conditions are met:
16 * 16 *
17 * - Redistributions of source code must retain the above 17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following 18 * copyright notice, this list of conditions and the following
19 * disclaimer. 19 * disclaimer.
20 * 20 *
21 * - Redistributions in binary form must reproduce the above 21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following 22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials 23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution. 24 * provided with the distribution.
25 * 25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE. 33 * SOFTWARE.
34 */ 34 */
35 35
36 #include <linux/completion.h> 36 #include <linux/completion.h>
37 #include <linux/in.h> 37 #include <linux/in.h>
38 #include <linux/in6.h> 38 #include <linux/in6.h>
39 #include <linux/mutex.h> 39 #include <linux/mutex.h>
40 #include <linux/random.h> 40 #include <linux/random.h>
41 #include <linux/idr.h> 41 #include <linux/idr.h>
42 #include <linux/inetdevice.h> 42 #include <linux/inetdevice.h>
43 43
44 #include <net/tcp.h> 44 #include <net/tcp.h>
45 45
46 #include <rdma/rdma_cm.h> 46 #include <rdma/rdma_cm.h>
47 #include <rdma/rdma_cm_ib.h> 47 #include <rdma/rdma_cm_ib.h>
48 #include <rdma/ib_cache.h> 48 #include <rdma/ib_cache.h>
49 #include <rdma/ib_cm.h> 49 #include <rdma/ib_cm.h>
50 #include <rdma/ib_sa.h> 50 #include <rdma/ib_sa.h>
51 #include <rdma/iw_cm.h> 51 #include <rdma/iw_cm.h>
52 52
53 MODULE_AUTHOR("Sean Hefty"); 53 MODULE_AUTHOR("Sean Hefty");
54 MODULE_DESCRIPTION("Generic RDMA CM Agent"); 54 MODULE_DESCRIPTION("Generic RDMA CM Agent");
55 MODULE_LICENSE("Dual BSD/GPL"); 55 MODULE_LICENSE("Dual BSD/GPL");
56 56
57 #define CMA_CM_RESPONSE_TIMEOUT 20 57 #define CMA_CM_RESPONSE_TIMEOUT 20
58 #define CMA_MAX_CM_RETRIES 15 58 #define CMA_MAX_CM_RETRIES 15
59 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) 59 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
60 60
61 static void cma_add_one(struct ib_device *device); 61 static void cma_add_one(struct ib_device *device);
62 static void cma_remove_one(struct ib_device *device); 62 static void cma_remove_one(struct ib_device *device);
63 63
64 static struct ib_client cma_client = { 64 static struct ib_client cma_client = {
65 .name = "cma", 65 .name = "cma",
66 .add = cma_add_one, 66 .add = cma_add_one,
67 .remove = cma_remove_one 67 .remove = cma_remove_one
68 }; 68 };
69 69
70 static struct ib_sa_client sa_client; 70 static struct ib_sa_client sa_client;
71 static struct rdma_addr_client addr_client; 71 static struct rdma_addr_client addr_client;
72 static LIST_HEAD(dev_list); 72 static LIST_HEAD(dev_list);
73 static LIST_HEAD(listen_any_list); 73 static LIST_HEAD(listen_any_list);
74 static DEFINE_MUTEX(lock); 74 static DEFINE_MUTEX(lock);
75 static struct workqueue_struct *cma_wq; 75 static struct workqueue_struct *cma_wq;
76 static DEFINE_IDR(sdp_ps); 76 static DEFINE_IDR(sdp_ps);
77 static DEFINE_IDR(tcp_ps); 77 static DEFINE_IDR(tcp_ps);
78 static DEFINE_IDR(udp_ps); 78 static DEFINE_IDR(udp_ps);
79 static DEFINE_IDR(ipoib_ps); 79 static DEFINE_IDR(ipoib_ps);
80 static int next_port; 80 static int next_port;
81 81
82 struct cma_device { 82 struct cma_device {
83 struct list_head list; 83 struct list_head list;
84 struct ib_device *device; 84 struct ib_device *device;
85 struct completion comp; 85 struct completion comp;
86 atomic_t refcount; 86 atomic_t refcount;
87 struct list_head id_list; 87 struct list_head id_list;
88 }; 88 };
89 89
90 enum cma_state { 90 enum cma_state {
91 CMA_IDLE, 91 CMA_IDLE,
92 CMA_ADDR_QUERY, 92 CMA_ADDR_QUERY,
93 CMA_ADDR_RESOLVED, 93 CMA_ADDR_RESOLVED,
94 CMA_ROUTE_QUERY, 94 CMA_ROUTE_QUERY,
95 CMA_ROUTE_RESOLVED, 95 CMA_ROUTE_RESOLVED,
96 CMA_CONNECT, 96 CMA_CONNECT,
97 CMA_DISCONNECT, 97 CMA_DISCONNECT,
98 CMA_ADDR_BOUND, 98 CMA_ADDR_BOUND,
99 CMA_LISTEN, 99 CMA_LISTEN,
100 CMA_DEVICE_REMOVAL, 100 CMA_DEVICE_REMOVAL,
101 CMA_DESTROYING 101 CMA_DESTROYING
102 }; 102 };
103 103
104 struct rdma_bind_list { 104 struct rdma_bind_list {
105 struct idr *ps; 105 struct idr *ps;
106 struct hlist_head owners; 106 struct hlist_head owners;
107 unsigned short port; 107 unsigned short port;
108 }; 108 };
109 109
110 /* 110 /*
111 * Device removal can occur at anytime, so we need extra handling to 111 * Device removal can occur at anytime, so we need extra handling to
112 * serialize notifying the user of device removal with other callbacks. 112 * serialize notifying the user of device removal with other callbacks.
113 * We do this by disabling removal notification while a callback is in process, 113 * We do this by disabling removal notification while a callback is in process,
114 * and reporting it after the callback completes. 114 * and reporting it after the callback completes.
115 */ 115 */
116 struct rdma_id_private { 116 struct rdma_id_private {
117 struct rdma_cm_id id; 117 struct rdma_cm_id id;
118 118
119 struct rdma_bind_list *bind_list; 119 struct rdma_bind_list *bind_list;
120 struct hlist_node node; 120 struct hlist_node node;
121 struct list_head list; /* listen_any_list or cma_device.list */ 121 struct list_head list; /* listen_any_list or cma_device.list */
122 struct list_head listen_list; /* per device listens */ 122 struct list_head listen_list; /* per device listens */
123 struct cma_device *cma_dev; 123 struct cma_device *cma_dev;
124 struct list_head mc_list; 124 struct list_head mc_list;
125 125
126 int internal_id; 126 int internal_id;
127 enum cma_state state; 127 enum cma_state state;
128 spinlock_t lock; 128 spinlock_t lock;
129 struct mutex qp_mutex; 129 struct mutex qp_mutex;
130 130
131 struct completion comp; 131 struct completion comp;
132 atomic_t refcount; 132 atomic_t refcount;
133 struct mutex handler_mutex; 133 struct mutex handler_mutex;
134 134
135 int backlog; 135 int backlog;
136 int timeout_ms; 136 int timeout_ms;
137 struct ib_sa_query *query; 137 struct ib_sa_query *query;
138 int query_id; 138 int query_id;
139 union { 139 union {
140 struct ib_cm_id *ib; 140 struct ib_cm_id *ib;
141 struct iw_cm_id *iw; 141 struct iw_cm_id *iw;
142 } cm_id; 142 } cm_id;
143 143
144 u32 seq_num; 144 u32 seq_num;
145 u32 qkey; 145 u32 qkey;
146 u32 qp_num; 146 u32 qp_num;
147 u8 srq; 147 u8 srq;
148 u8 tos; 148 u8 tos;
149 }; 149 };
150 150
151 struct cma_multicast { 151 struct cma_multicast {
152 struct rdma_id_private *id_priv; 152 struct rdma_id_private *id_priv;
153 union { 153 union {
154 struct ib_sa_multicast *ib; 154 struct ib_sa_multicast *ib;
155 } multicast; 155 } multicast;
156 struct list_head list; 156 struct list_head list;
157 void *context; 157 void *context;
158 struct sockaddr addr; 158 struct sockaddr_storage addr;
159 u8 pad[sizeof(struct sockaddr_in6) -
160 sizeof(struct sockaddr)];
161 }; 159 };
162 160
163 struct cma_work { 161 struct cma_work {
164 struct work_struct work; 162 struct work_struct work;
165 struct rdma_id_private *id; 163 struct rdma_id_private *id;
166 enum cma_state old_state; 164 enum cma_state old_state;
167 enum cma_state new_state; 165 enum cma_state new_state;
168 struct rdma_cm_event event; 166 struct rdma_cm_event event;
169 }; 167 };
170 168
171 struct cma_ndev_work { 169 struct cma_ndev_work {
172 struct work_struct work; 170 struct work_struct work;
173 struct rdma_id_private *id; 171 struct rdma_id_private *id;
174 struct rdma_cm_event event; 172 struct rdma_cm_event event;
175 }; 173 };
176 174
177 union cma_ip_addr { 175 union cma_ip_addr {
178 struct in6_addr ip6; 176 struct in6_addr ip6;
179 struct { 177 struct {
180 __be32 pad[3]; 178 __be32 pad[3];
181 __be32 addr; 179 __be32 addr;
182 } ip4; 180 } ip4;
183 }; 181 };
184 182
185 struct cma_hdr { 183 struct cma_hdr {
186 u8 cma_version; 184 u8 cma_version;
187 u8 ip_version; /* IP version: 7:4 */ 185 u8 ip_version; /* IP version: 7:4 */
188 __be16 port; 186 __be16 port;
189 union cma_ip_addr src_addr; 187 union cma_ip_addr src_addr;
190 union cma_ip_addr dst_addr; 188 union cma_ip_addr dst_addr;
191 }; 189 };
192 190
193 struct sdp_hh { 191 struct sdp_hh {
194 u8 bsdh[16]; 192 u8 bsdh[16];
195 u8 sdp_version; /* Major version: 7:4 */ 193 u8 sdp_version; /* Major version: 7:4 */
196 u8 ip_version; /* IP version: 7:4 */ 194 u8 ip_version; /* IP version: 7:4 */
197 u8 sdp_specific1[10]; 195 u8 sdp_specific1[10];
198 __be16 port; 196 __be16 port;
199 __be16 sdp_specific2; 197 __be16 sdp_specific2;
200 union cma_ip_addr src_addr; 198 union cma_ip_addr src_addr;
201 union cma_ip_addr dst_addr; 199 union cma_ip_addr dst_addr;
202 }; 200 };
203 201
204 struct sdp_hah { 202 struct sdp_hah {
205 u8 bsdh[16]; 203 u8 bsdh[16];
206 u8 sdp_version; 204 u8 sdp_version;
207 }; 205 };
208 206
209 #define CMA_VERSION 0x00 207 #define CMA_VERSION 0x00
210 #define SDP_MAJ_VERSION 0x2 208 #define SDP_MAJ_VERSION 0x2
211 209
212 static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp) 210 static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
213 { 211 {
214 unsigned long flags; 212 unsigned long flags;
215 int ret; 213 int ret;
216 214
217 spin_lock_irqsave(&id_priv->lock, flags); 215 spin_lock_irqsave(&id_priv->lock, flags);
218 ret = (id_priv->state == comp); 216 ret = (id_priv->state == comp);
219 spin_unlock_irqrestore(&id_priv->lock, flags); 217 spin_unlock_irqrestore(&id_priv->lock, flags);
220 return ret; 218 return ret;
221 } 219 }
222 220
223 static int cma_comp_exch(struct rdma_id_private *id_priv, 221 static int cma_comp_exch(struct rdma_id_private *id_priv,
224 enum cma_state comp, enum cma_state exch) 222 enum cma_state comp, enum cma_state exch)
225 { 223 {
226 unsigned long flags; 224 unsigned long flags;
227 int ret; 225 int ret;
228 226
229 spin_lock_irqsave(&id_priv->lock, flags); 227 spin_lock_irqsave(&id_priv->lock, flags);
230 if ((ret = (id_priv->state == comp))) 228 if ((ret = (id_priv->state == comp)))
231 id_priv->state = exch; 229 id_priv->state = exch;
232 spin_unlock_irqrestore(&id_priv->lock, flags); 230 spin_unlock_irqrestore(&id_priv->lock, flags);
233 return ret; 231 return ret;
234 } 232 }
235 233
236 static enum cma_state cma_exch(struct rdma_id_private *id_priv, 234 static enum cma_state cma_exch(struct rdma_id_private *id_priv,
237 enum cma_state exch) 235 enum cma_state exch)
238 { 236 {
239 unsigned long flags; 237 unsigned long flags;
240 enum cma_state old; 238 enum cma_state old;
241 239
242 spin_lock_irqsave(&id_priv->lock, flags); 240 spin_lock_irqsave(&id_priv->lock, flags);
243 old = id_priv->state; 241 old = id_priv->state;
244 id_priv->state = exch; 242 id_priv->state = exch;
245 spin_unlock_irqrestore(&id_priv->lock, flags); 243 spin_unlock_irqrestore(&id_priv->lock, flags);
246 return old; 244 return old;
247 } 245 }
248 246
249 static inline u8 cma_get_ip_ver(struct cma_hdr *hdr) 247 static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
250 { 248 {
251 return hdr->ip_version >> 4; 249 return hdr->ip_version >> 4;
252 } 250 }
253 251
254 static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) 252 static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
255 { 253 {
256 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); 254 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
257 } 255 }
258 256
259 static inline u8 sdp_get_majv(u8 sdp_version) 257 static inline u8 sdp_get_majv(u8 sdp_version)
260 { 258 {
261 return sdp_version >> 4; 259 return sdp_version >> 4;
262 } 260 }
263 261
264 static inline u8 sdp_get_ip_ver(struct sdp_hh *hh) 262 static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
265 { 263 {
266 return hh->ip_version >> 4; 264 return hh->ip_version >> 4;
267 } 265 }
268 266
269 static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver) 267 static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
270 { 268 {
271 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF); 269 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
272 } 270 }
273 271
274 static inline int cma_is_ud_ps(enum rdma_port_space ps) 272 static inline int cma_is_ud_ps(enum rdma_port_space ps)
275 { 273 {
276 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB); 274 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
277 } 275 }
278 276
279 static void cma_attach_to_dev(struct rdma_id_private *id_priv, 277 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
280 struct cma_device *cma_dev) 278 struct cma_device *cma_dev)
281 { 279 {
282 atomic_inc(&cma_dev->refcount); 280 atomic_inc(&cma_dev->refcount);
283 id_priv->cma_dev = cma_dev; 281 id_priv->cma_dev = cma_dev;
284 id_priv->id.device = cma_dev->device; 282 id_priv->id.device = cma_dev->device;
285 list_add_tail(&id_priv->list, &cma_dev->id_list); 283 list_add_tail(&id_priv->list, &cma_dev->id_list);
286 } 284 }
287 285
288 static inline void cma_deref_dev(struct cma_device *cma_dev) 286 static inline void cma_deref_dev(struct cma_device *cma_dev)
289 { 287 {
290 if (atomic_dec_and_test(&cma_dev->refcount)) 288 if (atomic_dec_and_test(&cma_dev->refcount))
291 complete(&cma_dev->comp); 289 complete(&cma_dev->comp);
292 } 290 }
293 291
294 static void cma_detach_from_dev(struct rdma_id_private *id_priv) 292 static void cma_detach_from_dev(struct rdma_id_private *id_priv)
295 { 293 {
296 list_del(&id_priv->list); 294 list_del(&id_priv->list);
297 cma_deref_dev(id_priv->cma_dev); 295 cma_deref_dev(id_priv->cma_dev);
298 id_priv->cma_dev = NULL; 296 id_priv->cma_dev = NULL;
299 } 297 }
300 298
301 static int cma_set_qkey(struct ib_device *device, u8 port_num, 299 static int cma_set_qkey(struct ib_device *device, u8 port_num,
302 enum rdma_port_space ps, 300 enum rdma_port_space ps,
303 struct rdma_dev_addr *dev_addr, u32 *qkey) 301 struct rdma_dev_addr *dev_addr, u32 *qkey)
304 { 302 {
305 struct ib_sa_mcmember_rec rec; 303 struct ib_sa_mcmember_rec rec;
306 int ret = 0; 304 int ret = 0;
307 305
308 switch (ps) { 306 switch (ps) {
309 case RDMA_PS_UDP: 307 case RDMA_PS_UDP:
310 *qkey = RDMA_UDP_QKEY; 308 *qkey = RDMA_UDP_QKEY;
311 break; 309 break;
312 case RDMA_PS_IPOIB: 310 case RDMA_PS_IPOIB:
313 ib_addr_get_mgid(dev_addr, &rec.mgid); 311 ib_addr_get_mgid(dev_addr, &rec.mgid);
314 ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec); 312 ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
315 *qkey = be32_to_cpu(rec.qkey); 313 *qkey = be32_to_cpu(rec.qkey);
316 break; 314 break;
317 default: 315 default:
318 break; 316 break;
319 } 317 }
320 return ret; 318 return ret;
321 } 319 }
322 320
323 static int cma_acquire_dev(struct rdma_id_private *id_priv) 321 static int cma_acquire_dev(struct rdma_id_private *id_priv)
324 { 322 {
325 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 323 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
326 struct cma_device *cma_dev; 324 struct cma_device *cma_dev;
327 union ib_gid gid; 325 union ib_gid gid;
328 int ret = -ENODEV; 326 int ret = -ENODEV;
329 327
330 switch (rdma_node_get_transport(dev_addr->dev_type)) { 328 switch (rdma_node_get_transport(dev_addr->dev_type)) {
331 case RDMA_TRANSPORT_IB: 329 case RDMA_TRANSPORT_IB:
332 ib_addr_get_sgid(dev_addr, &gid); 330 ib_addr_get_sgid(dev_addr, &gid);
333 break; 331 break;
334 case RDMA_TRANSPORT_IWARP: 332 case RDMA_TRANSPORT_IWARP:
335 iw_addr_get_sgid(dev_addr, &gid); 333 iw_addr_get_sgid(dev_addr, &gid);
336 break; 334 break;
337 default: 335 default:
338 return -ENODEV; 336 return -ENODEV;
339 } 337 }
340 338
341 list_for_each_entry(cma_dev, &dev_list, list) { 339 list_for_each_entry(cma_dev, &dev_list, list) {
342 ret = ib_find_cached_gid(cma_dev->device, &gid, 340 ret = ib_find_cached_gid(cma_dev->device, &gid,
343 &id_priv->id.port_num, NULL); 341 &id_priv->id.port_num, NULL);
344 if (!ret) { 342 if (!ret) {
345 ret = cma_set_qkey(cma_dev->device, 343 ret = cma_set_qkey(cma_dev->device,
346 id_priv->id.port_num, 344 id_priv->id.port_num,
347 id_priv->id.ps, dev_addr, 345 id_priv->id.ps, dev_addr,
348 &id_priv->qkey); 346 &id_priv->qkey);
349 if (!ret) 347 if (!ret)
350 cma_attach_to_dev(id_priv, cma_dev); 348 cma_attach_to_dev(id_priv, cma_dev);
351 break; 349 break;
352 } 350 }
353 } 351 }
354 return ret; 352 return ret;
355 } 353 }
356 354
357 static void cma_deref_id(struct rdma_id_private *id_priv) 355 static void cma_deref_id(struct rdma_id_private *id_priv)
358 { 356 {
359 if (atomic_dec_and_test(&id_priv->refcount)) 357 if (atomic_dec_and_test(&id_priv->refcount))
360 complete(&id_priv->comp); 358 complete(&id_priv->comp);
361 } 359 }
362 360
363 static int cma_disable_callback(struct rdma_id_private *id_priv, 361 static int cma_disable_callback(struct rdma_id_private *id_priv,
364 enum cma_state state) 362 enum cma_state state)
365 { 363 {
366 mutex_lock(&id_priv->handler_mutex); 364 mutex_lock(&id_priv->handler_mutex);
367 if (id_priv->state != state) { 365 if (id_priv->state != state) {
368 mutex_unlock(&id_priv->handler_mutex); 366 mutex_unlock(&id_priv->handler_mutex);
369 return -EINVAL; 367 return -EINVAL;
370 } 368 }
371 return 0; 369 return 0;
372 } 370 }
373 371
374 static int cma_has_cm_dev(struct rdma_id_private *id_priv) 372 static int cma_has_cm_dev(struct rdma_id_private *id_priv)
375 { 373 {
376 return (id_priv->id.device && id_priv->cm_id.ib); 374 return (id_priv->id.device && id_priv->cm_id.ib);
377 } 375 }
378 376
379 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, 377 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
380 void *context, enum rdma_port_space ps) 378 void *context, enum rdma_port_space ps)
381 { 379 {
382 struct rdma_id_private *id_priv; 380 struct rdma_id_private *id_priv;
383 381
384 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); 382 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
385 if (!id_priv) 383 if (!id_priv)
386 return ERR_PTR(-ENOMEM); 384 return ERR_PTR(-ENOMEM);
387 385
388 id_priv->state = CMA_IDLE; 386 id_priv->state = CMA_IDLE;
389 id_priv->id.context = context; 387 id_priv->id.context = context;
390 id_priv->id.event_handler = event_handler; 388 id_priv->id.event_handler = event_handler;
391 id_priv->id.ps = ps; 389 id_priv->id.ps = ps;
392 spin_lock_init(&id_priv->lock); 390 spin_lock_init(&id_priv->lock);
393 mutex_init(&id_priv->qp_mutex); 391 mutex_init(&id_priv->qp_mutex);
394 init_completion(&id_priv->comp); 392 init_completion(&id_priv->comp);
395 atomic_set(&id_priv->refcount, 1); 393 atomic_set(&id_priv->refcount, 1);
396 mutex_init(&id_priv->handler_mutex); 394 mutex_init(&id_priv->handler_mutex);
397 INIT_LIST_HEAD(&id_priv->listen_list); 395 INIT_LIST_HEAD(&id_priv->listen_list);
398 INIT_LIST_HEAD(&id_priv->mc_list); 396 INIT_LIST_HEAD(&id_priv->mc_list);
399 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); 397 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
400 398
401 return &id_priv->id; 399 return &id_priv->id;
402 } 400 }
403 EXPORT_SYMBOL(rdma_create_id); 401 EXPORT_SYMBOL(rdma_create_id);
404 402
405 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 403 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
406 { 404 {
407 struct ib_qp_attr qp_attr; 405 struct ib_qp_attr qp_attr;
408 int qp_attr_mask, ret; 406 int qp_attr_mask, ret;
409 407
410 qp_attr.qp_state = IB_QPS_INIT; 408 qp_attr.qp_state = IB_QPS_INIT;
411 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 409 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
412 if (ret) 410 if (ret)
413 return ret; 411 return ret;
414 412
415 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); 413 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
416 if (ret) 414 if (ret)
417 return ret; 415 return ret;
418 416
419 qp_attr.qp_state = IB_QPS_RTR; 417 qp_attr.qp_state = IB_QPS_RTR;
420 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); 418 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
421 if (ret) 419 if (ret)
422 return ret; 420 return ret;
423 421
424 qp_attr.qp_state = IB_QPS_RTS; 422 qp_attr.qp_state = IB_QPS_RTS;
425 qp_attr.sq_psn = 0; 423 qp_attr.sq_psn = 0;
426 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); 424 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
427 425
428 return ret; 426 return ret;
429 } 427 }
430 428
431 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 429 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
432 { 430 {
433 struct ib_qp_attr qp_attr; 431 struct ib_qp_attr qp_attr;
434 int qp_attr_mask, ret; 432 int qp_attr_mask, ret;
435 433
436 qp_attr.qp_state = IB_QPS_INIT; 434 qp_attr.qp_state = IB_QPS_INIT;
437 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 435 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
438 if (ret) 436 if (ret)
439 return ret; 437 return ret;
440 438
441 return ib_modify_qp(qp, &qp_attr, qp_attr_mask); 439 return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
442 } 440 }
443 441
444 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, 442 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
445 struct ib_qp_init_attr *qp_init_attr) 443 struct ib_qp_init_attr *qp_init_attr)
446 { 444 {
447 struct rdma_id_private *id_priv; 445 struct rdma_id_private *id_priv;
448 struct ib_qp *qp; 446 struct ib_qp *qp;
449 int ret; 447 int ret;
450 448
451 id_priv = container_of(id, struct rdma_id_private, id); 449 id_priv = container_of(id, struct rdma_id_private, id);
452 if (id->device != pd->device) 450 if (id->device != pd->device)
453 return -EINVAL; 451 return -EINVAL;
454 452
455 qp = ib_create_qp(pd, qp_init_attr); 453 qp = ib_create_qp(pd, qp_init_attr);
456 if (IS_ERR(qp)) 454 if (IS_ERR(qp))
457 return PTR_ERR(qp); 455 return PTR_ERR(qp);
458 456
459 if (cma_is_ud_ps(id_priv->id.ps)) 457 if (cma_is_ud_ps(id_priv->id.ps))
460 ret = cma_init_ud_qp(id_priv, qp); 458 ret = cma_init_ud_qp(id_priv, qp);
461 else 459 else
462 ret = cma_init_conn_qp(id_priv, qp); 460 ret = cma_init_conn_qp(id_priv, qp);
463 if (ret) 461 if (ret)
464 goto err; 462 goto err;
465 463
466 id->qp = qp; 464 id->qp = qp;
467 id_priv->qp_num = qp->qp_num; 465 id_priv->qp_num = qp->qp_num;
468 id_priv->srq = (qp->srq != NULL); 466 id_priv->srq = (qp->srq != NULL);
469 return 0; 467 return 0;
470 err: 468 err:
471 ib_destroy_qp(qp); 469 ib_destroy_qp(qp);
472 return ret; 470 return ret;
473 } 471 }
474 EXPORT_SYMBOL(rdma_create_qp); 472 EXPORT_SYMBOL(rdma_create_qp);
475 473
476 void rdma_destroy_qp(struct rdma_cm_id *id) 474 void rdma_destroy_qp(struct rdma_cm_id *id)
477 { 475 {
478 struct rdma_id_private *id_priv; 476 struct rdma_id_private *id_priv;
479 477
480 id_priv = container_of(id, struct rdma_id_private, id); 478 id_priv = container_of(id, struct rdma_id_private, id);
481 mutex_lock(&id_priv->qp_mutex); 479 mutex_lock(&id_priv->qp_mutex);
482 ib_destroy_qp(id_priv->id.qp); 480 ib_destroy_qp(id_priv->id.qp);
483 id_priv->id.qp = NULL; 481 id_priv->id.qp = NULL;
484 mutex_unlock(&id_priv->qp_mutex); 482 mutex_unlock(&id_priv->qp_mutex);
485 } 483 }
486 EXPORT_SYMBOL(rdma_destroy_qp); 484 EXPORT_SYMBOL(rdma_destroy_qp);
487 485
488 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, 486 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
489 struct rdma_conn_param *conn_param) 487 struct rdma_conn_param *conn_param)
490 { 488 {
491 struct ib_qp_attr qp_attr; 489 struct ib_qp_attr qp_attr;
492 int qp_attr_mask, ret; 490 int qp_attr_mask, ret;
493 491
494 mutex_lock(&id_priv->qp_mutex); 492 mutex_lock(&id_priv->qp_mutex);
495 if (!id_priv->id.qp) { 493 if (!id_priv->id.qp) {
496 ret = 0; 494 ret = 0;
497 goto out; 495 goto out;
498 } 496 }
499 497
500 /* Need to update QP attributes from default values. */ 498 /* Need to update QP attributes from default values. */
501 qp_attr.qp_state = IB_QPS_INIT; 499 qp_attr.qp_state = IB_QPS_INIT;
502 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 500 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
503 if (ret) 501 if (ret)
504 goto out; 502 goto out;
505 503
506 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 504 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
507 if (ret) 505 if (ret)
508 goto out; 506 goto out;
509 507
510 qp_attr.qp_state = IB_QPS_RTR; 508 qp_attr.qp_state = IB_QPS_RTR;
511 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 509 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
512 if (ret) 510 if (ret)
513 goto out; 511 goto out;
514 512
515 if (conn_param) 513 if (conn_param)
516 qp_attr.max_dest_rd_atomic = conn_param->responder_resources; 514 qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
517 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 515 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
518 out: 516 out:
519 mutex_unlock(&id_priv->qp_mutex); 517 mutex_unlock(&id_priv->qp_mutex);
520 return ret; 518 return ret;
521 } 519 }
522 520
523 static int cma_modify_qp_rts(struct rdma_id_private *id_priv, 521 static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
524 struct rdma_conn_param *conn_param) 522 struct rdma_conn_param *conn_param)
525 { 523 {
526 struct ib_qp_attr qp_attr; 524 struct ib_qp_attr qp_attr;
527 int qp_attr_mask, ret; 525 int qp_attr_mask, ret;
528 526
529 mutex_lock(&id_priv->qp_mutex); 527 mutex_lock(&id_priv->qp_mutex);
530 if (!id_priv->id.qp) { 528 if (!id_priv->id.qp) {
531 ret = 0; 529 ret = 0;
532 goto out; 530 goto out;
533 } 531 }
534 532
535 qp_attr.qp_state = IB_QPS_RTS; 533 qp_attr.qp_state = IB_QPS_RTS;
536 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 534 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
537 if (ret) 535 if (ret)
538 goto out; 536 goto out;
539 537
540 if (conn_param) 538 if (conn_param)
541 qp_attr.max_rd_atomic = conn_param->initiator_depth; 539 qp_attr.max_rd_atomic = conn_param->initiator_depth;
542 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 540 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
543 out: 541 out:
544 mutex_unlock(&id_priv->qp_mutex); 542 mutex_unlock(&id_priv->qp_mutex);
545 return ret; 543 return ret;
546 } 544 }
547 545
548 static int cma_modify_qp_err(struct rdma_id_private *id_priv) 546 static int cma_modify_qp_err(struct rdma_id_private *id_priv)
549 { 547 {
550 struct ib_qp_attr qp_attr; 548 struct ib_qp_attr qp_attr;
551 int ret; 549 int ret;
552 550
553 mutex_lock(&id_priv->qp_mutex); 551 mutex_lock(&id_priv->qp_mutex);
554 if (!id_priv->id.qp) { 552 if (!id_priv->id.qp) {
555 ret = 0; 553 ret = 0;
556 goto out; 554 goto out;
557 } 555 }
558 556
559 qp_attr.qp_state = IB_QPS_ERR; 557 qp_attr.qp_state = IB_QPS_ERR;
560 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); 558 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
561 out: 559 out:
562 mutex_unlock(&id_priv->qp_mutex); 560 mutex_unlock(&id_priv->qp_mutex);
563 return ret; 561 return ret;
564 } 562 }
565 563
566 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, 564 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
567 struct ib_qp_attr *qp_attr, int *qp_attr_mask) 565 struct ib_qp_attr *qp_attr, int *qp_attr_mask)
568 { 566 {
569 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 567 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
570 int ret; 568 int ret;
571 569
572 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, 570 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
573 ib_addr_get_pkey(dev_addr), 571 ib_addr_get_pkey(dev_addr),
574 &qp_attr->pkey_index); 572 &qp_attr->pkey_index);
575 if (ret) 573 if (ret)
576 return ret; 574 return ret;
577 575
578 qp_attr->port_num = id_priv->id.port_num; 576 qp_attr->port_num = id_priv->id.port_num;
579 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 577 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
580 578
581 if (cma_is_ud_ps(id_priv->id.ps)) { 579 if (cma_is_ud_ps(id_priv->id.ps)) {
582 qp_attr->qkey = id_priv->qkey; 580 qp_attr->qkey = id_priv->qkey;
583 *qp_attr_mask |= IB_QP_QKEY; 581 *qp_attr_mask |= IB_QP_QKEY;
584 } else { 582 } else {
585 qp_attr->qp_access_flags = 0; 583 qp_attr->qp_access_flags = 0;
586 *qp_attr_mask |= IB_QP_ACCESS_FLAGS; 584 *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
587 } 585 }
588 return 0; 586 return 0;
589 } 587 }
590 588
591 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 589 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
592 int *qp_attr_mask) 590 int *qp_attr_mask)
593 { 591 {
594 struct rdma_id_private *id_priv; 592 struct rdma_id_private *id_priv;
595 int ret = 0; 593 int ret = 0;
596 594
597 id_priv = container_of(id, struct rdma_id_private, id); 595 id_priv = container_of(id, struct rdma_id_private, id);
598 switch (rdma_node_get_transport(id_priv->id.device->node_type)) { 596 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
599 case RDMA_TRANSPORT_IB: 597 case RDMA_TRANSPORT_IB:
600 if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps)) 598 if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
601 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); 599 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
602 else 600 else
603 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 601 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
604 qp_attr_mask); 602 qp_attr_mask);
605 if (qp_attr->qp_state == IB_QPS_RTR) 603 if (qp_attr->qp_state == IB_QPS_RTR)
606 qp_attr->rq_psn = id_priv->seq_num; 604 qp_attr->rq_psn = id_priv->seq_num;
607 break; 605 break;
608 case RDMA_TRANSPORT_IWARP: 606 case RDMA_TRANSPORT_IWARP:
609 if (!id_priv->cm_id.iw) { 607 if (!id_priv->cm_id.iw) {
610 qp_attr->qp_access_flags = 0; 608 qp_attr->qp_access_flags = 0;
611 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; 609 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
612 } else 610 } else
613 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, 611 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
614 qp_attr_mask); 612 qp_attr_mask);
615 break; 613 break;
616 default: 614 default:
617 ret = -ENOSYS; 615 ret = -ENOSYS;
618 break; 616 break;
619 } 617 }
620 618
621 return ret; 619 return ret;
622 } 620 }
623 EXPORT_SYMBOL(rdma_init_qp_attr); 621 EXPORT_SYMBOL(rdma_init_qp_attr);
624 622
625 static inline int cma_zero_addr(struct sockaddr *addr) 623 static inline int cma_zero_addr(struct sockaddr *addr)
626 { 624 {
627 struct in6_addr *ip6; 625 struct in6_addr *ip6;
628 626
629 if (addr->sa_family == AF_INET) 627 if (addr->sa_family == AF_INET)
630 return ipv4_is_zeronet( 628 return ipv4_is_zeronet(
631 ((struct sockaddr_in *)addr)->sin_addr.s_addr); 629 ((struct sockaddr_in *)addr)->sin_addr.s_addr);
632 else { 630 else {
633 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr; 631 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
634 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] | 632 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
635 ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0; 633 ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
636 } 634 }
637 } 635 }
638 636
639 static inline int cma_loopback_addr(struct sockaddr *addr) 637 static inline int cma_loopback_addr(struct sockaddr *addr)
640 { 638 {
641 return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr); 639 return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
642 } 640 }
643 641
644 static inline int cma_any_addr(struct sockaddr *addr) 642 static inline int cma_any_addr(struct sockaddr *addr)
645 { 643 {
646 return cma_zero_addr(addr) || cma_loopback_addr(addr); 644 return cma_zero_addr(addr) || cma_loopback_addr(addr);
647 } 645 }
648 646
649 static inline __be16 cma_port(struct sockaddr *addr) 647 static inline __be16 cma_port(struct sockaddr *addr)
650 { 648 {
651 if (addr->sa_family == AF_INET) 649 if (addr->sa_family == AF_INET)
652 return ((struct sockaddr_in *) addr)->sin_port; 650 return ((struct sockaddr_in *) addr)->sin_port;
653 else 651 else
654 return ((struct sockaddr_in6 *) addr)->sin6_port; 652 return ((struct sockaddr_in6 *) addr)->sin6_port;
655 } 653 }
656 654
657 static inline int cma_any_port(struct sockaddr *addr) 655 static inline int cma_any_port(struct sockaddr *addr)
658 { 656 {
659 return !cma_port(addr); 657 return !cma_port(addr);
660 } 658 }
661 659
662 static int cma_get_net_info(void *hdr, enum rdma_port_space ps, 660 static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
663 u8 *ip_ver, __be16 *port, 661 u8 *ip_ver, __be16 *port,
664 union cma_ip_addr **src, union cma_ip_addr **dst) 662 union cma_ip_addr **src, union cma_ip_addr **dst)
665 { 663 {
666 switch (ps) { 664 switch (ps) {
667 case RDMA_PS_SDP: 665 case RDMA_PS_SDP:
668 if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) != 666 if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
669 SDP_MAJ_VERSION) 667 SDP_MAJ_VERSION)
670 return -EINVAL; 668 return -EINVAL;
671 669
672 *ip_ver = sdp_get_ip_ver(hdr); 670 *ip_ver = sdp_get_ip_ver(hdr);
673 *port = ((struct sdp_hh *) hdr)->port; 671 *port = ((struct sdp_hh *) hdr)->port;
674 *src = &((struct sdp_hh *) hdr)->src_addr; 672 *src = &((struct sdp_hh *) hdr)->src_addr;
675 *dst = &((struct sdp_hh *) hdr)->dst_addr; 673 *dst = &((struct sdp_hh *) hdr)->dst_addr;
676 break; 674 break;
677 default: 675 default:
678 if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION) 676 if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
679 return -EINVAL; 677 return -EINVAL;
680 678
681 *ip_ver = cma_get_ip_ver(hdr); 679 *ip_ver = cma_get_ip_ver(hdr);
682 *port = ((struct cma_hdr *) hdr)->port; 680 *port = ((struct cma_hdr *) hdr)->port;
683 *src = &((struct cma_hdr *) hdr)->src_addr; 681 *src = &((struct cma_hdr *) hdr)->src_addr;
684 *dst = &((struct cma_hdr *) hdr)->dst_addr; 682 *dst = &((struct cma_hdr *) hdr)->dst_addr;
685 break; 683 break;
686 } 684 }
687 685
688 if (*ip_ver != 4 && *ip_ver != 6) 686 if (*ip_ver != 4 && *ip_ver != 6)
689 return -EINVAL; 687 return -EINVAL;
690 return 0; 688 return 0;
691 } 689 }
692 690
693 static void cma_save_net_info(struct rdma_addr *addr, 691 static void cma_save_net_info(struct rdma_addr *addr,
694 struct rdma_addr *listen_addr, 692 struct rdma_addr *listen_addr,
695 u8 ip_ver, __be16 port, 693 u8 ip_ver, __be16 port,
696 union cma_ip_addr *src, union cma_ip_addr *dst) 694 union cma_ip_addr *src, union cma_ip_addr *dst)
697 { 695 {
698 struct sockaddr_in *listen4, *ip4; 696 struct sockaddr_in *listen4, *ip4;
699 struct sockaddr_in6 *listen6, *ip6; 697 struct sockaddr_in6 *listen6, *ip6;
700 698
701 switch (ip_ver) { 699 switch (ip_ver) {
702 case 4: 700 case 4:
703 listen4 = (struct sockaddr_in *) &listen_addr->src_addr; 701 listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
704 ip4 = (struct sockaddr_in *) &addr->src_addr; 702 ip4 = (struct sockaddr_in *) &addr->src_addr;
705 ip4->sin_family = listen4->sin_family; 703 ip4->sin_family = listen4->sin_family;
706 ip4->sin_addr.s_addr = dst->ip4.addr; 704 ip4->sin_addr.s_addr = dst->ip4.addr;
707 ip4->sin_port = listen4->sin_port; 705 ip4->sin_port = listen4->sin_port;
708 706
709 ip4 = (struct sockaddr_in *) &addr->dst_addr; 707 ip4 = (struct sockaddr_in *) &addr->dst_addr;
710 ip4->sin_family = listen4->sin_family; 708 ip4->sin_family = listen4->sin_family;
711 ip4->sin_addr.s_addr = src->ip4.addr; 709 ip4->sin_addr.s_addr = src->ip4.addr;
712 ip4->sin_port = port; 710 ip4->sin_port = port;
713 break; 711 break;
714 case 6: 712 case 6:
715 listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr; 713 listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
716 ip6 = (struct sockaddr_in6 *) &addr->src_addr; 714 ip6 = (struct sockaddr_in6 *) &addr->src_addr;
717 ip6->sin6_family = listen6->sin6_family; 715 ip6->sin6_family = listen6->sin6_family;
718 ip6->sin6_addr = dst->ip6; 716 ip6->sin6_addr = dst->ip6;
719 ip6->sin6_port = listen6->sin6_port; 717 ip6->sin6_port = listen6->sin6_port;
720 718
721 ip6 = (struct sockaddr_in6 *) &addr->dst_addr; 719 ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
722 ip6->sin6_family = listen6->sin6_family; 720 ip6->sin6_family = listen6->sin6_family;
723 ip6->sin6_addr = src->ip6; 721 ip6->sin6_addr = src->ip6;
724 ip6->sin6_port = port; 722 ip6->sin6_port = port;
725 break; 723 break;
726 default: 724 default:
727 break; 725 break;
728 } 726 }
729 } 727 }
730 728
731 static inline int cma_user_data_offset(enum rdma_port_space ps) 729 static inline int cma_user_data_offset(enum rdma_port_space ps)
732 { 730 {
733 switch (ps) { 731 switch (ps) {
734 case RDMA_PS_SDP: 732 case RDMA_PS_SDP:
735 return 0; 733 return 0;
736 default: 734 default:
737 return sizeof(struct cma_hdr); 735 return sizeof(struct cma_hdr);
738 } 736 }
739 } 737 }
740 738
741 static void cma_cancel_route(struct rdma_id_private *id_priv) 739 static void cma_cancel_route(struct rdma_id_private *id_priv)
742 { 740 {
743 switch (rdma_node_get_transport(id_priv->id.device->node_type)) { 741 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
744 case RDMA_TRANSPORT_IB: 742 case RDMA_TRANSPORT_IB:
745 if (id_priv->query) 743 if (id_priv->query)
746 ib_sa_cancel_query(id_priv->query_id, id_priv->query); 744 ib_sa_cancel_query(id_priv->query_id, id_priv->query);
747 break; 745 break;
748 default: 746 default:
749 break; 747 break;
750 } 748 }
751 } 749 }
752 750
753 static void cma_cancel_listens(struct rdma_id_private *id_priv) 751 static void cma_cancel_listens(struct rdma_id_private *id_priv)
754 { 752 {
755 struct rdma_id_private *dev_id_priv; 753 struct rdma_id_private *dev_id_priv;
756 754
757 /* 755 /*
758 * Remove from listen_any_list to prevent added devices from spawning 756 * Remove from listen_any_list to prevent added devices from spawning
759 * additional listen requests. 757 * additional listen requests.
760 */ 758 */
761 mutex_lock(&lock); 759 mutex_lock(&lock);
762 list_del(&id_priv->list); 760 list_del(&id_priv->list);
763 761
764 while (!list_empty(&id_priv->listen_list)) { 762 while (!list_empty(&id_priv->listen_list)) {
765 dev_id_priv = list_entry(id_priv->listen_list.next, 763 dev_id_priv = list_entry(id_priv->listen_list.next,
766 struct rdma_id_private, listen_list); 764 struct rdma_id_private, listen_list);
767 /* sync with device removal to avoid duplicate destruction */ 765 /* sync with device removal to avoid duplicate destruction */
768 list_del_init(&dev_id_priv->list); 766 list_del_init(&dev_id_priv->list);
769 list_del(&dev_id_priv->listen_list); 767 list_del(&dev_id_priv->listen_list);
770 mutex_unlock(&lock); 768 mutex_unlock(&lock);
771 769
772 rdma_destroy_id(&dev_id_priv->id); 770 rdma_destroy_id(&dev_id_priv->id);
773 mutex_lock(&lock); 771 mutex_lock(&lock);
774 } 772 }
775 mutex_unlock(&lock); 773 mutex_unlock(&lock);
776 } 774 }
777 775
778 static void cma_cancel_operation(struct rdma_id_private *id_priv, 776 static void cma_cancel_operation(struct rdma_id_private *id_priv,
779 enum cma_state state) 777 enum cma_state state)
780 { 778 {
781 switch (state) { 779 switch (state) {
782 case CMA_ADDR_QUERY: 780 case CMA_ADDR_QUERY:
783 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); 781 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
784 break; 782 break;
785 case CMA_ROUTE_QUERY: 783 case CMA_ROUTE_QUERY:
786 cma_cancel_route(id_priv); 784 cma_cancel_route(id_priv);
787 break; 785 break;
788 case CMA_LISTEN: 786 case CMA_LISTEN:
789 if (cma_any_addr(&id_priv->id.route.addr.src_addr) && 787 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
790 !id_priv->cma_dev) 788 && !id_priv->cma_dev)
791 cma_cancel_listens(id_priv); 789 cma_cancel_listens(id_priv);
792 break; 790 break;
793 default: 791 default:
794 break; 792 break;
795 } 793 }
796 } 794 }
797 795
798 static void cma_release_port(struct rdma_id_private *id_priv) 796 static void cma_release_port(struct rdma_id_private *id_priv)
799 { 797 {
800 struct rdma_bind_list *bind_list = id_priv->bind_list; 798 struct rdma_bind_list *bind_list = id_priv->bind_list;
801 799
802 if (!bind_list) 800 if (!bind_list)
803 return; 801 return;
804 802
805 mutex_lock(&lock); 803 mutex_lock(&lock);
806 hlist_del(&id_priv->node); 804 hlist_del(&id_priv->node);
807 if (hlist_empty(&bind_list->owners)) { 805 if (hlist_empty(&bind_list->owners)) {
808 idr_remove(bind_list->ps, bind_list->port); 806 idr_remove(bind_list->ps, bind_list->port);
809 kfree(bind_list); 807 kfree(bind_list);
810 } 808 }
811 mutex_unlock(&lock); 809 mutex_unlock(&lock);
812 } 810 }
813 811
814 static void cma_leave_mc_groups(struct rdma_id_private *id_priv) 812 static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
815 { 813 {
816 struct cma_multicast *mc; 814 struct cma_multicast *mc;
817 815
818 while (!list_empty(&id_priv->mc_list)) { 816 while (!list_empty(&id_priv->mc_list)) {
819 mc = container_of(id_priv->mc_list.next, 817 mc = container_of(id_priv->mc_list.next,
820 struct cma_multicast, list); 818 struct cma_multicast, list);
821 list_del(&mc->list); 819 list_del(&mc->list);
822 ib_sa_free_multicast(mc->multicast.ib); 820 ib_sa_free_multicast(mc->multicast.ib);
823 kfree(mc); 821 kfree(mc);
824 } 822 }
825 } 823 }
826 824
827 void rdma_destroy_id(struct rdma_cm_id *id) 825 void rdma_destroy_id(struct rdma_cm_id *id)
828 { 826 {
829 struct rdma_id_private *id_priv; 827 struct rdma_id_private *id_priv;
830 enum cma_state state; 828 enum cma_state state;
831 829
832 id_priv = container_of(id, struct rdma_id_private, id); 830 id_priv = container_of(id, struct rdma_id_private, id);
833 state = cma_exch(id_priv, CMA_DESTROYING); 831 state = cma_exch(id_priv, CMA_DESTROYING);
834 cma_cancel_operation(id_priv, state); 832 cma_cancel_operation(id_priv, state);
835 833
836 mutex_lock(&lock); 834 mutex_lock(&lock);
837 if (id_priv->cma_dev) { 835 if (id_priv->cma_dev) {
838 mutex_unlock(&lock); 836 mutex_unlock(&lock);
839 switch (rdma_node_get_transport(id->device->node_type)) { 837 switch (rdma_node_get_transport(id->device->node_type)) {
840 case RDMA_TRANSPORT_IB: 838 case RDMA_TRANSPORT_IB:
841 if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) 839 if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
842 ib_destroy_cm_id(id_priv->cm_id.ib); 840 ib_destroy_cm_id(id_priv->cm_id.ib);
843 break; 841 break;
844 case RDMA_TRANSPORT_IWARP: 842 case RDMA_TRANSPORT_IWARP:
845 if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw)) 843 if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
846 iw_destroy_cm_id(id_priv->cm_id.iw); 844 iw_destroy_cm_id(id_priv->cm_id.iw);
847 break; 845 break;
848 default: 846 default:
849 break; 847 break;
850 } 848 }
851 cma_leave_mc_groups(id_priv); 849 cma_leave_mc_groups(id_priv);
852 mutex_lock(&lock); 850 mutex_lock(&lock);
853 cma_detach_from_dev(id_priv); 851 cma_detach_from_dev(id_priv);
854 } 852 }
855 mutex_unlock(&lock); 853 mutex_unlock(&lock);
856 854
857 cma_release_port(id_priv); 855 cma_release_port(id_priv);
858 cma_deref_id(id_priv); 856 cma_deref_id(id_priv);
859 wait_for_completion(&id_priv->comp); 857 wait_for_completion(&id_priv->comp);
860 858
861 if (id_priv->internal_id) 859 if (id_priv->internal_id)
862 cma_deref_id(id_priv->id.context); 860 cma_deref_id(id_priv->id.context);
863 861
864 kfree(id_priv->id.route.path_rec); 862 kfree(id_priv->id.route.path_rec);
865 kfree(id_priv); 863 kfree(id_priv);
866 } 864 }
867 EXPORT_SYMBOL(rdma_destroy_id); 865 EXPORT_SYMBOL(rdma_destroy_id);
868 866
869 static int cma_rep_recv(struct rdma_id_private *id_priv) 867 static int cma_rep_recv(struct rdma_id_private *id_priv)
870 { 868 {
871 int ret; 869 int ret;
872 870
873 ret = cma_modify_qp_rtr(id_priv, NULL); 871 ret = cma_modify_qp_rtr(id_priv, NULL);
874 if (ret) 872 if (ret)
875 goto reject; 873 goto reject;
876 874
877 ret = cma_modify_qp_rts(id_priv, NULL); 875 ret = cma_modify_qp_rts(id_priv, NULL);
878 if (ret) 876 if (ret)
879 goto reject; 877 goto reject;
880 878
881 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); 879 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
882 if (ret) 880 if (ret)
883 goto reject; 881 goto reject;
884 882
885 return 0; 883 return 0;
886 reject: 884 reject:
887 cma_modify_qp_err(id_priv); 885 cma_modify_qp_err(id_priv);
888 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, 886 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
889 NULL, 0, NULL, 0); 887 NULL, 0, NULL, 0);
890 return ret; 888 return ret;
891 } 889 }
892 890
893 static int cma_verify_rep(struct rdma_id_private *id_priv, void *data) 891 static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
894 { 892 {
895 if (id_priv->id.ps == RDMA_PS_SDP && 893 if (id_priv->id.ps == RDMA_PS_SDP &&
896 sdp_get_majv(((struct sdp_hah *) data)->sdp_version) != 894 sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
897 SDP_MAJ_VERSION) 895 SDP_MAJ_VERSION)
898 return -EINVAL; 896 return -EINVAL;
899 897
900 return 0; 898 return 0;
901 } 899 }
902 900
903 static void cma_set_rep_event_data(struct rdma_cm_event *event, 901 static void cma_set_rep_event_data(struct rdma_cm_event *event,
904 struct ib_cm_rep_event_param *rep_data, 902 struct ib_cm_rep_event_param *rep_data,
905 void *private_data) 903 void *private_data)
906 { 904 {
907 event->param.conn.private_data = private_data; 905 event->param.conn.private_data = private_data;
908 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 906 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
909 event->param.conn.responder_resources = rep_data->responder_resources; 907 event->param.conn.responder_resources = rep_data->responder_resources;
910 event->param.conn.initiator_depth = rep_data->initiator_depth; 908 event->param.conn.initiator_depth = rep_data->initiator_depth;
911 event->param.conn.flow_control = rep_data->flow_control; 909 event->param.conn.flow_control = rep_data->flow_control;
912 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; 910 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
913 event->param.conn.srq = rep_data->srq; 911 event->param.conn.srq = rep_data->srq;
914 event->param.conn.qp_num = rep_data->remote_qpn; 912 event->param.conn.qp_num = rep_data->remote_qpn;
915 } 913 }
916 914
917 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 915 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
918 { 916 {
919 struct rdma_id_private *id_priv = cm_id->context; 917 struct rdma_id_private *id_priv = cm_id->context;
920 struct rdma_cm_event event; 918 struct rdma_cm_event event;
921 int ret = 0; 919 int ret = 0;
922 920
923 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 921 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
924 cma_disable_callback(id_priv, CMA_CONNECT)) || 922 cma_disable_callback(id_priv, CMA_CONNECT)) ||
925 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 923 (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
926 cma_disable_callback(id_priv, CMA_DISCONNECT))) 924 cma_disable_callback(id_priv, CMA_DISCONNECT)))
927 return 0; 925 return 0;
928 926
929 memset(&event, 0, sizeof event); 927 memset(&event, 0, sizeof event);
930 switch (ib_event->event) { 928 switch (ib_event->event) {
931 case IB_CM_REQ_ERROR: 929 case IB_CM_REQ_ERROR:
932 case IB_CM_REP_ERROR: 930 case IB_CM_REP_ERROR:
933 event.event = RDMA_CM_EVENT_UNREACHABLE; 931 event.event = RDMA_CM_EVENT_UNREACHABLE;
934 event.status = -ETIMEDOUT; 932 event.status = -ETIMEDOUT;
935 break; 933 break;
936 case IB_CM_REP_RECEIVED: 934 case IB_CM_REP_RECEIVED:
937 event.status = cma_verify_rep(id_priv, ib_event->private_data); 935 event.status = cma_verify_rep(id_priv, ib_event->private_data);
938 if (event.status) 936 if (event.status)
939 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 937 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
940 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) { 938 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
941 event.status = cma_rep_recv(id_priv); 939 event.status = cma_rep_recv(id_priv);
942 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : 940 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
943 RDMA_CM_EVENT_ESTABLISHED; 941 RDMA_CM_EVENT_ESTABLISHED;
944 } else 942 } else
945 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; 943 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
946 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, 944 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
947 ib_event->private_data); 945 ib_event->private_data);
948 break; 946 break;
949 case IB_CM_RTU_RECEIVED: 947 case IB_CM_RTU_RECEIVED:
950 case IB_CM_USER_ESTABLISHED: 948 case IB_CM_USER_ESTABLISHED:
951 event.event = RDMA_CM_EVENT_ESTABLISHED; 949 event.event = RDMA_CM_EVENT_ESTABLISHED;
952 break; 950 break;
953 case IB_CM_DREQ_ERROR: 951 case IB_CM_DREQ_ERROR:
954 event.status = -ETIMEDOUT; /* fall through */ 952 event.status = -ETIMEDOUT; /* fall through */
955 case IB_CM_DREQ_RECEIVED: 953 case IB_CM_DREQ_RECEIVED:
956 case IB_CM_DREP_RECEIVED: 954 case IB_CM_DREP_RECEIVED:
957 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT)) 955 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
958 goto out; 956 goto out;
959 event.event = RDMA_CM_EVENT_DISCONNECTED; 957 event.event = RDMA_CM_EVENT_DISCONNECTED;
960 break; 958 break;
961 case IB_CM_TIMEWAIT_EXIT: 959 case IB_CM_TIMEWAIT_EXIT:
962 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; 960 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
963 break; 961 break;
964 case IB_CM_MRA_RECEIVED: 962 case IB_CM_MRA_RECEIVED:
965 /* ignore event */ 963 /* ignore event */
966 goto out; 964 goto out;
967 case IB_CM_REJ_RECEIVED: 965 case IB_CM_REJ_RECEIVED:
968 cma_modify_qp_err(id_priv); 966 cma_modify_qp_err(id_priv);
969 event.status = ib_event->param.rej_rcvd.reason; 967 event.status = ib_event->param.rej_rcvd.reason;
970 event.event = RDMA_CM_EVENT_REJECTED; 968 event.event = RDMA_CM_EVENT_REJECTED;
971 event.param.conn.private_data = ib_event->private_data; 969 event.param.conn.private_data = ib_event->private_data;
972 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 970 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
973 break; 971 break;
974 default: 972 default:
975 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n", 973 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
976 ib_event->event); 974 ib_event->event);
977 goto out; 975 goto out;
978 } 976 }
979 977
980 ret = id_priv->id.event_handler(&id_priv->id, &event); 978 ret = id_priv->id.event_handler(&id_priv->id, &event);
981 if (ret) { 979 if (ret) {
982 /* Destroy the CM ID by returning a non-zero value. */ 980 /* Destroy the CM ID by returning a non-zero value. */
983 id_priv->cm_id.ib = NULL; 981 id_priv->cm_id.ib = NULL;
984 cma_exch(id_priv, CMA_DESTROYING); 982 cma_exch(id_priv, CMA_DESTROYING);
985 mutex_unlock(&id_priv->handler_mutex); 983 mutex_unlock(&id_priv->handler_mutex);
986 rdma_destroy_id(&id_priv->id); 984 rdma_destroy_id(&id_priv->id);
987 return ret; 985 return ret;
988 } 986 }
989 out: 987 out:
990 mutex_unlock(&id_priv->handler_mutex); 988 mutex_unlock(&id_priv->handler_mutex);
991 return ret; 989 return ret;
992 } 990 }
993 991
994 static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, 992 static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
995 struct ib_cm_event *ib_event) 993 struct ib_cm_event *ib_event)
996 { 994 {
997 struct rdma_id_private *id_priv; 995 struct rdma_id_private *id_priv;
998 struct rdma_cm_id *id; 996 struct rdma_cm_id *id;
999 struct rdma_route *rt; 997 struct rdma_route *rt;
1000 union cma_ip_addr *src, *dst; 998 union cma_ip_addr *src, *dst;
1001 __be16 port; 999 __be16 port;
1002 u8 ip_ver; 1000 u8 ip_ver;
1003 int ret; 1001 int ret;
1004 1002
1005 if (cma_get_net_info(ib_event->private_data, listen_id->ps, 1003 if (cma_get_net_info(ib_event->private_data, listen_id->ps,
1006 &ip_ver, &port, &src, &dst)) 1004 &ip_ver, &port, &src, &dst))
1007 goto err; 1005 goto err;
1008 1006
1009 id = rdma_create_id(listen_id->event_handler, listen_id->context, 1007 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1010 listen_id->ps); 1008 listen_id->ps);
1011 if (IS_ERR(id)) 1009 if (IS_ERR(id))
1012 goto err; 1010 goto err;
1013 1011
1014 cma_save_net_info(&id->route.addr, &listen_id->route.addr, 1012 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1015 ip_ver, port, src, dst); 1013 ip_ver, port, src, dst);
1016 1014
1017 rt = &id->route; 1015 rt = &id->route;
1018 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; 1016 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
1019 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, 1017 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
1020 GFP_KERNEL); 1018 GFP_KERNEL);
1021 if (!rt->path_rec) 1019 if (!rt->path_rec)
1022 goto destroy_id; 1020 goto destroy_id;
1023 1021
1024 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path; 1022 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
1025 if (rt->num_paths == 2) 1023 if (rt->num_paths == 2)
1026 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 1024 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
1027 1025
1028 ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 1026 ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
1029 ret = rdma_translate_ip(&id->route.addr.src_addr, 1027 ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
1030 &id->route.addr.dev_addr); 1028 &id->route.addr.dev_addr);
1031 if (ret) 1029 if (ret)
1032 goto destroy_id; 1030 goto destroy_id;
1033 1031
1034 id_priv = container_of(id, struct rdma_id_private, id); 1032 id_priv = container_of(id, struct rdma_id_private, id);
1035 id_priv->state = CMA_CONNECT; 1033 id_priv->state = CMA_CONNECT;
1036 return id_priv; 1034 return id_priv;
1037 1035
1038 destroy_id: 1036 destroy_id:
1039 rdma_destroy_id(id); 1037 rdma_destroy_id(id);
1040 err: 1038 err:
1041 return NULL; 1039 return NULL;
1042 } 1040 }
1043 1041
1044 static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, 1042 static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1045 struct ib_cm_event *ib_event) 1043 struct ib_cm_event *ib_event)
1046 { 1044 {
1047 struct rdma_id_private *id_priv; 1045 struct rdma_id_private *id_priv;
1048 struct rdma_cm_id *id; 1046 struct rdma_cm_id *id;
1049 union cma_ip_addr *src, *dst; 1047 union cma_ip_addr *src, *dst;
1050 __be16 port; 1048 __be16 port;
1051 u8 ip_ver; 1049 u8 ip_ver;
1052 int ret; 1050 int ret;
1053 1051
1054 id = rdma_create_id(listen_id->event_handler, listen_id->context, 1052 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1055 listen_id->ps); 1053 listen_id->ps);
1056 if (IS_ERR(id)) 1054 if (IS_ERR(id))
1057 return NULL; 1055 return NULL;
1058 1056
1059 1057
1060 if (cma_get_net_info(ib_event->private_data, listen_id->ps, 1058 if (cma_get_net_info(ib_event->private_data, listen_id->ps,
1061 &ip_ver, &port, &src, &dst)) 1059 &ip_ver, &port, &src, &dst))
1062 goto err; 1060 goto err;
1063 1061
1064 cma_save_net_info(&id->route.addr, &listen_id->route.addr, 1062 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1065 ip_ver, port, src, dst); 1063 ip_ver, port, src, dst);
1066 1064
1067 ret = rdma_translate_ip(&id->route.addr.src_addr, 1065 ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
1068 &id->route.addr.dev_addr); 1066 &id->route.addr.dev_addr);
1069 if (ret) 1067 if (ret)
1070 goto err; 1068 goto err;
1071 1069
1072 id_priv = container_of(id, struct rdma_id_private, id); 1070 id_priv = container_of(id, struct rdma_id_private, id);
1073 id_priv->state = CMA_CONNECT; 1071 id_priv->state = CMA_CONNECT;
1074 return id_priv; 1072 return id_priv;
1075 err: 1073 err:
1076 rdma_destroy_id(id); 1074 rdma_destroy_id(id);
1077 return NULL; 1075 return NULL;
1078 } 1076 }
1079 1077
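Both cma_new_conn_id() and cma_new_udp_id() above now hand the stored source address to rdma_translate_ip() through an explicit (struct sockaddr *) cast: the id keeps the address in storage wide enough for either IPv4 or IPv6, and each call site casts to the generic or family-specific view it needs. A small stand-alone sketch of that pattern (example_use() is a made-up stand-in for helpers such as rdma_translate_ip(); the port and address are arbitrary):

#include <linux/in.h>
#include <linux/socket.h>
#include <linux/string.h>

static void example_use(struct sockaddr *sa)
{
	/* stand-in for rdma_translate_ip(), rdma_resolve_ip(), ... */
}

static void example_fill_and_use(void)
{
	struct sockaddr_storage ss;
	struct sockaddr_in *sin = (struct sockaddr_in *) &ss;

	memset(&ss, 0, sizeof(ss));
	sin->sin_family = AF_INET;
	sin->sin_port = htons(18515);			/* arbitrary example port */
	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	example_use((struct sockaddr *) &ss);		/* same cast as the call sites above */
}
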
1080 static void cma_set_req_event_data(struct rdma_cm_event *event, 1078 static void cma_set_req_event_data(struct rdma_cm_event *event,
1081 struct ib_cm_req_event_param *req_data, 1079 struct ib_cm_req_event_param *req_data,
1082 void *private_data, int offset) 1080 void *private_data, int offset)
1083 { 1081 {
1084 event->param.conn.private_data = private_data + offset; 1082 event->param.conn.private_data = private_data + offset;
1085 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; 1083 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
1086 event->param.conn.responder_resources = req_data->responder_resources; 1084 event->param.conn.responder_resources = req_data->responder_resources;
1087 event->param.conn.initiator_depth = req_data->initiator_depth; 1085 event->param.conn.initiator_depth = req_data->initiator_depth;
1088 event->param.conn.flow_control = req_data->flow_control; 1086 event->param.conn.flow_control = req_data->flow_control;
1089 event->param.conn.retry_count = req_data->retry_count; 1087 event->param.conn.retry_count = req_data->retry_count;
1090 event->param.conn.rnr_retry_count = req_data->rnr_retry_count; 1088 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
1091 event->param.conn.srq = req_data->srq; 1089 event->param.conn.srq = req_data->srq;
1092 event->param.conn.qp_num = req_data->remote_qpn; 1090 event->param.conn.qp_num = req_data->remote_qpn;
1093 } 1091 }
1094 1092
1095 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1093 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1096 { 1094 {
1097 struct rdma_id_private *listen_id, *conn_id; 1095 struct rdma_id_private *listen_id, *conn_id;
1098 struct rdma_cm_event event; 1096 struct rdma_cm_event event;
1099 int offset, ret; 1097 int offset, ret;
1100 1098
1101 listen_id = cm_id->context; 1099 listen_id = cm_id->context;
1102 if (cma_disable_callback(listen_id, CMA_LISTEN)) 1100 if (cma_disable_callback(listen_id, CMA_LISTEN))
1103 return -ECONNABORTED; 1101 return -ECONNABORTED;
1104 1102
1105 memset(&event, 0, sizeof event); 1103 memset(&event, 0, sizeof event);
1106 offset = cma_user_data_offset(listen_id->id.ps); 1104 offset = cma_user_data_offset(listen_id->id.ps);
1107 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 1105 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1108 if (cma_is_ud_ps(listen_id->id.ps)) { 1106 if (cma_is_ud_ps(listen_id->id.ps)) {
1109 conn_id = cma_new_udp_id(&listen_id->id, ib_event); 1107 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
1110 event.param.ud.private_data = ib_event->private_data + offset; 1108 event.param.ud.private_data = ib_event->private_data + offset;
1111 event.param.ud.private_data_len = 1109 event.param.ud.private_data_len =
1112 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 1110 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
1113 } else { 1111 } else {
1114 conn_id = cma_new_conn_id(&listen_id->id, ib_event); 1112 conn_id = cma_new_conn_id(&listen_id->id, ib_event);
1115 cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 1113 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
1116 ib_event->private_data, offset); 1114 ib_event->private_data, offset);
1117 } 1115 }
1118 if (!conn_id) { 1116 if (!conn_id) {
1119 ret = -ENOMEM; 1117 ret = -ENOMEM;
1120 goto out; 1118 goto out;
1121 } 1119 }
1122 1120
1123 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 1121 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
1124 mutex_lock(&lock); 1122 mutex_lock(&lock);
1125 ret = cma_acquire_dev(conn_id); 1123 ret = cma_acquire_dev(conn_id);
1126 mutex_unlock(&lock); 1124 mutex_unlock(&lock);
1127 if (ret) 1125 if (ret)
1128 goto release_conn_id; 1126 goto release_conn_id;
1129 1127
1130 conn_id->cm_id.ib = cm_id; 1128 conn_id->cm_id.ib = cm_id;
1131 cm_id->context = conn_id; 1129 cm_id->context = conn_id;
1132 cm_id->cm_handler = cma_ib_handler; 1130 cm_id->cm_handler = cma_ib_handler;
1133 1131
1134 ret = conn_id->id.event_handler(&conn_id->id, &event); 1132 ret = conn_id->id.event_handler(&conn_id->id, &event);
1135 if (!ret) { 1133 if (!ret) {
1136 /* 1134 /*
1137 * Acquire mutex to prevent user executing rdma_destroy_id() 1135 * Acquire mutex to prevent user executing rdma_destroy_id()
1138 * while we're accessing the cm_id. 1136 * while we're accessing the cm_id.
1139 */ 1137 */
1140 mutex_lock(&lock); 1138 mutex_lock(&lock);
1141 if (cma_comp(conn_id, CMA_CONNECT) && 1139 if (cma_comp(conn_id, CMA_CONNECT) &&
1142 !cma_is_ud_ps(conn_id->id.ps)) 1140 !cma_is_ud_ps(conn_id->id.ps))
1143 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 1141 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1144 mutex_unlock(&lock); 1142 mutex_unlock(&lock);
1145 mutex_unlock(&conn_id->handler_mutex); 1143 mutex_unlock(&conn_id->handler_mutex);
1146 goto out; 1144 goto out;
1147 } 1145 }
1148 1146
1149 /* Destroy the CM ID by returning a non-zero value. */ 1147 /* Destroy the CM ID by returning a non-zero value. */
1150 conn_id->cm_id.ib = NULL; 1148 conn_id->cm_id.ib = NULL;
1151 1149
1152 release_conn_id: 1150 release_conn_id:
1153 cma_exch(conn_id, CMA_DESTROYING); 1151 cma_exch(conn_id, CMA_DESTROYING);
1154 mutex_unlock(&conn_id->handler_mutex); 1152 mutex_unlock(&conn_id->handler_mutex);
1155 rdma_destroy_id(&conn_id->id); 1153 rdma_destroy_id(&conn_id->id);
1156 1154
1157 out: 1155 out:
1158 mutex_unlock(&listen_id->handler_mutex); 1156 mutex_unlock(&listen_id->handler_mutex);
1159 return ret; 1157 return ret;
1160 } 1158 }
1161 1159
1162 static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr) 1160 static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
1163 { 1161 {
1164 return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr))); 1162 return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
1165 } 1163 }
1166 1164
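cma_get_service_id() above builds the 64-bit IB service ID by placing the port space in the bits above 16 and the port number (converted to host order) in the low 16 bits, then converting the whole value to big-endian. A tiny worked example of that arithmetic (the numeric port-space value here is an assumption for illustration, not a definition taken from rdma_cm.h):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ps   = 0x0106;	/* assumed example value for a port space */
	uint16_t port = 4791;	/* example port, already in host byte order */
	uint64_t svc  = (ps << 16) + port;

	/* cma_get_service_id() would return cpu_to_be64(svc) */
	printf("service id = 0x%016llx\n", (unsigned long long) svc);
	return 0;
}
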
1167 static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, 1165 static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
1168 struct ib_cm_compare_data *compare) 1166 struct ib_cm_compare_data *compare)
1169 { 1167 {
1170 struct cma_hdr *cma_data, *cma_mask; 1168 struct cma_hdr *cma_data, *cma_mask;
1171 struct sdp_hh *sdp_data, *sdp_mask; 1169 struct sdp_hh *sdp_data, *sdp_mask;
1172 __be32 ip4_addr; 1170 __be32 ip4_addr;
1173 struct in6_addr ip6_addr; 1171 struct in6_addr ip6_addr;
1174 1172
1175 memset(compare, 0, sizeof *compare); 1173 memset(compare, 0, sizeof *compare);
1176 cma_data = (void *) compare->data; 1174 cma_data = (void *) compare->data;
1177 cma_mask = (void *) compare->mask; 1175 cma_mask = (void *) compare->mask;
1178 sdp_data = (void *) compare->data; 1176 sdp_data = (void *) compare->data;
1179 sdp_mask = (void *) compare->mask; 1177 sdp_mask = (void *) compare->mask;
1180 1178
1181 switch (addr->sa_family) { 1179 switch (addr->sa_family) {
1182 case AF_INET: 1180 case AF_INET:
1183 ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr; 1181 ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
1184 if (ps == RDMA_PS_SDP) { 1182 if (ps == RDMA_PS_SDP) {
1185 sdp_set_ip_ver(sdp_data, 4); 1183 sdp_set_ip_ver(sdp_data, 4);
1186 sdp_set_ip_ver(sdp_mask, 0xF); 1184 sdp_set_ip_ver(sdp_mask, 0xF);
1187 sdp_data->dst_addr.ip4.addr = ip4_addr; 1185 sdp_data->dst_addr.ip4.addr = ip4_addr;
1188 sdp_mask->dst_addr.ip4.addr = htonl(~0); 1186 sdp_mask->dst_addr.ip4.addr = htonl(~0);
1189 } else { 1187 } else {
1190 cma_set_ip_ver(cma_data, 4); 1188 cma_set_ip_ver(cma_data, 4);
1191 cma_set_ip_ver(cma_mask, 0xF); 1189 cma_set_ip_ver(cma_mask, 0xF);
1192 cma_data->dst_addr.ip4.addr = ip4_addr; 1190 cma_data->dst_addr.ip4.addr = ip4_addr;
1193 cma_mask->dst_addr.ip4.addr = htonl(~0); 1191 cma_mask->dst_addr.ip4.addr = htonl(~0);
1194 } 1192 }
1195 break; 1193 break;
1196 case AF_INET6: 1194 case AF_INET6:
1197 ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr; 1195 ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
1198 if (ps == RDMA_PS_SDP) { 1196 if (ps == RDMA_PS_SDP) {
1199 sdp_set_ip_ver(sdp_data, 6); 1197 sdp_set_ip_ver(sdp_data, 6);
1200 sdp_set_ip_ver(sdp_mask, 0xF); 1198 sdp_set_ip_ver(sdp_mask, 0xF);
1201 sdp_data->dst_addr.ip6 = ip6_addr; 1199 sdp_data->dst_addr.ip6 = ip6_addr;
1202 memset(&sdp_mask->dst_addr.ip6, 0xFF, 1200 memset(&sdp_mask->dst_addr.ip6, 0xFF,
1203 sizeof sdp_mask->dst_addr.ip6); 1201 sizeof sdp_mask->dst_addr.ip6);
1204 } else { 1202 } else {
1205 cma_set_ip_ver(cma_data, 6); 1203 cma_set_ip_ver(cma_data, 6);
1206 cma_set_ip_ver(cma_mask, 0xF); 1204 cma_set_ip_ver(cma_mask, 0xF);
1207 cma_data->dst_addr.ip6 = ip6_addr; 1205 cma_data->dst_addr.ip6 = ip6_addr;
1208 memset(&cma_mask->dst_addr.ip6, 0xFF, 1206 memset(&cma_mask->dst_addr.ip6, 0xFF,
1209 sizeof cma_mask->dst_addr.ip6); 1207 sizeof cma_mask->dst_addr.ip6);
1210 } 1208 }
1211 break; 1209 break;
1212 default: 1210 default:
1213 break; 1211 break;
1214 } 1212 }
1215 } 1213 }
1216 1214
1217 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 1215 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1218 { 1216 {
1219 struct rdma_id_private *id_priv = iw_id->context; 1217 struct rdma_id_private *id_priv = iw_id->context;
1220 struct rdma_cm_event event; 1218 struct rdma_cm_event event;
1221 struct sockaddr_in *sin; 1219 struct sockaddr_in *sin;
1222 int ret = 0; 1220 int ret = 0;
1223 1221
1224 if (cma_disable_callback(id_priv, CMA_CONNECT)) 1222 if (cma_disable_callback(id_priv, CMA_CONNECT))
1225 return 0; 1223 return 0;
1226 1224
1227 memset(&event, 0, sizeof event); 1225 memset(&event, 0, sizeof event);
1228 switch (iw_event->event) { 1226 switch (iw_event->event) {
1229 case IW_CM_EVENT_CLOSE: 1227 case IW_CM_EVENT_CLOSE:
1230 event.event = RDMA_CM_EVENT_DISCONNECTED; 1228 event.event = RDMA_CM_EVENT_DISCONNECTED;
1231 break; 1229 break;
1232 case IW_CM_EVENT_CONNECT_REPLY: 1230 case IW_CM_EVENT_CONNECT_REPLY:
1233 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 1231 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1234 *sin = iw_event->local_addr; 1232 *sin = iw_event->local_addr;
1235 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; 1233 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1236 *sin = iw_event->remote_addr; 1234 *sin = iw_event->remote_addr;
1237 switch (iw_event->status) { 1235 switch (iw_event->status) {
1238 case 0: 1236 case 0:
1239 event.event = RDMA_CM_EVENT_ESTABLISHED; 1237 event.event = RDMA_CM_EVENT_ESTABLISHED;
1240 break; 1238 break;
1241 case -ECONNRESET: 1239 case -ECONNRESET:
1242 case -ECONNREFUSED: 1240 case -ECONNREFUSED:
1243 event.event = RDMA_CM_EVENT_REJECTED; 1241 event.event = RDMA_CM_EVENT_REJECTED;
1244 break; 1242 break;
1245 case -ETIMEDOUT: 1243 case -ETIMEDOUT:
1246 event.event = RDMA_CM_EVENT_UNREACHABLE; 1244 event.event = RDMA_CM_EVENT_UNREACHABLE;
1247 break; 1245 break;
1248 default: 1246 default:
1249 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 1247 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
1250 break; 1248 break;
1251 } 1249 }
1252 break; 1250 break;
1253 case IW_CM_EVENT_ESTABLISHED: 1251 case IW_CM_EVENT_ESTABLISHED:
1254 event.event = RDMA_CM_EVENT_ESTABLISHED; 1252 event.event = RDMA_CM_EVENT_ESTABLISHED;
1255 break; 1253 break;
1256 default: 1254 default:
1257 BUG_ON(1); 1255 BUG_ON(1);
1258 } 1256 }
1259 1257
1260 event.status = iw_event->status; 1258 event.status = iw_event->status;
1261 event.param.conn.private_data = iw_event->private_data; 1259 event.param.conn.private_data = iw_event->private_data;
1262 event.param.conn.private_data_len = iw_event->private_data_len; 1260 event.param.conn.private_data_len = iw_event->private_data_len;
1263 ret = id_priv->id.event_handler(&id_priv->id, &event); 1261 ret = id_priv->id.event_handler(&id_priv->id, &event);
1264 if (ret) { 1262 if (ret) {
1265 /* Destroy the CM ID by returning a non-zero value. */ 1263 /* Destroy the CM ID by returning a non-zero value. */
1266 id_priv->cm_id.iw = NULL; 1264 id_priv->cm_id.iw = NULL;
1267 cma_exch(id_priv, CMA_DESTROYING); 1265 cma_exch(id_priv, CMA_DESTROYING);
1268 mutex_unlock(&id_priv->handler_mutex); 1266 mutex_unlock(&id_priv->handler_mutex);
1269 rdma_destroy_id(&id_priv->id); 1267 rdma_destroy_id(&id_priv->id);
1270 return ret; 1268 return ret;
1271 } 1269 }
1272 1270
1273 mutex_unlock(&id_priv->handler_mutex); 1271 mutex_unlock(&id_priv->handler_mutex);
1274 return ret; 1272 return ret;
1275 } 1273 }
1276 1274
1277 static int iw_conn_req_handler(struct iw_cm_id *cm_id, 1275 static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1278 struct iw_cm_event *iw_event) 1276 struct iw_cm_event *iw_event)
1279 { 1277 {
1280 struct rdma_cm_id *new_cm_id; 1278 struct rdma_cm_id *new_cm_id;
1281 struct rdma_id_private *listen_id, *conn_id; 1279 struct rdma_id_private *listen_id, *conn_id;
1282 struct sockaddr_in *sin; 1280 struct sockaddr_in *sin;
1283 struct net_device *dev = NULL; 1281 struct net_device *dev = NULL;
1284 struct rdma_cm_event event; 1282 struct rdma_cm_event event;
1285 int ret; 1283 int ret;
1286 struct ib_device_attr attr; 1284 struct ib_device_attr attr;
1287 1285
1288 listen_id = cm_id->context; 1286 listen_id = cm_id->context;
1289 if (cma_disable_callback(listen_id, CMA_LISTEN)) 1287 if (cma_disable_callback(listen_id, CMA_LISTEN))
1290 return -ECONNABORTED; 1288 return -ECONNABORTED;
1291 1289
1292 /* Create a new RDMA id for the new IW CM ID */ 1290 /* Create a new RDMA id for the new IW CM ID */
1293 new_cm_id = rdma_create_id(listen_id->id.event_handler, 1291 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1294 listen_id->id.context, 1292 listen_id->id.context,
1295 RDMA_PS_TCP); 1293 RDMA_PS_TCP);
1296 if (IS_ERR(new_cm_id)) { 1294 if (IS_ERR(new_cm_id)) {
1297 ret = -ENOMEM; 1295 ret = -ENOMEM;
1298 goto out; 1296 goto out;
1299 } 1297 }
1300 conn_id = container_of(new_cm_id, struct rdma_id_private, id); 1298 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
1301 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 1299 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
1302 conn_id->state = CMA_CONNECT; 1300 conn_id->state = CMA_CONNECT;
1303 1301
1304 dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr); 1302 dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
1305 if (!dev) { 1303 if (!dev) {
1306 ret = -EADDRNOTAVAIL; 1304 ret = -EADDRNOTAVAIL;
1307 mutex_unlock(&conn_id->handler_mutex); 1305 mutex_unlock(&conn_id->handler_mutex);
1308 rdma_destroy_id(new_cm_id); 1306 rdma_destroy_id(new_cm_id);
1309 goto out; 1307 goto out;
1310 } 1308 }
1311 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL); 1309 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
1312 if (ret) { 1310 if (ret) {
1313 mutex_unlock(&conn_id->handler_mutex); 1311 mutex_unlock(&conn_id->handler_mutex);
1314 rdma_destroy_id(new_cm_id); 1312 rdma_destroy_id(new_cm_id);
1315 goto out; 1313 goto out;
1316 } 1314 }
1317 1315
1318 mutex_lock(&lock); 1316 mutex_lock(&lock);
1319 ret = cma_acquire_dev(conn_id); 1317 ret = cma_acquire_dev(conn_id);
1320 mutex_unlock(&lock); 1318 mutex_unlock(&lock);
1321 if (ret) { 1319 if (ret) {
1322 mutex_unlock(&conn_id->handler_mutex); 1320 mutex_unlock(&conn_id->handler_mutex);
1323 rdma_destroy_id(new_cm_id); 1321 rdma_destroy_id(new_cm_id);
1324 goto out; 1322 goto out;
1325 } 1323 }
1326 1324
1327 conn_id->cm_id.iw = cm_id; 1325 conn_id->cm_id.iw = cm_id;
1328 cm_id->context = conn_id; 1326 cm_id->context = conn_id;
1329 cm_id->cm_handler = cma_iw_handler; 1327 cm_id->cm_handler = cma_iw_handler;
1330 1328
1331 sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr; 1329 sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
1332 *sin = iw_event->local_addr; 1330 *sin = iw_event->local_addr;
1333 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr; 1331 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
1334 *sin = iw_event->remote_addr; 1332 *sin = iw_event->remote_addr;
1335 1333
1336 ret = ib_query_device(conn_id->id.device, &attr); 1334 ret = ib_query_device(conn_id->id.device, &attr);
1337 if (ret) { 1335 if (ret) {
1338 mutex_unlock(&conn_id->handler_mutex); 1336 mutex_unlock(&conn_id->handler_mutex);
1339 rdma_destroy_id(new_cm_id); 1337 rdma_destroy_id(new_cm_id);
1340 goto out; 1338 goto out;
1341 } 1339 }
1342 1340
1343 memset(&event, 0, sizeof event); 1341 memset(&event, 0, sizeof event);
1344 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 1342 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1345 event.param.conn.private_data = iw_event->private_data; 1343 event.param.conn.private_data = iw_event->private_data;
1346 event.param.conn.private_data_len = iw_event->private_data_len; 1344 event.param.conn.private_data_len = iw_event->private_data_len;
1347 event.param.conn.initiator_depth = attr.max_qp_init_rd_atom; 1345 event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
1348 event.param.conn.responder_resources = attr.max_qp_rd_atom; 1346 event.param.conn.responder_resources = attr.max_qp_rd_atom;
1349 ret = conn_id->id.event_handler(&conn_id->id, &event); 1347 ret = conn_id->id.event_handler(&conn_id->id, &event);
1350 if (ret) { 1348 if (ret) {
1351 /* User wants to destroy the CM ID */ 1349 /* User wants to destroy the CM ID */
1352 conn_id->cm_id.iw = NULL; 1350 conn_id->cm_id.iw = NULL;
1353 cma_exch(conn_id, CMA_DESTROYING); 1351 cma_exch(conn_id, CMA_DESTROYING);
1354 mutex_unlock(&conn_id->handler_mutex); 1352 mutex_unlock(&conn_id->handler_mutex);
1355 rdma_destroy_id(&conn_id->id); 1353 rdma_destroy_id(&conn_id->id);
1356 goto out; 1354 goto out;
1357 } 1355 }
1358 1356
1359 mutex_unlock(&conn_id->handler_mutex); 1357 mutex_unlock(&conn_id->handler_mutex);
1360 1358
1361 out: 1359 out:
1362 if (dev) 1360 if (dev)
1363 dev_put(dev); 1361 dev_put(dev);
1364 mutex_unlock(&listen_id->handler_mutex); 1362 mutex_unlock(&listen_id->handler_mutex);
1365 return ret; 1363 return ret;
1366 } 1364 }
1367 1365
1368 static int cma_ib_listen(struct rdma_id_private *id_priv) 1366 static int cma_ib_listen(struct rdma_id_private *id_priv)
1369 { 1367 {
1370 struct ib_cm_compare_data compare_data; 1368 struct ib_cm_compare_data compare_data;
1371 struct sockaddr *addr; 1369 struct sockaddr *addr;
1372 __be64 svc_id; 1370 __be64 svc_id;
1373 int ret; 1371 int ret;
1374 1372
1375 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler, 1373 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
1376 id_priv); 1374 id_priv);
1377 if (IS_ERR(id_priv->cm_id.ib)) 1375 if (IS_ERR(id_priv->cm_id.ib))
1378 return PTR_ERR(id_priv->cm_id.ib); 1376 return PTR_ERR(id_priv->cm_id.ib);
1379 1377
1380 addr = &id_priv->id.route.addr.src_addr; 1378 addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
1381 svc_id = cma_get_service_id(id_priv->id.ps, addr); 1379 svc_id = cma_get_service_id(id_priv->id.ps, addr);
1382 if (cma_any_addr(addr)) 1380 if (cma_any_addr(addr))
1383 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL); 1381 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
1384 else { 1382 else {
1385 cma_set_compare_data(id_priv->id.ps, addr, &compare_data); 1383 cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
1386 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data); 1384 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
1387 } 1385 }
1388 1386
1389 if (ret) { 1387 if (ret) {
1390 ib_destroy_cm_id(id_priv->cm_id.ib); 1388 ib_destroy_cm_id(id_priv->cm_id.ib);
1391 id_priv->cm_id.ib = NULL; 1389 id_priv->cm_id.ib = NULL;
1392 } 1390 }
1393 1391
1394 return ret; 1392 return ret;
1395 } 1393 }
1396 1394
1397 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 1395 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
1398 { 1396 {
1399 int ret; 1397 int ret;
1400 struct sockaddr_in *sin; 1398 struct sockaddr_in *sin;
1401 1399
1402 id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device, 1400 id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
1403 iw_conn_req_handler, 1401 iw_conn_req_handler,
1404 id_priv); 1402 id_priv);
1405 if (IS_ERR(id_priv->cm_id.iw)) 1403 if (IS_ERR(id_priv->cm_id.iw))
1406 return PTR_ERR(id_priv->cm_id.iw); 1404 return PTR_ERR(id_priv->cm_id.iw);
1407 1405
1408 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 1406 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1409 id_priv->cm_id.iw->local_addr = *sin; 1407 id_priv->cm_id.iw->local_addr = *sin;
1410 1408
1411 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); 1409 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
1412 1410
1413 if (ret) { 1411 if (ret) {
1414 iw_destroy_cm_id(id_priv->cm_id.iw); 1412 iw_destroy_cm_id(id_priv->cm_id.iw);
1415 id_priv->cm_id.iw = NULL; 1413 id_priv->cm_id.iw = NULL;
1416 } 1414 }
1417 1415
1418 return ret; 1416 return ret;
1419 } 1417 }
1420 1418
1421 static int cma_listen_handler(struct rdma_cm_id *id, 1419 static int cma_listen_handler(struct rdma_cm_id *id,
1422 struct rdma_cm_event *event) 1420 struct rdma_cm_event *event)
1423 { 1421 {
1424 struct rdma_id_private *id_priv = id->context; 1422 struct rdma_id_private *id_priv = id->context;
1425 1423
1426 id->context = id_priv->id.context; 1424 id->context = id_priv->id.context;
1427 id->event_handler = id_priv->id.event_handler; 1425 id->event_handler = id_priv->id.event_handler;
1428 return id_priv->id.event_handler(id, event); 1426 return id_priv->id.event_handler(id, event);
1429 } 1427 }
1430 1428
1431 static void cma_listen_on_dev(struct rdma_id_private *id_priv, 1429 static void cma_listen_on_dev(struct rdma_id_private *id_priv,
1432 struct cma_device *cma_dev) 1430 struct cma_device *cma_dev)
1433 { 1431 {
1434 struct rdma_id_private *dev_id_priv; 1432 struct rdma_id_private *dev_id_priv;
1435 struct rdma_cm_id *id; 1433 struct rdma_cm_id *id;
1436 int ret; 1434 int ret;
1437 1435
1438 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps); 1436 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
1439 if (IS_ERR(id)) 1437 if (IS_ERR(id))
1440 return; 1438 return;
1441 1439
1442 dev_id_priv = container_of(id, struct rdma_id_private, id); 1440 dev_id_priv = container_of(id, struct rdma_id_private, id);
1443 1441
1444 dev_id_priv->state = CMA_ADDR_BOUND; 1442 dev_id_priv->state = CMA_ADDR_BOUND;
1445 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr, 1443 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
1446 ip_addr_size(&id_priv->id.route.addr.src_addr)); 1444 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
1447 1445
1448 cma_attach_to_dev(dev_id_priv, cma_dev); 1446 cma_attach_to_dev(dev_id_priv, cma_dev);
1449 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); 1447 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
1450 atomic_inc(&id_priv->refcount); 1448 atomic_inc(&id_priv->refcount);
1451 dev_id_priv->internal_id = 1; 1449 dev_id_priv->internal_id = 1;
1452 1450
1453 ret = rdma_listen(id, id_priv->backlog); 1451 ret = rdma_listen(id, id_priv->backlog);
1454 if (ret) 1452 if (ret)
1455 printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, " 1453 printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
1456 "listening on device %s\n", ret, cma_dev->device->name); 1454 "listening on device %s\n", ret, cma_dev->device->name);
1457 } 1455 }
1458 1456
1459 static void cma_listen_on_all(struct rdma_id_private *id_priv) 1457 static void cma_listen_on_all(struct rdma_id_private *id_priv)
1460 { 1458 {
1461 struct cma_device *cma_dev; 1459 struct cma_device *cma_dev;
1462 1460
1463 mutex_lock(&lock); 1461 mutex_lock(&lock);
1464 list_add_tail(&id_priv->list, &listen_any_list); 1462 list_add_tail(&id_priv->list, &listen_any_list);
1465 list_for_each_entry(cma_dev, &dev_list, list) 1463 list_for_each_entry(cma_dev, &dev_list, list)
1466 cma_listen_on_dev(id_priv, cma_dev); 1464 cma_listen_on_dev(id_priv, cma_dev);
1467 mutex_unlock(&lock); 1465 mutex_unlock(&lock);
1468 } 1466 }
1469 1467
1470 static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af) 1468 static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
1471 { 1469 {
1472 struct sockaddr_in addr_in; 1470 struct sockaddr_in addr_in;
1473 1471
1474 memset(&addr_in, 0, sizeof addr_in); 1472 memset(&addr_in, 0, sizeof addr_in);
1475 addr_in.sin_family = af; 1473 addr_in.sin_family = af;
1476 return rdma_bind_addr(id, (struct sockaddr *) &addr_in); 1474 return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
1477 } 1475 }
1478 1476
1479 int rdma_listen(struct rdma_cm_id *id, int backlog) 1477 int rdma_listen(struct rdma_cm_id *id, int backlog)
1480 { 1478 {
1481 struct rdma_id_private *id_priv; 1479 struct rdma_id_private *id_priv;
1482 int ret; 1480 int ret;
1483 1481
1484 id_priv = container_of(id, struct rdma_id_private, id); 1482 id_priv = container_of(id, struct rdma_id_private, id);
1485 if (id_priv->state == CMA_IDLE) { 1483 if (id_priv->state == CMA_IDLE) {
1486 ret = cma_bind_any(id, AF_INET); 1484 ret = cma_bind_any(id, AF_INET);
1487 if (ret) 1485 if (ret)
1488 return ret; 1486 return ret;
1489 } 1487 }
1490 1488
1491 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) 1489 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
1492 return -EINVAL; 1490 return -EINVAL;
1493 1491
1494 id_priv->backlog = backlog; 1492 id_priv->backlog = backlog;
1495 if (id->device) { 1493 if (id->device) {
1496 switch (rdma_node_get_transport(id->device->node_type)) { 1494 switch (rdma_node_get_transport(id->device->node_type)) {
1497 case RDMA_TRANSPORT_IB: 1495 case RDMA_TRANSPORT_IB:
1498 ret = cma_ib_listen(id_priv); 1496 ret = cma_ib_listen(id_priv);
1499 if (ret) 1497 if (ret)
1500 goto err; 1498 goto err;
1501 break; 1499 break;
1502 case RDMA_TRANSPORT_IWARP: 1500 case RDMA_TRANSPORT_IWARP:
1503 ret = cma_iw_listen(id_priv, backlog); 1501 ret = cma_iw_listen(id_priv, backlog);
1504 if (ret) 1502 if (ret)
1505 goto err; 1503 goto err;
1506 break; 1504 break;
1507 default: 1505 default:
1508 ret = -ENOSYS; 1506 ret = -ENOSYS;
1509 goto err; 1507 goto err;
1510 } 1508 }
1511 } else 1509 } else
1512 cma_listen_on_all(id_priv); 1510 cma_listen_on_all(id_priv);
1513 1511
1514 return 0; 1512 return 0;
1515 err: 1513 err:
1516 id_priv->backlog = 0; 1514 id_priv->backlog = 0;
1517 cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); 1515 cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
1518 return ret; 1516 return ret;
1519 } 1517 }
1520 EXPORT_SYMBOL(rdma_listen); 1518 EXPORT_SYMBOL(rdma_listen);
1521 1519
1522 void rdma_set_service_type(struct rdma_cm_id *id, int tos) 1520 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
1523 { 1521 {
1524 struct rdma_id_private *id_priv; 1522 struct rdma_id_private *id_priv;
1525 1523
1526 id_priv = container_of(id, struct rdma_id_private, id); 1524 id_priv = container_of(id, struct rdma_id_private, id);
1527 id_priv->tos = (u8) tos; 1525 id_priv->tos = (u8) tos;
1528 } 1526 }
1529 EXPORT_SYMBOL(rdma_set_service_type); 1527 EXPORT_SYMBOL(rdma_set_service_type);
1530 1528
1531 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec, 1529 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
1532 void *context) 1530 void *context)
1533 { 1531 {
1534 struct cma_work *work = context; 1532 struct cma_work *work = context;
1535 struct rdma_route *route; 1533 struct rdma_route *route;
1536 1534
1537 route = &work->id->id.route; 1535 route = &work->id->id.route;
1538 1536
1539 if (!status) { 1537 if (!status) {
1540 route->num_paths = 1; 1538 route->num_paths = 1;
1541 *route->path_rec = *path_rec; 1539 *route->path_rec = *path_rec;
1542 } else { 1540 } else {
1543 work->old_state = CMA_ROUTE_QUERY; 1541 work->old_state = CMA_ROUTE_QUERY;
1544 work->new_state = CMA_ADDR_RESOLVED; 1542 work->new_state = CMA_ADDR_RESOLVED;
1545 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 1543 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
1546 work->event.status = status; 1544 work->event.status = status;
1547 } 1545 }
1548 1546
1549 queue_work(cma_wq, &work->work); 1547 queue_work(cma_wq, &work->work);
1550 } 1548 }
1551 1549
1552 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, 1550 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1553 struct cma_work *work) 1551 struct cma_work *work)
1554 { 1552 {
1555 struct rdma_addr *addr = &id_priv->id.route.addr; 1553 struct rdma_addr *addr = &id_priv->id.route.addr;
1556 struct ib_sa_path_rec path_rec; 1554 struct ib_sa_path_rec path_rec;
1557 ib_sa_comp_mask comp_mask; 1555 ib_sa_comp_mask comp_mask;
1558 struct sockaddr_in6 *sin6; 1556 struct sockaddr_in6 *sin6;
1559 1557
1560 memset(&path_rec, 0, sizeof path_rec); 1558 memset(&path_rec, 0, sizeof path_rec);
1561 ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid); 1559 ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
1562 ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid); 1560 ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
1563 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr)); 1561 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
1564 path_rec.numb_path = 1; 1562 path_rec.numb_path = 1;
1565 path_rec.reversible = 1; 1563 path_rec.reversible = 1;
1566 path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr); 1564 path_rec.service_id = cma_get_service_id(id_priv->id.ps,
1565 (struct sockaddr *) &addr->dst_addr);
1567 1566
1568 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 1567 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
1569 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 1568 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
1570 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; 1569 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
1571 1570
1572 if (addr->src_addr.sa_family == AF_INET) { 1571 if (addr->src_addr.ss_family == AF_INET) {
1573 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); 1572 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
1574 comp_mask |= IB_SA_PATH_REC_QOS_CLASS; 1573 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
1575 } else { 1574 } else {
1576 sin6 = (struct sockaddr_in6 *) &addr->src_addr; 1575 sin6 = (struct sockaddr_in6 *) &addr->src_addr;
1577 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); 1576 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
1578 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 1577 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
1579 } 1578 }
1580 1579
1581 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, 1580 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
1582 id_priv->id.port_num, &path_rec, 1581 id_priv->id.port_num, &path_rec,
1583 comp_mask, timeout_ms, 1582 comp_mask, timeout_ms,
1584 GFP_KERNEL, cma_query_handler, 1583 GFP_KERNEL, cma_query_handler,
1585 work, &id_priv->query); 1584 work, &id_priv->query);
1586 1585
1587 return (id_priv->query_id < 0) ? id_priv->query_id : 0; 1586 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
1588 } 1587 }
1589 1588
1590 static void cma_work_handler(struct work_struct *_work) 1589 static void cma_work_handler(struct work_struct *_work)
1591 { 1590 {
1592 struct cma_work *work = container_of(_work, struct cma_work, work); 1591 struct cma_work *work = container_of(_work, struct cma_work, work);
1593 struct rdma_id_private *id_priv = work->id; 1592 struct rdma_id_private *id_priv = work->id;
1594 int destroy = 0; 1593 int destroy = 0;
1595 1594
1596 mutex_lock(&id_priv->handler_mutex); 1595 mutex_lock(&id_priv->handler_mutex);
1597 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 1596 if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
1598 goto out; 1597 goto out;
1599 1598
1600 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 1599 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1601 cma_exch(id_priv, CMA_DESTROYING); 1600 cma_exch(id_priv, CMA_DESTROYING);
1602 destroy = 1; 1601 destroy = 1;
1603 } 1602 }
1604 out: 1603 out:
1605 mutex_unlock(&id_priv->handler_mutex); 1604 mutex_unlock(&id_priv->handler_mutex);
1606 cma_deref_id(id_priv); 1605 cma_deref_id(id_priv);
1607 if (destroy) 1606 if (destroy)
1608 rdma_destroy_id(&id_priv->id); 1607 rdma_destroy_id(&id_priv->id);
1609 kfree(work); 1608 kfree(work);
1610 } 1609 }
1611 1610
1612 static void cma_ndev_work_handler(struct work_struct *_work) 1611 static void cma_ndev_work_handler(struct work_struct *_work)
1613 { 1612 {
1614 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); 1613 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
1615 struct rdma_id_private *id_priv = work->id; 1614 struct rdma_id_private *id_priv = work->id;
1616 int destroy = 0; 1615 int destroy = 0;
1617 1616
1618 mutex_lock(&id_priv->handler_mutex); 1617 mutex_lock(&id_priv->handler_mutex);
1619 if (id_priv->state == CMA_DESTROYING || 1618 if (id_priv->state == CMA_DESTROYING ||
1620 id_priv->state == CMA_DEVICE_REMOVAL) 1619 id_priv->state == CMA_DEVICE_REMOVAL)
1621 goto out; 1620 goto out;
1622 1621
1623 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 1622 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1624 cma_exch(id_priv, CMA_DESTROYING); 1623 cma_exch(id_priv, CMA_DESTROYING);
1625 destroy = 1; 1624 destroy = 1;
1626 } 1625 }
1627 1626
1628 out: 1627 out:
1629 mutex_unlock(&id_priv->handler_mutex); 1628 mutex_unlock(&id_priv->handler_mutex);
1630 cma_deref_id(id_priv); 1629 cma_deref_id(id_priv);
1631 if (destroy) 1630 if (destroy)
1632 rdma_destroy_id(&id_priv->id); 1631 rdma_destroy_id(&id_priv->id);
1633 kfree(work); 1632 kfree(work);
1634 } 1633 }
1635 1634
1636 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) 1635 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1637 { 1636 {
1638 struct rdma_route *route = &id_priv->id.route; 1637 struct rdma_route *route = &id_priv->id.route;
1639 struct cma_work *work; 1638 struct cma_work *work;
1640 int ret; 1639 int ret;
1641 1640
1642 work = kzalloc(sizeof *work, GFP_KERNEL); 1641 work = kzalloc(sizeof *work, GFP_KERNEL);
1643 if (!work) 1642 if (!work)
1644 return -ENOMEM; 1643 return -ENOMEM;
1645 1644
1646 work->id = id_priv; 1645 work->id = id_priv;
1647 INIT_WORK(&work->work, cma_work_handler); 1646 INIT_WORK(&work->work, cma_work_handler);
1648 work->old_state = CMA_ROUTE_QUERY; 1647 work->old_state = CMA_ROUTE_QUERY;
1649 work->new_state = CMA_ROUTE_RESOLVED; 1648 work->new_state = CMA_ROUTE_RESOLVED;
1650 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1649 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1651 1650
1652 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 1651 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
1653 if (!route->path_rec) { 1652 if (!route->path_rec) {
1654 ret = -ENOMEM; 1653 ret = -ENOMEM;
1655 goto err1; 1654 goto err1;
1656 } 1655 }
1657 1656
1658 ret = cma_query_ib_route(id_priv, timeout_ms, work); 1657 ret = cma_query_ib_route(id_priv, timeout_ms, work);
1659 if (ret) 1658 if (ret)
1660 goto err2; 1659 goto err2;
1661 1660
1662 return 0; 1661 return 0;
1663 err2: 1662 err2:
1664 kfree(route->path_rec); 1663 kfree(route->path_rec);
1665 route->path_rec = NULL; 1664 route->path_rec = NULL;
1666 err1: 1665 err1:
1667 kfree(work); 1666 kfree(work);
1668 return ret; 1667 return ret;
1669 } 1668 }
1670 1669
1671 int rdma_set_ib_paths(struct rdma_cm_id *id, 1670 int rdma_set_ib_paths(struct rdma_cm_id *id,
1672 struct ib_sa_path_rec *path_rec, int num_paths) 1671 struct ib_sa_path_rec *path_rec, int num_paths)
1673 { 1672 {
1674 struct rdma_id_private *id_priv; 1673 struct rdma_id_private *id_priv;
1675 int ret; 1674 int ret;
1676 1675
1677 id_priv = container_of(id, struct rdma_id_private, id); 1676 id_priv = container_of(id, struct rdma_id_private, id);
1678 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED)) 1677 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
1679 return -EINVAL; 1678 return -EINVAL;
1680 1679
1681 id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL); 1680 id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
1682 if (!id->route.path_rec) { 1681 if (!id->route.path_rec) {
1683 ret = -ENOMEM; 1682 ret = -ENOMEM;
1684 goto err; 1683 goto err;
1685 } 1684 }
1686 1685
1687 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths); 1686 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
1688 return 0; 1687 return 0;
1689 err: 1688 err:
1690 cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); 1689 cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
1691 return ret; 1690 return ret;
1692 } 1691 }
1693 EXPORT_SYMBOL(rdma_set_ib_paths); 1692 EXPORT_SYMBOL(rdma_set_ib_paths);
1694 1693
1695 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) 1694 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1696 { 1695 {
1697 struct cma_work *work; 1696 struct cma_work *work;
1698 1697
1699 work = kzalloc(sizeof *work, GFP_KERNEL); 1698 work = kzalloc(sizeof *work, GFP_KERNEL);
1700 if (!work) 1699 if (!work)
1701 return -ENOMEM; 1700 return -ENOMEM;
1702 1701
1703 work->id = id_priv; 1702 work->id = id_priv;
1704 INIT_WORK(&work->work, cma_work_handler); 1703 INIT_WORK(&work->work, cma_work_handler);
1705 work->old_state = CMA_ROUTE_QUERY; 1704 work->old_state = CMA_ROUTE_QUERY;
1706 work->new_state = CMA_ROUTE_RESOLVED; 1705 work->new_state = CMA_ROUTE_RESOLVED;
1707 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1706 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1708 queue_work(cma_wq, &work->work); 1707 queue_work(cma_wq, &work->work);
1709 return 0; 1708 return 0;
1710 } 1709 }
1711 1710
1712 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) 1711 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1713 { 1712 {
1714 struct rdma_id_private *id_priv; 1713 struct rdma_id_private *id_priv;
1715 int ret; 1714 int ret;
1716 1715
1717 id_priv = container_of(id, struct rdma_id_private, id); 1716 id_priv = container_of(id, struct rdma_id_private, id);
1718 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY)) 1717 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
1719 return -EINVAL; 1718 return -EINVAL;
1720 1719
1721 atomic_inc(&id_priv->refcount); 1720 atomic_inc(&id_priv->refcount);
1722 switch (rdma_node_get_transport(id->device->node_type)) { 1721 switch (rdma_node_get_transport(id->device->node_type)) {
1723 case RDMA_TRANSPORT_IB: 1722 case RDMA_TRANSPORT_IB:
1724 ret = cma_resolve_ib_route(id_priv, timeout_ms); 1723 ret = cma_resolve_ib_route(id_priv, timeout_ms);
1725 break; 1724 break;
1726 case RDMA_TRANSPORT_IWARP: 1725 case RDMA_TRANSPORT_IWARP:
1727 ret = cma_resolve_iw_route(id_priv, timeout_ms); 1726 ret = cma_resolve_iw_route(id_priv, timeout_ms);
1728 break; 1727 break;
1729 default: 1728 default:
1730 ret = -ENOSYS; 1729 ret = -ENOSYS;
1731 break; 1730 break;
1732 } 1731 }
1733 if (ret) 1732 if (ret)
1734 goto err; 1733 goto err;
1735 1734
1736 return 0; 1735 return 0;
1737 err: 1736 err:
1738 cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED); 1737 cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
1739 cma_deref_id(id_priv); 1738 cma_deref_id(id_priv);
1740 return ret; 1739 return ret;
1741 } 1740 }
1742 EXPORT_SYMBOL(rdma_resolve_route); 1741 EXPORT_SYMBOL(rdma_resolve_route);
1743 1742
1744 static int cma_bind_loopback(struct rdma_id_private *id_priv) 1743 static int cma_bind_loopback(struct rdma_id_private *id_priv)
1745 { 1744 {
1746 struct cma_device *cma_dev; 1745 struct cma_device *cma_dev;
1747 struct ib_port_attr port_attr; 1746 struct ib_port_attr port_attr;
1748 union ib_gid gid; 1747 union ib_gid gid;
1749 u16 pkey; 1748 u16 pkey;
1750 int ret; 1749 int ret;
1751 u8 p; 1750 u8 p;
1752 1751
1753 mutex_lock(&lock); 1752 mutex_lock(&lock);
1754 if (list_empty(&dev_list)) { 1753 if (list_empty(&dev_list)) {
1755 ret = -ENODEV; 1754 ret = -ENODEV;
1756 goto out; 1755 goto out;
1757 } 1756 }
1758 list_for_each_entry(cma_dev, &dev_list, list) 1757 list_for_each_entry(cma_dev, &dev_list, list)
1759 for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p) 1758 for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
1760 if (!ib_query_port(cma_dev->device, p, &port_attr) && 1759 if (!ib_query_port(cma_dev->device, p, &port_attr) &&
1761 port_attr.state == IB_PORT_ACTIVE) 1760 port_attr.state == IB_PORT_ACTIVE)
1762 goto port_found; 1761 goto port_found;
1763 1762
1764 p = 1; 1763 p = 1;
1765 cma_dev = list_entry(dev_list.next, struct cma_device, list); 1764 cma_dev = list_entry(dev_list.next, struct cma_device, list);
1766 1765
1767 port_found: 1766 port_found:
1768 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid); 1767 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
1769 if (ret) 1768 if (ret)
1770 goto out; 1769 goto out;
1771 1770
1772 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 1771 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
1773 if (ret) 1772 if (ret)
1774 goto out; 1773 goto out;
1775 1774
1776 ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 1775 ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1777 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 1776 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1778 id_priv->id.port_num = p; 1777 id_priv->id.port_num = p;
1779 cma_attach_to_dev(id_priv, cma_dev); 1778 cma_attach_to_dev(id_priv, cma_dev);
1780 out: 1779 out:
1781 mutex_unlock(&lock); 1780 mutex_unlock(&lock);
1782 return ret; 1781 return ret;
1783 } 1782 }
1784 1783
1785 static void addr_handler(int status, struct sockaddr *src_addr, 1784 static void addr_handler(int status, struct sockaddr *src_addr,
1786 struct rdma_dev_addr *dev_addr, void *context) 1785 struct rdma_dev_addr *dev_addr, void *context)
1787 { 1786 {
1788 struct rdma_id_private *id_priv = context; 1787 struct rdma_id_private *id_priv = context;
1789 struct rdma_cm_event event; 1788 struct rdma_cm_event event;
1790 1789
1791 memset(&event, 0, sizeof event); 1790 memset(&event, 0, sizeof event);
1792 mutex_lock(&id_priv->handler_mutex); 1791 mutex_lock(&id_priv->handler_mutex);
1793 1792
1794 /* 1793 /*
1795 * Grab mutex to block rdma_destroy_id() from removing the device while 1794 * Grab mutex to block rdma_destroy_id() from removing the device while
1796 * we're trying to acquire it. 1795 * we're trying to acquire it.
1797 */ 1796 */
1798 mutex_lock(&lock); 1797 mutex_lock(&lock);
1799 if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) { 1798 if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
1800 mutex_unlock(&lock); 1799 mutex_unlock(&lock);
1801 goto out; 1800 goto out;
1802 } 1801 }
1803 1802
1804 if (!status && !id_priv->cma_dev) 1803 if (!status && !id_priv->cma_dev)
1805 status = cma_acquire_dev(id_priv); 1804 status = cma_acquire_dev(id_priv);
1806 mutex_unlock(&lock); 1805 mutex_unlock(&lock);
1807 1806
1808 if (status) { 1807 if (status) {
1809 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) 1808 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
1810 goto out; 1809 goto out;
1811 event.event = RDMA_CM_EVENT_ADDR_ERROR; 1810 event.event = RDMA_CM_EVENT_ADDR_ERROR;
1812 event.status = status; 1811 event.status = status;
1813 } else { 1812 } else {
1814 memcpy(&id_priv->id.route.addr.src_addr, src_addr, 1813 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1815 ip_addr_size(src_addr)); 1814 ip_addr_size(src_addr));
1816 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 1815 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1817 } 1816 }
1818 1817
1819 if (id_priv->id.event_handler(&id_priv->id, &event)) { 1818 if (id_priv->id.event_handler(&id_priv->id, &event)) {
1820 cma_exch(id_priv, CMA_DESTROYING); 1819 cma_exch(id_priv, CMA_DESTROYING);
1821 mutex_unlock(&id_priv->handler_mutex); 1820 mutex_unlock(&id_priv->handler_mutex);
1822 cma_deref_id(id_priv); 1821 cma_deref_id(id_priv);
1823 rdma_destroy_id(&id_priv->id); 1822 rdma_destroy_id(&id_priv->id);
1824 return; 1823 return;
1825 } 1824 }
1826 out: 1825 out:
1827 mutex_unlock(&id_priv->handler_mutex); 1826 mutex_unlock(&id_priv->handler_mutex);
1828 cma_deref_id(id_priv); 1827 cma_deref_id(id_priv);
1829 } 1828 }
1830 1829
1831 static int cma_resolve_loopback(struct rdma_id_private *id_priv) 1830 static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1832 { 1831 {
1833 struct cma_work *work; 1832 struct cma_work *work;
1834 struct sockaddr_in *src_in, *dst_in; 1833 struct sockaddr_in *src_in, *dst_in;
1835 union ib_gid gid; 1834 union ib_gid gid;
1836 int ret; 1835 int ret;
1837 1836
1838 work = kzalloc(sizeof *work, GFP_KERNEL); 1837 work = kzalloc(sizeof *work, GFP_KERNEL);
1839 if (!work) 1838 if (!work)
1840 return -ENOMEM; 1839 return -ENOMEM;
1841 1840
1842 if (!id_priv->cma_dev) { 1841 if (!id_priv->cma_dev) {
1843 ret = cma_bind_loopback(id_priv); 1842 ret = cma_bind_loopback(id_priv);
1844 if (ret) 1843 if (ret)
1845 goto err; 1844 goto err;
1846 } 1845 }
1847 1846
1848 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 1847 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1849 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 1848 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1850 1849
1851 if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) { 1850 if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
1852 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr; 1851 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
1853 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr; 1852 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
1854 src_in->sin_family = dst_in->sin_family; 1853 src_in->sin_family = dst_in->sin_family;
1855 src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr; 1854 src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
1856 } 1855 }
1857 1856
1858 work->id = id_priv; 1857 work->id = id_priv;
1859 INIT_WORK(&work->work, cma_work_handler); 1858 INIT_WORK(&work->work, cma_work_handler);
1860 work->old_state = CMA_ADDR_QUERY; 1859 work->old_state = CMA_ADDR_QUERY;
1861 work->new_state = CMA_ADDR_RESOLVED; 1860 work->new_state = CMA_ADDR_RESOLVED;
1862 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 1861 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1863 queue_work(cma_wq, &work->work); 1862 queue_work(cma_wq, &work->work);
1864 return 0; 1863 return 0;
1865 err: 1864 err:
1866 kfree(work); 1865 kfree(work);
1867 return ret; 1866 return ret;
1868 } 1867 }
1869 1868
1870 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 1869 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1871 struct sockaddr *dst_addr) 1870 struct sockaddr *dst_addr)
1872 { 1871 {
1873 if (src_addr && src_addr->sa_family) 1872 if (src_addr && src_addr->sa_family)
1874 return rdma_bind_addr(id, src_addr); 1873 return rdma_bind_addr(id, src_addr);
1875 else 1874 else
1876 return cma_bind_any(id, dst_addr->sa_family); 1875 return cma_bind_any(id, dst_addr->sa_family);
1877 } 1876 }
1878 1877
1879 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 1878 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1880 struct sockaddr *dst_addr, int timeout_ms) 1879 struct sockaddr *dst_addr, int timeout_ms)
1881 { 1880 {
1882 struct rdma_id_private *id_priv; 1881 struct rdma_id_private *id_priv;
1883 int ret; 1882 int ret;
1884 1883
1885 id_priv = container_of(id, struct rdma_id_private, id); 1884 id_priv = container_of(id, struct rdma_id_private, id);
1886 if (id_priv->state == CMA_IDLE) { 1885 if (id_priv->state == CMA_IDLE) {
1887 ret = cma_bind_addr(id, src_addr, dst_addr); 1886 ret = cma_bind_addr(id, src_addr, dst_addr);
1888 if (ret) 1887 if (ret)
1889 return ret; 1888 return ret;
1890 } 1889 }
1891 1890
1892 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY)) 1891 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
1893 return -EINVAL; 1892 return -EINVAL;
1894 1893
1895 atomic_inc(&id_priv->refcount); 1894 atomic_inc(&id_priv->refcount);
1896 memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr)); 1895 memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
1897 if (cma_any_addr(dst_addr)) 1896 if (cma_any_addr(dst_addr))
1898 ret = cma_resolve_loopback(id_priv); 1897 ret = cma_resolve_loopback(id_priv);
1899 else 1898 else
1900 ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr, 1899 ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
1901 dst_addr, &id->route.addr.dev_addr, 1900 dst_addr, &id->route.addr.dev_addr,
1902 timeout_ms, addr_handler, id_priv); 1901 timeout_ms, addr_handler, id_priv);
1903 if (ret) 1902 if (ret)
1904 goto err; 1903 goto err;
1905 1904
1906 return 0; 1905 return 0;
1907 err: 1906 err:
1908 cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND); 1907 cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
1909 cma_deref_id(id_priv); 1908 cma_deref_id(id_priv);
1910 return ret; 1909 return ret;
1911 } 1910 }
1912 EXPORT_SYMBOL(rdma_resolve_addr); 1911 EXPORT_SYMBOL(rdma_resolve_addr);
1913 1912
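rdma_resolve_addr() above copies the destination into the id, resolves loopback destinations directly, and otherwise passes the stored source address to rdma_resolve_ip() with the same explicit cast. A hedged sketch of how a kernel consumer might drive it (the function and the destination are invented for illustration; the rdma_create_id(), rdma_resolve_addr() and rdma_destroy_id() signatures are the ones used in this file, and passing a NULL source lets cma_bind_addr() fall back to binding any address):

#include <linux/err.h>
#include <linux/in.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <rdma/rdma_cm.h>

/* Hypothetical consumer: create an id and start address resolution,
 * letting the CM pick the source address. */
static int example_resolve(rdma_cm_event_handler handler, void *ctx)
{
	struct rdma_cm_id *id;
	struct sockaddr_in dst;
	int ret;

	id = rdma_create_id(handler, ctx, RDMA_PS_TCP);
	if (IS_ERR(id))
		return PTR_ERR(id);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(18515);			/* arbitrary example port */
	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);	/* arbitrary example address */

	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *) &dst, 2000);
	if (ret)
		rdma_destroy_id(id);
	return ret;
}
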
1914 static void cma_bind_port(struct rdma_bind_list *bind_list, 1913 static void cma_bind_port(struct rdma_bind_list *bind_list,
1915 struct rdma_id_private *id_priv) 1914 struct rdma_id_private *id_priv)
1916 { 1915 {
1917 struct sockaddr_in *sin; 1916 struct sockaddr_in *sin;
1918 1917
1919 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 1918 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1920 sin->sin_port = htons(bind_list->port); 1919 sin->sin_port = htons(bind_list->port);
1921 id_priv->bind_list = bind_list; 1920 id_priv->bind_list = bind_list;
1922 hlist_add_head(&id_priv->node, &bind_list->owners); 1921 hlist_add_head(&id_priv->node, &bind_list->owners);
1923 } 1922 }
1924 1923
1925 static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv, 1924 static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
1926 unsigned short snum) 1925 unsigned short snum)
1927 { 1926 {
1928 struct rdma_bind_list *bind_list; 1927 struct rdma_bind_list *bind_list;
1929 int port, ret; 1928 int port, ret;
1930 1929
1931 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 1930 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1932 if (!bind_list) 1931 if (!bind_list)
1933 return -ENOMEM; 1932 return -ENOMEM;
1934 1933
1935 do { 1934 do {
1936 ret = idr_get_new_above(ps, bind_list, snum, &port); 1935 ret = idr_get_new_above(ps, bind_list, snum, &port);
1937 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); 1936 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1938 1937
1939 if (ret) 1938 if (ret)
1940 goto err1; 1939 goto err1;
1941 1940
1942 if (port != snum) { 1941 if (port != snum) {
1943 ret = -EADDRNOTAVAIL; 1942 ret = -EADDRNOTAVAIL;
1944 goto err2; 1943 goto err2;
1945 } 1944 }
1946 1945
1947 bind_list->ps = ps; 1946 bind_list->ps = ps;
1948 bind_list->port = (unsigned short) port; 1947 bind_list->port = (unsigned short) port;
1949 cma_bind_port(bind_list, id_priv); 1948 cma_bind_port(bind_list, id_priv);
1950 return 0; 1949 return 0;
1951 err2: 1950 err2:
1952 idr_remove(ps, port); 1951 idr_remove(ps, port);
1953 err1: 1952 err1:
1954 kfree(bind_list); 1953 kfree(bind_list);
1955 return ret; 1954 return ret;
1956 } 1955 }
1957 1956
1958 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) 1957 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
1959 { 1958 {
1960 struct rdma_bind_list *bind_list; 1959 struct rdma_bind_list *bind_list;
1961 int port, ret, low, high; 1960 int port, ret, low, high;
1962 1961
1963 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 1962 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1964 if (!bind_list) 1963 if (!bind_list)
1965 return -ENOMEM; 1964 return -ENOMEM;
1966 1965
1967 retry: 1966 retry:
1968 /* FIXME: add proper port randomization per like inet_csk_get_port */ 1967 /* FIXME: add proper port randomization per like inet_csk_get_port */
1969 do { 1968 do {
1970 ret = idr_get_new_above(ps, bind_list, next_port, &port); 1969 ret = idr_get_new_above(ps, bind_list, next_port, &port);
1971 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); 1970 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1972 1971
1973 if (ret) 1972 if (ret)
1974 goto err1; 1973 goto err1;
1975 1974
1976 inet_get_local_port_range(&low, &high); 1975 inet_get_local_port_range(&low, &high);
1977 if (port > high) { 1976 if (port > high) {
1978 if (next_port != low) { 1977 if (next_port != low) {
1979 idr_remove(ps, port); 1978 idr_remove(ps, port);
1980 next_port = low; 1979 next_port = low;
1981 goto retry; 1980 goto retry;
1982 } 1981 }
1983 ret = -EADDRNOTAVAIL; 1982 ret = -EADDRNOTAVAIL;
1984 goto err2; 1983 goto err2;
1985 } 1984 }
1986 1985
1987 if (port == high) 1986 if (port == high)
1988 next_port = low; 1987 next_port = low;
1989 else 1988 else
1990 next_port = port + 1; 1989 next_port = port + 1;
1991 1990
1992 bind_list->ps = ps; 1991 bind_list->ps = ps;
1993 bind_list->port = (unsigned short) port; 1992 bind_list->port = (unsigned short) port;
1994 cma_bind_port(bind_list, id_priv); 1993 cma_bind_port(bind_list, id_priv);
1995 return 0; 1994 return 0;
1996 err2: 1995 err2:
1997 idr_remove(ps, port); 1996 idr_remove(ps, port);
1998 err1: 1997 err1:
1999 kfree(bind_list); 1998 kfree(bind_list);
2000 return ret; 1999 return ret;
2001 } 2000 }
2002 2001
2003 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) 2002 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
2004 { 2003 {
2005 struct rdma_id_private *cur_id; 2004 struct rdma_id_private *cur_id;
2006 struct sockaddr_in *sin, *cur_sin; 2005 struct sockaddr_in *sin, *cur_sin;
2007 struct rdma_bind_list *bind_list; 2006 struct rdma_bind_list *bind_list;
2008 struct hlist_node *node; 2007 struct hlist_node *node;
2009 unsigned short snum; 2008 unsigned short snum;
2010 2009
2011 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 2010 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
2012 snum = ntohs(sin->sin_port); 2011 snum = ntohs(sin->sin_port);
2013 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 2012 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
2014 return -EACCES; 2013 return -EACCES;
2015 2014
2016 bind_list = idr_find(ps, snum); 2015 bind_list = idr_find(ps, snum);
2017 if (!bind_list) 2016 if (!bind_list)
2018 return cma_alloc_port(ps, id_priv, snum); 2017 return cma_alloc_port(ps, id_priv, snum);
2019 2018
2020 /* 2019 /*
2021 * We don't support binding to any address if anyone is bound to 2020 * We don't support binding to any address if anyone is bound to
2022 * a specific address on the same port. 2021 * a specific address on the same port.
2023 */ 2022 */
2024 if (cma_any_addr(&id_priv->id.route.addr.src_addr)) 2023 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
2025 return -EADDRNOTAVAIL; 2024 return -EADDRNOTAVAIL;
2026 2025
2027 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { 2026 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
2028 if (cma_any_addr(&cur_id->id.route.addr.src_addr)) 2027 if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
2029 return -EADDRNOTAVAIL; 2028 return -EADDRNOTAVAIL;
2030 2029
2031 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; 2030 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
2032 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) 2031 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
2033 return -EADDRINUSE; 2032 return -EADDRINUSE;
2034 } 2033 }
2035 2034
2036 cma_bind_port(bind_list, id_priv); 2035 cma_bind_port(bind_list, id_priv);
2037 return 0; 2036 return 0;
2038 } 2037 }
2039 2038
2040 static int cma_get_port(struct rdma_id_private *id_priv) 2039 static int cma_get_port(struct rdma_id_private *id_priv)
2041 { 2040 {
2042 struct idr *ps; 2041 struct idr *ps;
2043 int ret; 2042 int ret;
2044 2043
2045 switch (id_priv->id.ps) { 2044 switch (id_priv->id.ps) {
2046 case RDMA_PS_SDP: 2045 case RDMA_PS_SDP:
2047 ps = &sdp_ps; 2046 ps = &sdp_ps;
2048 break; 2047 break;
2049 case RDMA_PS_TCP: 2048 case RDMA_PS_TCP:
2050 ps = &tcp_ps; 2049 ps = &tcp_ps;
2051 break; 2050 break;
2052 case RDMA_PS_UDP: 2051 case RDMA_PS_UDP:
2053 ps = &udp_ps; 2052 ps = &udp_ps;
2054 break; 2053 break;
2055 case RDMA_PS_IPOIB: 2054 case RDMA_PS_IPOIB:
2056 ps = &ipoib_ps; 2055 ps = &ipoib_ps;
2057 break; 2056 break;
2058 default: 2057 default:
2059 return -EPROTONOSUPPORT; 2058 return -EPROTONOSUPPORT;
2060 } 2059 }
2061 2060
2062 mutex_lock(&lock); 2061 mutex_lock(&lock);
2063 if (cma_any_port(&id_priv->id.route.addr.src_addr)) 2062 if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
2064 ret = cma_alloc_any_port(ps, id_priv); 2063 ret = cma_alloc_any_port(ps, id_priv);
2065 else 2064 else
2066 ret = cma_use_port(ps, id_priv); 2065 ret = cma_use_port(ps, id_priv);
2067 mutex_unlock(&lock); 2066 mutex_unlock(&lock);
2068 2067
2069 return ret; 2068 return ret;
2070 } 2069 }
2071 2070
2072 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 2071 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2073 { 2072 {
2074 struct rdma_id_private *id_priv; 2073 struct rdma_id_private *id_priv;
2075 int ret; 2074 int ret;
2076 2075
2077 if (addr->sa_family != AF_INET) 2076 if (addr->sa_family != AF_INET)
2078 return -EAFNOSUPPORT; 2077 return -EAFNOSUPPORT;
2079 2078
2080 id_priv = container_of(id, struct rdma_id_private, id); 2079 id_priv = container_of(id, struct rdma_id_private, id);
2081 if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND)) 2080 if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
2082 return -EINVAL; 2081 return -EINVAL;
2083 2082
2084 if (!cma_any_addr(addr)) { 2083 if (!cma_any_addr(addr)) {
2085 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); 2084 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2086 if (ret) 2085 if (ret)
2087 goto err1; 2086 goto err1;
2088 2087
2089 mutex_lock(&lock); 2088 mutex_lock(&lock);
2090 ret = cma_acquire_dev(id_priv); 2089 ret = cma_acquire_dev(id_priv);
2091 mutex_unlock(&lock); 2090 mutex_unlock(&lock);
2092 if (ret) 2091 if (ret)
2093 goto err1; 2092 goto err1;
2094 } 2093 }
2095 2094
2096 memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr)); 2095 memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
2097 ret = cma_get_port(id_priv); 2096 ret = cma_get_port(id_priv);
2098 if (ret) 2097 if (ret)
2099 goto err2; 2098 goto err2;
2100 2099
2101 return 0; 2100 return 0;
2102 err2: 2101 err2:
2103 if (!cma_any_addr(addr)) { 2102 if (!cma_any_addr(addr)) {
2104 mutex_lock(&lock); 2103 mutex_lock(&lock);
2105 cma_detach_from_dev(id_priv); 2104 cma_detach_from_dev(id_priv);
2106 mutex_unlock(&lock); 2105 mutex_unlock(&lock);
2107 } 2106 }
2108 err1: 2107 err1:
2109 cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE); 2108 cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
2110 return ret; 2109 return ret;
2111 } 2110 }
2112 EXPORT_SYMBOL(rdma_bind_addr); 2111 EXPORT_SYMBOL(rdma_bind_addr);
2113 2112
2114 static int cma_format_hdr(void *hdr, enum rdma_port_space ps, 2113 static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
2115 struct rdma_route *route) 2114 struct rdma_route *route)
2116 { 2115 {
2117 struct sockaddr_in *src4, *dst4; 2116 struct sockaddr_in *src4, *dst4;
2118 struct cma_hdr *cma_hdr; 2117 struct cma_hdr *cma_hdr;
2119 struct sdp_hh *sdp_hdr; 2118 struct sdp_hh *sdp_hdr;
2120 2119
2121 src4 = (struct sockaddr_in *) &route->addr.src_addr; 2120 src4 = (struct sockaddr_in *) &route->addr.src_addr;
2122 dst4 = (struct sockaddr_in *) &route->addr.dst_addr; 2121 dst4 = (struct sockaddr_in *) &route->addr.dst_addr;
2123 2122
2124 switch (ps) { 2123 switch (ps) {
2125 case RDMA_PS_SDP: 2124 case RDMA_PS_SDP:
2126 sdp_hdr = hdr; 2125 sdp_hdr = hdr;
2127 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) 2126 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
2128 return -EINVAL; 2127 return -EINVAL;
2129 sdp_set_ip_ver(sdp_hdr, 4); 2128 sdp_set_ip_ver(sdp_hdr, 4);
2130 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 2129 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2131 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 2130 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2132 sdp_hdr->port = src4->sin_port; 2131 sdp_hdr->port = src4->sin_port;
2133 break; 2132 break;
2134 default: 2133 default:
2135 cma_hdr = hdr; 2134 cma_hdr = hdr;
2136 cma_hdr->cma_version = CMA_VERSION; 2135 cma_hdr->cma_version = CMA_VERSION;
2137 cma_set_ip_ver(cma_hdr, 4); 2136 cma_set_ip_ver(cma_hdr, 4);
2138 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 2137 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2139 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 2138 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2140 cma_hdr->port = src4->sin_port; 2139 cma_hdr->port = src4->sin_port;
2141 break; 2140 break;
2142 } 2141 }
2143 return 0; 2142 return 0;
2144 } 2143 }
2145 2144
2146 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 2145 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2147 struct ib_cm_event *ib_event) 2146 struct ib_cm_event *ib_event)
2148 { 2147 {
2149 struct rdma_id_private *id_priv = cm_id->context; 2148 struct rdma_id_private *id_priv = cm_id->context;
2150 struct rdma_cm_event event; 2149 struct rdma_cm_event event;
2151 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 2150 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
2152 int ret = 0; 2151 int ret = 0;
2153 2152
2154 if (cma_disable_callback(id_priv, CMA_CONNECT)) 2153 if (cma_disable_callback(id_priv, CMA_CONNECT))
2155 return 0; 2154 return 0;
2156 2155
2157 memset(&event, 0, sizeof event); 2156 memset(&event, 0, sizeof event);
2158 switch (ib_event->event) { 2157 switch (ib_event->event) {
2159 case IB_CM_SIDR_REQ_ERROR: 2158 case IB_CM_SIDR_REQ_ERROR:
2160 event.event = RDMA_CM_EVENT_UNREACHABLE; 2159 event.event = RDMA_CM_EVENT_UNREACHABLE;
2161 event.status = -ETIMEDOUT; 2160 event.status = -ETIMEDOUT;
2162 break; 2161 break;
2163 case IB_CM_SIDR_REP_RECEIVED: 2162 case IB_CM_SIDR_REP_RECEIVED:
2164 event.param.ud.private_data = ib_event->private_data; 2163 event.param.ud.private_data = ib_event->private_data;
2165 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 2164 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
2166 if (rep->status != IB_SIDR_SUCCESS) { 2165 if (rep->status != IB_SIDR_SUCCESS) {
2167 event.event = RDMA_CM_EVENT_UNREACHABLE; 2166 event.event = RDMA_CM_EVENT_UNREACHABLE;
2168 event.status = ib_event->param.sidr_rep_rcvd.status; 2167 event.status = ib_event->param.sidr_rep_rcvd.status;
2169 break; 2168 break;
2170 } 2169 }
2171 if (id_priv->qkey != rep->qkey) { 2170 if (id_priv->qkey != rep->qkey) {
2172 event.event = RDMA_CM_EVENT_UNREACHABLE; 2171 event.event = RDMA_CM_EVENT_UNREACHABLE;
2173 event.status = -EINVAL; 2172 event.status = -EINVAL;
2174 break; 2173 break;
2175 } 2174 }
2176 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, 2175 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
2177 id_priv->id.route.path_rec, 2176 id_priv->id.route.path_rec,
2178 &event.param.ud.ah_attr); 2177 &event.param.ud.ah_attr);
2179 event.param.ud.qp_num = rep->qpn; 2178 event.param.ud.qp_num = rep->qpn;
2180 event.param.ud.qkey = rep->qkey; 2179 event.param.ud.qkey = rep->qkey;
2181 event.event = RDMA_CM_EVENT_ESTABLISHED; 2180 event.event = RDMA_CM_EVENT_ESTABLISHED;
2182 event.status = 0; 2181 event.status = 0;
2183 break; 2182 break;
2184 default: 2183 default:
2185 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n", 2184 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
2186 ib_event->event); 2185 ib_event->event);
2187 goto out; 2186 goto out;
2188 } 2187 }
2189 2188
2190 ret = id_priv->id.event_handler(&id_priv->id, &event); 2189 ret = id_priv->id.event_handler(&id_priv->id, &event);
2191 if (ret) { 2190 if (ret) {
2192 /* Destroy the CM ID by returning a non-zero value. */ 2191 /* Destroy the CM ID by returning a non-zero value. */
2193 id_priv->cm_id.ib = NULL; 2192 id_priv->cm_id.ib = NULL;
2194 cma_exch(id_priv, CMA_DESTROYING); 2193 cma_exch(id_priv, CMA_DESTROYING);
2195 mutex_unlock(&id_priv->handler_mutex); 2194 mutex_unlock(&id_priv->handler_mutex);
2196 rdma_destroy_id(&id_priv->id); 2195 rdma_destroy_id(&id_priv->id);
2197 return ret; 2196 return ret;
2198 } 2197 }
2199 out: 2198 out:
2200 mutex_unlock(&id_priv->handler_mutex); 2199 mutex_unlock(&id_priv->handler_mutex);
2201 return ret; 2200 return ret;
2202 } 2201 }
2203 2202
2204 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 2203 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2205 struct rdma_conn_param *conn_param) 2204 struct rdma_conn_param *conn_param)
2206 { 2205 {
2207 struct ib_cm_sidr_req_param req; 2206 struct ib_cm_sidr_req_param req;
2208 struct rdma_route *route; 2207 struct rdma_route *route;
2209 int ret; 2208 int ret;
2210 2209
2211 req.private_data_len = sizeof(struct cma_hdr) + 2210 req.private_data_len = sizeof(struct cma_hdr) +
2212 conn_param->private_data_len; 2211 conn_param->private_data_len;
2213 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 2212 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2214 if (!req.private_data) 2213 if (!req.private_data)
2215 return -ENOMEM; 2214 return -ENOMEM;
2216 2215
2217 if (conn_param->private_data && conn_param->private_data_len) 2216 if (conn_param->private_data && conn_param->private_data_len)
2218 memcpy((void *) req.private_data + sizeof(struct cma_hdr), 2217 memcpy((void *) req.private_data + sizeof(struct cma_hdr),
2219 conn_param->private_data, conn_param->private_data_len); 2218 conn_param->private_data, conn_param->private_data_len);
2220 2219
2221 route = &id_priv->id.route; 2220 route = &id_priv->id.route;
2222 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route); 2221 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
2223 if (ret) 2222 if (ret)
2224 goto out; 2223 goto out;
2225 2224
2226 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, 2225 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
2227 cma_sidr_rep_handler, id_priv); 2226 cma_sidr_rep_handler, id_priv);
2228 if (IS_ERR(id_priv->cm_id.ib)) { 2227 if (IS_ERR(id_priv->cm_id.ib)) {
2229 ret = PTR_ERR(id_priv->cm_id.ib); 2228 ret = PTR_ERR(id_priv->cm_id.ib);
2230 goto out; 2229 goto out;
2231 } 2230 }
2232 2231
2233 req.path = route->path_rec; 2232 req.path = route->path_rec;
2234 req.service_id = cma_get_service_id(id_priv->id.ps, 2233 req.service_id = cma_get_service_id(id_priv->id.ps,
2235 &route->addr.dst_addr); 2234 (struct sockaddr *) &route->addr.dst_addr);
2236 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 2235 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
2237 req.max_cm_retries = CMA_MAX_CM_RETRIES; 2236 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2238 2237
2239 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 2238 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
2240 if (ret) { 2239 if (ret) {
2241 ib_destroy_cm_id(id_priv->cm_id.ib); 2240 ib_destroy_cm_id(id_priv->cm_id.ib);
2242 id_priv->cm_id.ib = NULL; 2241 id_priv->cm_id.ib = NULL;
2243 } 2242 }
2244 out: 2243 out:
2245 kfree(req.private_data); 2244 kfree(req.private_data);
2246 return ret; 2245 return ret;
2247 } 2246 }
2248 2247
2249 static int cma_connect_ib(struct rdma_id_private *id_priv, 2248 static int cma_connect_ib(struct rdma_id_private *id_priv,
2250 struct rdma_conn_param *conn_param) 2249 struct rdma_conn_param *conn_param)
2251 { 2250 {
2252 struct ib_cm_req_param req; 2251 struct ib_cm_req_param req;
2253 struct rdma_route *route; 2252 struct rdma_route *route;
2254 void *private_data; 2253 void *private_data;
2255 int offset, ret; 2254 int offset, ret;
2256 2255
2257 memset(&req, 0, sizeof req); 2256 memset(&req, 0, sizeof req);
2258 offset = cma_user_data_offset(id_priv->id.ps); 2257 offset = cma_user_data_offset(id_priv->id.ps);
2259 req.private_data_len = offset + conn_param->private_data_len; 2258 req.private_data_len = offset + conn_param->private_data_len;
2260 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 2259 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2261 if (!private_data) 2260 if (!private_data)
2262 return -ENOMEM; 2261 return -ENOMEM;
2263 2262
2264 if (conn_param->private_data && conn_param->private_data_len) 2263 if (conn_param->private_data && conn_param->private_data_len)
2265 memcpy(private_data + offset, conn_param->private_data, 2264 memcpy(private_data + offset, conn_param->private_data,
2266 conn_param->private_data_len); 2265 conn_param->private_data_len);
2267 2266
2268 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler, 2267 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
2269 id_priv); 2268 id_priv);
2270 if (IS_ERR(id_priv->cm_id.ib)) { 2269 if (IS_ERR(id_priv->cm_id.ib)) {
2271 ret = PTR_ERR(id_priv->cm_id.ib); 2270 ret = PTR_ERR(id_priv->cm_id.ib);
2272 goto out; 2271 goto out;
2273 } 2272 }
2274 2273
2275 route = &id_priv->id.route; 2274 route = &id_priv->id.route;
2276 ret = cma_format_hdr(private_data, id_priv->id.ps, route); 2275 ret = cma_format_hdr(private_data, id_priv->id.ps, route);
2277 if (ret) 2276 if (ret)
2278 goto out; 2277 goto out;
2279 req.private_data = private_data; 2278 req.private_data = private_data;
2280 2279
2281 req.primary_path = &route->path_rec[0]; 2280 req.primary_path = &route->path_rec[0];
2282 if (route->num_paths == 2) 2281 if (route->num_paths == 2)
2283 req.alternate_path = &route->path_rec[1]; 2282 req.alternate_path = &route->path_rec[1];
2284 2283
2285 req.service_id = cma_get_service_id(id_priv->id.ps, 2284 req.service_id = cma_get_service_id(id_priv->id.ps,
2286 &route->addr.dst_addr); 2285 (struct sockaddr *) &route->addr.dst_addr);
2287 req.qp_num = id_priv->qp_num; 2286 req.qp_num = id_priv->qp_num;
2288 req.qp_type = IB_QPT_RC; 2287 req.qp_type = IB_QPT_RC;
2289 req.starting_psn = id_priv->seq_num; 2288 req.starting_psn = id_priv->seq_num;
2290 req.responder_resources = conn_param->responder_resources; 2289 req.responder_resources = conn_param->responder_resources;
2291 req.initiator_depth = conn_param->initiator_depth; 2290 req.initiator_depth = conn_param->initiator_depth;
2292 req.flow_control = conn_param->flow_control; 2291 req.flow_control = conn_param->flow_control;
2293 req.retry_count = conn_param->retry_count; 2292 req.retry_count = conn_param->retry_count;
2294 req.rnr_retry_count = conn_param->rnr_retry_count; 2293 req.rnr_retry_count = conn_param->rnr_retry_count;
2295 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 2294 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2296 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 2295 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2297 req.max_cm_retries = CMA_MAX_CM_RETRIES; 2296 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2298 req.srq = id_priv->srq ? 1 : 0; 2297 req.srq = id_priv->srq ? 1 : 0;
2299 2298
2300 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 2299 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
2301 out: 2300 out:
2302 if (ret && !IS_ERR(id_priv->cm_id.ib)) { 2301 if (ret && !IS_ERR(id_priv->cm_id.ib)) {
2303 ib_destroy_cm_id(id_priv->cm_id.ib); 2302 ib_destroy_cm_id(id_priv->cm_id.ib);
2304 id_priv->cm_id.ib = NULL; 2303 id_priv->cm_id.ib = NULL;
2305 } 2304 }
2306 2305
2307 kfree(private_data); 2306 kfree(private_data);
2308 return ret; 2307 return ret;
2309 } 2308 }
2310 2309
2311 static int cma_connect_iw(struct rdma_id_private *id_priv, 2310 static int cma_connect_iw(struct rdma_id_private *id_priv,
2312 struct rdma_conn_param *conn_param) 2311 struct rdma_conn_param *conn_param)
2313 { 2312 {
2314 struct iw_cm_id *cm_id; 2313 struct iw_cm_id *cm_id;
2315 struct sockaddr_in* sin; 2314 struct sockaddr_in* sin;
2316 int ret; 2315 int ret;
2317 struct iw_cm_conn_param iw_param; 2316 struct iw_cm_conn_param iw_param;
2318 2317
2319 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 2318 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
2320 if (IS_ERR(cm_id)) { 2319 if (IS_ERR(cm_id)) {
2321 ret = PTR_ERR(cm_id); 2320 ret = PTR_ERR(cm_id);
2322 goto out; 2321 goto out;
2323 } 2322 }
2324 2323
2325 id_priv->cm_id.iw = cm_id; 2324 id_priv->cm_id.iw = cm_id;
2326 2325
2327 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr; 2326 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr;
2328 cm_id->local_addr = *sin; 2327 cm_id->local_addr = *sin;
2329 2328
2330 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr; 2329 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
2331 cm_id->remote_addr = *sin; 2330 cm_id->remote_addr = *sin;
2332 2331
2333 ret = cma_modify_qp_rtr(id_priv, conn_param); 2332 ret = cma_modify_qp_rtr(id_priv, conn_param);
2334 if (ret) 2333 if (ret)
2335 goto out; 2334 goto out;
2336 2335
2337 iw_param.ord = conn_param->initiator_depth; 2336 iw_param.ord = conn_param->initiator_depth;
2338 iw_param.ird = conn_param->responder_resources; 2337 iw_param.ird = conn_param->responder_resources;
2339 iw_param.private_data = conn_param->private_data; 2338 iw_param.private_data = conn_param->private_data;
2340 iw_param.private_data_len = conn_param->private_data_len; 2339 iw_param.private_data_len = conn_param->private_data_len;
2341 if (id_priv->id.qp) 2340 if (id_priv->id.qp)
2342 iw_param.qpn = id_priv->qp_num; 2341 iw_param.qpn = id_priv->qp_num;
2343 else 2342 else
2344 iw_param.qpn = conn_param->qp_num; 2343 iw_param.qpn = conn_param->qp_num;
2345 ret = iw_cm_connect(cm_id, &iw_param); 2344 ret = iw_cm_connect(cm_id, &iw_param);
2346 out: 2345 out:
2347 if (ret && !IS_ERR(cm_id)) { 2346 if (ret && !IS_ERR(cm_id)) {
2348 iw_destroy_cm_id(cm_id); 2347 iw_destroy_cm_id(cm_id);
2349 id_priv->cm_id.iw = NULL; 2348 id_priv->cm_id.iw = NULL;
2350 } 2349 }
2351 return ret; 2350 return ret;
2352 } 2351 }
2353 2352
2354 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 2353 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2355 { 2354 {
2356 struct rdma_id_private *id_priv; 2355 struct rdma_id_private *id_priv;
2357 int ret; 2356 int ret;
2358 2357
2359 id_priv = container_of(id, struct rdma_id_private, id); 2358 id_priv = container_of(id, struct rdma_id_private, id);
2360 if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT)) 2359 if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
2361 return -EINVAL; 2360 return -EINVAL;
2362 2361
2363 if (!id->qp) { 2362 if (!id->qp) {
2364 id_priv->qp_num = conn_param->qp_num; 2363 id_priv->qp_num = conn_param->qp_num;
2365 id_priv->srq = conn_param->srq; 2364 id_priv->srq = conn_param->srq;
2366 } 2365 }
2367 2366
2368 switch (rdma_node_get_transport(id->device->node_type)) { 2367 switch (rdma_node_get_transport(id->device->node_type)) {
2369 case RDMA_TRANSPORT_IB: 2368 case RDMA_TRANSPORT_IB:
2370 if (cma_is_ud_ps(id->ps)) 2369 if (cma_is_ud_ps(id->ps))
2371 ret = cma_resolve_ib_udp(id_priv, conn_param); 2370 ret = cma_resolve_ib_udp(id_priv, conn_param);
2372 else 2371 else
2373 ret = cma_connect_ib(id_priv, conn_param); 2372 ret = cma_connect_ib(id_priv, conn_param);
2374 break; 2373 break;
2375 case RDMA_TRANSPORT_IWARP: 2374 case RDMA_TRANSPORT_IWARP:
2376 ret = cma_connect_iw(id_priv, conn_param); 2375 ret = cma_connect_iw(id_priv, conn_param);
2377 break; 2376 break;
2378 default: 2377 default:
2379 ret = -ENOSYS; 2378 ret = -ENOSYS;
2380 break; 2379 break;
2381 } 2380 }
2382 if (ret) 2381 if (ret)
2383 goto err; 2382 goto err;
2384 2383
2385 return 0; 2384 return 0;
2386 err: 2385 err:
2387 cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED); 2386 cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
2388 return ret; 2387 return ret;
2389 } 2388 }
2390 EXPORT_SYMBOL(rdma_connect); 2389 EXPORT_SYMBOL(rdma_connect);
2391 2390
2392 static int cma_accept_ib(struct rdma_id_private *id_priv, 2391 static int cma_accept_ib(struct rdma_id_private *id_priv,
2393 struct rdma_conn_param *conn_param) 2392 struct rdma_conn_param *conn_param)
2394 { 2393 {
2395 struct ib_cm_rep_param rep; 2394 struct ib_cm_rep_param rep;
2396 int ret; 2395 int ret;
2397 2396
2398 ret = cma_modify_qp_rtr(id_priv, conn_param); 2397 ret = cma_modify_qp_rtr(id_priv, conn_param);
2399 if (ret) 2398 if (ret)
2400 goto out; 2399 goto out;
2401 2400
2402 ret = cma_modify_qp_rts(id_priv, conn_param); 2401 ret = cma_modify_qp_rts(id_priv, conn_param);
2403 if (ret) 2402 if (ret)
2404 goto out; 2403 goto out;
2405 2404
2406 memset(&rep, 0, sizeof rep); 2405 memset(&rep, 0, sizeof rep);
2407 rep.qp_num = id_priv->qp_num; 2406 rep.qp_num = id_priv->qp_num;
2408 rep.starting_psn = id_priv->seq_num; 2407 rep.starting_psn = id_priv->seq_num;
2409 rep.private_data = conn_param->private_data; 2408 rep.private_data = conn_param->private_data;
2410 rep.private_data_len = conn_param->private_data_len; 2409 rep.private_data_len = conn_param->private_data_len;
2411 rep.responder_resources = conn_param->responder_resources; 2410 rep.responder_resources = conn_param->responder_resources;
2412 rep.initiator_depth = conn_param->initiator_depth; 2411 rep.initiator_depth = conn_param->initiator_depth;
2413 rep.failover_accepted = 0; 2412 rep.failover_accepted = 0;
2414 rep.flow_control = conn_param->flow_control; 2413 rep.flow_control = conn_param->flow_control;
2415 rep.rnr_retry_count = conn_param->rnr_retry_count; 2414 rep.rnr_retry_count = conn_param->rnr_retry_count;
2416 rep.srq = id_priv->srq ? 1 : 0; 2415 rep.srq = id_priv->srq ? 1 : 0;
2417 2416
2418 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 2417 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
2419 out: 2418 out:
2420 return ret; 2419 return ret;
2421 } 2420 }
2422 2421
2423 static int cma_accept_iw(struct rdma_id_private *id_priv, 2422 static int cma_accept_iw(struct rdma_id_private *id_priv,
2424 struct rdma_conn_param *conn_param) 2423 struct rdma_conn_param *conn_param)
2425 { 2424 {
2426 struct iw_cm_conn_param iw_param; 2425 struct iw_cm_conn_param iw_param;
2427 int ret; 2426 int ret;
2428 2427
2429 ret = cma_modify_qp_rtr(id_priv, conn_param); 2428 ret = cma_modify_qp_rtr(id_priv, conn_param);
2430 if (ret) 2429 if (ret)
2431 return ret; 2430 return ret;
2432 2431
2433 iw_param.ord = conn_param->initiator_depth; 2432 iw_param.ord = conn_param->initiator_depth;
2434 iw_param.ird = conn_param->responder_resources; 2433 iw_param.ird = conn_param->responder_resources;
2435 iw_param.private_data = conn_param->private_data; 2434 iw_param.private_data = conn_param->private_data;
2436 iw_param.private_data_len = conn_param->private_data_len; 2435 iw_param.private_data_len = conn_param->private_data_len;
2437 if (id_priv->id.qp) { 2436 if (id_priv->id.qp) {
2438 iw_param.qpn = id_priv->qp_num; 2437 iw_param.qpn = id_priv->qp_num;
2439 } else 2438 } else
2440 iw_param.qpn = conn_param->qp_num; 2439 iw_param.qpn = conn_param->qp_num;
2441 2440
2442 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 2441 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2443 } 2442 }
2444 2443
2445 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 2444 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
2446 enum ib_cm_sidr_status status, 2445 enum ib_cm_sidr_status status,
2447 const void *private_data, int private_data_len) 2446 const void *private_data, int private_data_len)
2448 { 2447 {
2449 struct ib_cm_sidr_rep_param rep; 2448 struct ib_cm_sidr_rep_param rep;
2450 2449
2451 memset(&rep, 0, sizeof rep); 2450 memset(&rep, 0, sizeof rep);
2452 rep.status = status; 2451 rep.status = status;
2453 if (status == IB_SIDR_SUCCESS) { 2452 if (status == IB_SIDR_SUCCESS) {
2454 rep.qp_num = id_priv->qp_num; 2453 rep.qp_num = id_priv->qp_num;
2455 rep.qkey = id_priv->qkey; 2454 rep.qkey = id_priv->qkey;
2456 } 2455 }
2457 rep.private_data = private_data; 2456 rep.private_data = private_data;
2458 rep.private_data_len = private_data_len; 2457 rep.private_data_len = private_data_len;
2459 2458
2460 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 2459 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
2461 } 2460 }
2462 2461
2463 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 2462 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2464 { 2463 {
2465 struct rdma_id_private *id_priv; 2464 struct rdma_id_private *id_priv;
2466 int ret; 2465 int ret;
2467 2466
2468 id_priv = container_of(id, struct rdma_id_private, id); 2467 id_priv = container_of(id, struct rdma_id_private, id);
2469 if (!cma_comp(id_priv, CMA_CONNECT)) 2468 if (!cma_comp(id_priv, CMA_CONNECT))
2470 return -EINVAL; 2469 return -EINVAL;
2471 2470
2472 if (!id->qp && conn_param) { 2471 if (!id->qp && conn_param) {
2473 id_priv->qp_num = conn_param->qp_num; 2472 id_priv->qp_num = conn_param->qp_num;
2474 id_priv->srq = conn_param->srq; 2473 id_priv->srq = conn_param->srq;
2475 } 2474 }
2476 2475
2477 switch (rdma_node_get_transport(id->device->node_type)) { 2476 switch (rdma_node_get_transport(id->device->node_type)) {
2478 case RDMA_TRANSPORT_IB: 2477 case RDMA_TRANSPORT_IB:
2479 if (cma_is_ud_ps(id->ps)) 2478 if (cma_is_ud_ps(id->ps))
2480 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 2479 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2481 conn_param->private_data, 2480 conn_param->private_data,
2482 conn_param->private_data_len); 2481 conn_param->private_data_len);
2483 else if (conn_param) 2482 else if (conn_param)
2484 ret = cma_accept_ib(id_priv, conn_param); 2483 ret = cma_accept_ib(id_priv, conn_param);
2485 else 2484 else
2486 ret = cma_rep_recv(id_priv); 2485 ret = cma_rep_recv(id_priv);
2487 break; 2486 break;
2488 case RDMA_TRANSPORT_IWARP: 2487 case RDMA_TRANSPORT_IWARP:
2489 ret = cma_accept_iw(id_priv, conn_param); 2488 ret = cma_accept_iw(id_priv, conn_param);
2490 break; 2489 break;
2491 default: 2490 default:
2492 ret = -ENOSYS; 2491 ret = -ENOSYS;
2493 break; 2492 break;
2494 } 2493 }
2495 2494
2496 if (ret) 2495 if (ret)
2497 goto reject; 2496 goto reject;
2498 2497
2499 return 0; 2498 return 0;
2500 reject: 2499 reject:
2501 cma_modify_qp_err(id_priv); 2500 cma_modify_qp_err(id_priv);
2502 rdma_reject(id, NULL, 0); 2501 rdma_reject(id, NULL, 0);
2503 return ret; 2502 return ret;
2504 } 2503 }
2505 EXPORT_SYMBOL(rdma_accept); 2504 EXPORT_SYMBOL(rdma_accept);
2506 2505
2507 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 2506 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2508 { 2507 {
2509 struct rdma_id_private *id_priv; 2508 struct rdma_id_private *id_priv;
2510 int ret; 2509 int ret;
2511 2510
2512 id_priv = container_of(id, struct rdma_id_private, id); 2511 id_priv = container_of(id, struct rdma_id_private, id);
2513 if (!cma_has_cm_dev(id_priv)) 2512 if (!cma_has_cm_dev(id_priv))
2514 return -EINVAL; 2513 return -EINVAL;
2515 2514
2516 switch (id->device->node_type) { 2515 switch (id->device->node_type) {
2517 case RDMA_NODE_IB_CA: 2516 case RDMA_NODE_IB_CA:
2518 ret = ib_cm_notify(id_priv->cm_id.ib, event); 2517 ret = ib_cm_notify(id_priv->cm_id.ib, event);
2519 break; 2518 break;
2520 default: 2519 default:
2521 ret = 0; 2520 ret = 0;
2522 break; 2521 break;
2523 } 2522 }
2524 return ret; 2523 return ret;
2525 } 2524 }
2526 EXPORT_SYMBOL(rdma_notify); 2525 EXPORT_SYMBOL(rdma_notify);
2527 2526
2528 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 2527 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2529 u8 private_data_len) 2528 u8 private_data_len)
2530 { 2529 {
2531 struct rdma_id_private *id_priv; 2530 struct rdma_id_private *id_priv;
2532 int ret; 2531 int ret;
2533 2532
2534 id_priv = container_of(id, struct rdma_id_private, id); 2533 id_priv = container_of(id, struct rdma_id_private, id);
2535 if (!cma_has_cm_dev(id_priv)) 2534 if (!cma_has_cm_dev(id_priv))
2536 return -EINVAL; 2535 return -EINVAL;
2537 2536
2538 switch (rdma_node_get_transport(id->device->node_type)) { 2537 switch (rdma_node_get_transport(id->device->node_type)) {
2539 case RDMA_TRANSPORT_IB: 2538 case RDMA_TRANSPORT_IB:
2540 if (cma_is_ud_ps(id->ps)) 2539 if (cma_is_ud_ps(id->ps))
2541 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 2540 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
2542 private_data, private_data_len); 2541 private_data, private_data_len);
2543 else 2542 else
2544 ret = ib_send_cm_rej(id_priv->cm_id.ib, 2543 ret = ib_send_cm_rej(id_priv->cm_id.ib,
2545 IB_CM_REJ_CONSUMER_DEFINED, NULL, 2544 IB_CM_REJ_CONSUMER_DEFINED, NULL,
2546 0, private_data, private_data_len); 2545 0, private_data, private_data_len);
2547 break; 2546 break;
2548 case RDMA_TRANSPORT_IWARP: 2547 case RDMA_TRANSPORT_IWARP:
2549 ret = iw_cm_reject(id_priv->cm_id.iw, 2548 ret = iw_cm_reject(id_priv->cm_id.iw,
2550 private_data, private_data_len); 2549 private_data, private_data_len);
2551 break; 2550 break;
2552 default: 2551 default:
2553 ret = -ENOSYS; 2552 ret = -ENOSYS;
2554 break; 2553 break;
2555 } 2554 }
2556 return ret; 2555 return ret;
2557 } 2556 }
2558 EXPORT_SYMBOL(rdma_reject); 2557 EXPORT_SYMBOL(rdma_reject);
2559 2558
2560 int rdma_disconnect(struct rdma_cm_id *id) 2559 int rdma_disconnect(struct rdma_cm_id *id)
2561 { 2560 {
2562 struct rdma_id_private *id_priv; 2561 struct rdma_id_private *id_priv;
2563 int ret; 2562 int ret;
2564 2563
2565 id_priv = container_of(id, struct rdma_id_private, id); 2564 id_priv = container_of(id, struct rdma_id_private, id);
2566 if (!cma_has_cm_dev(id_priv)) 2565 if (!cma_has_cm_dev(id_priv))
2567 return -EINVAL; 2566 return -EINVAL;
2568 2567
2569 switch (rdma_node_get_transport(id->device->node_type)) { 2568 switch (rdma_node_get_transport(id->device->node_type)) {
2570 case RDMA_TRANSPORT_IB: 2569 case RDMA_TRANSPORT_IB:
2571 ret = cma_modify_qp_err(id_priv); 2570 ret = cma_modify_qp_err(id_priv);
2572 if (ret) 2571 if (ret)
2573 goto out; 2572 goto out;
2574 /* Initiate or respond to a disconnect. */ 2573 /* Initiate or respond to a disconnect. */
2575 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 2574 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
2576 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 2575 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
2577 break; 2576 break;
2578 case RDMA_TRANSPORT_IWARP: 2577 case RDMA_TRANSPORT_IWARP:
2579 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 2578 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
2580 break; 2579 break;
2581 default: 2580 default:
2582 ret = -EINVAL; 2581 ret = -EINVAL;
2583 break; 2582 break;
2584 } 2583 }
2585 out: 2584 out:
2586 return ret; 2585 return ret;
2587 } 2586 }
2588 EXPORT_SYMBOL(rdma_disconnect); 2587 EXPORT_SYMBOL(rdma_disconnect);
2589 2588
2590 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 2589 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
2591 { 2590 {
2592 struct rdma_id_private *id_priv; 2591 struct rdma_id_private *id_priv;
2593 struct cma_multicast *mc = multicast->context; 2592 struct cma_multicast *mc = multicast->context;
2594 struct rdma_cm_event event; 2593 struct rdma_cm_event event;
2595 int ret; 2594 int ret;
2596 2595
2597 id_priv = mc->id_priv; 2596 id_priv = mc->id_priv;
2598 if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) && 2597 if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
2599 cma_disable_callback(id_priv, CMA_ADDR_RESOLVED)) 2598 cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
2600 return 0; 2599 return 0;
2601 2600
2602 mutex_lock(&id_priv->qp_mutex); 2601 mutex_lock(&id_priv->qp_mutex);
2603 if (!status && id_priv->id.qp) 2602 if (!status && id_priv->id.qp)
2604 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, 2603 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
2605 multicast->rec.mlid); 2604 multicast->rec.mlid);
2606 mutex_unlock(&id_priv->qp_mutex); 2605 mutex_unlock(&id_priv->qp_mutex);
2607 2606
2608 memset(&event, 0, sizeof event); 2607 memset(&event, 0, sizeof event);
2609 event.status = status; 2608 event.status = status;
2610 event.param.ud.private_data = mc->context; 2609 event.param.ud.private_data = mc->context;
2611 if (!status) { 2610 if (!status) {
2612 event.event = RDMA_CM_EVENT_MULTICAST_JOIN; 2611 event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
2613 ib_init_ah_from_mcmember(id_priv->id.device, 2612 ib_init_ah_from_mcmember(id_priv->id.device,
2614 id_priv->id.port_num, &multicast->rec, 2613 id_priv->id.port_num, &multicast->rec,
2615 &event.param.ud.ah_attr); 2614 &event.param.ud.ah_attr);
2616 event.param.ud.qp_num = 0xFFFFFF; 2615 event.param.ud.qp_num = 0xFFFFFF;
2617 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); 2616 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
2618 } else 2617 } else
2619 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 2618 event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
2620 2619
2621 ret = id_priv->id.event_handler(&id_priv->id, &event); 2620 ret = id_priv->id.event_handler(&id_priv->id, &event);
2622 if (ret) { 2621 if (ret) {
2623 cma_exch(id_priv, CMA_DESTROYING); 2622 cma_exch(id_priv, CMA_DESTROYING);
2624 mutex_unlock(&id_priv->handler_mutex); 2623 mutex_unlock(&id_priv->handler_mutex);
2625 rdma_destroy_id(&id_priv->id); 2624 rdma_destroy_id(&id_priv->id);
2626 return 0; 2625 return 0;
2627 } 2626 }
2628 2627
2629 mutex_unlock(&id_priv->handler_mutex); 2628 mutex_unlock(&id_priv->handler_mutex);
2630 return 0; 2629 return 0;
2631 } 2630 }
2632 2631
2633 static void cma_set_mgid(struct rdma_id_private *id_priv, 2632 static void cma_set_mgid(struct rdma_id_private *id_priv,
2634 struct sockaddr *addr, union ib_gid *mgid) 2633 struct sockaddr *addr, union ib_gid *mgid)
2635 { 2634 {
2636 unsigned char mc_map[MAX_ADDR_LEN]; 2635 unsigned char mc_map[MAX_ADDR_LEN];
2637 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2636 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2638 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 2637 struct sockaddr_in *sin = (struct sockaddr_in *) addr;
2639 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 2638 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
2640 2639
2641 if (cma_any_addr(addr)) { 2640 if (cma_any_addr(addr)) {
2642 memset(mgid, 0, sizeof *mgid); 2641 memset(mgid, 0, sizeof *mgid);
2643 } else if ((addr->sa_family == AF_INET6) && 2642 } else if ((addr->sa_family == AF_INET6) &&
2644 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) == 2643 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
2645 0xFF10A01B)) { 2644 0xFF10A01B)) {
2646 /* IPv6 address is an SA assigned MGID. */ 2645 /* IPv6 address is an SA assigned MGID. */
2647 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 2646 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
2648 } else { 2647 } else {
2649 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 2648 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
2650 if (id_priv->id.ps == RDMA_PS_UDP) 2649 if (id_priv->id.ps == RDMA_PS_UDP)
2651 mc_map[7] = 0x01; /* Use RDMA CM signature */ 2650 mc_map[7] = 0x01; /* Use RDMA CM signature */
2652 *mgid = *(union ib_gid *) (mc_map + 4); 2651 *mgid = *(union ib_gid *) (mc_map + 4);
2653 } 2652 }
2654 } 2653 }
2655 2654
2656 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 2655 static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
2657 struct cma_multicast *mc) 2656 struct cma_multicast *mc)
2658 { 2657 {
2659 struct ib_sa_mcmember_rec rec; 2658 struct ib_sa_mcmember_rec rec;
2660 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2659 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2661 ib_sa_comp_mask comp_mask; 2660 ib_sa_comp_mask comp_mask;
2662 int ret; 2661 int ret;
2663 2662
2664 ib_addr_get_mgid(dev_addr, &rec.mgid); 2663 ib_addr_get_mgid(dev_addr, &rec.mgid);
2665 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 2664 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
2666 &rec.mgid, &rec); 2665 &rec.mgid, &rec);
2667 if (ret) 2666 if (ret)
2668 return ret; 2667 return ret;
2669 2668
2670 cma_set_mgid(id_priv, &mc->addr, &rec.mgid); 2669 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
2671 if (id_priv->id.ps == RDMA_PS_UDP) 2670 if (id_priv->id.ps == RDMA_PS_UDP)
2672 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 2671 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
2673 ib_addr_get_sgid(dev_addr, &rec.port_gid); 2672 ib_addr_get_sgid(dev_addr, &rec.port_gid);
2674 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2673 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2675 rec.join_state = 1; 2674 rec.join_state = 1;
2676 2675
2677 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | 2676 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
2678 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | 2677 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
2679 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | 2678 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
2680 IB_SA_MCMEMBER_REC_FLOW_LABEL | 2679 IB_SA_MCMEMBER_REC_FLOW_LABEL |
2681 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 2680 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
2682 2681
2683 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, 2682 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
2684 id_priv->id.port_num, &rec, 2683 id_priv->id.port_num, &rec,
2685 comp_mask, GFP_KERNEL, 2684 comp_mask, GFP_KERNEL,
2686 cma_ib_mc_handler, mc); 2685 cma_ib_mc_handler, mc);
2687 if (IS_ERR(mc->multicast.ib)) 2686 if (IS_ERR(mc->multicast.ib))
2688 return PTR_ERR(mc->multicast.ib); 2687 return PTR_ERR(mc->multicast.ib);
2689 2688
2690 return 0; 2689 return 0;
2691 } 2690 }
2692 2691
2693 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, 2692 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
2694 void *context) 2693 void *context)
2695 { 2694 {
2696 struct rdma_id_private *id_priv; 2695 struct rdma_id_private *id_priv;
2697 struct cma_multicast *mc; 2696 struct cma_multicast *mc;
2698 int ret; 2697 int ret;
2699 2698
2700 id_priv = container_of(id, struct rdma_id_private, id); 2699 id_priv = container_of(id, struct rdma_id_private, id);
2701 if (!cma_comp(id_priv, CMA_ADDR_BOUND) && 2700 if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
2702 !cma_comp(id_priv, CMA_ADDR_RESOLVED)) 2701 !cma_comp(id_priv, CMA_ADDR_RESOLVED))
2703 return -EINVAL; 2702 return -EINVAL;
2704 2703
2705 mc = kmalloc(sizeof *mc, GFP_KERNEL); 2704 mc = kmalloc(sizeof *mc, GFP_KERNEL);
2706 if (!mc) 2705 if (!mc)
2707 return -ENOMEM; 2706 return -ENOMEM;
2708 2707
2709 memcpy(&mc->addr, addr, ip_addr_size(addr)); 2708 memcpy(&mc->addr, addr, ip_addr_size(addr));
2710 mc->context = context; 2709 mc->context = context;
2711 mc->id_priv = id_priv; 2710 mc->id_priv = id_priv;
2712 2711
2713 spin_lock(&id_priv->lock); 2712 spin_lock(&id_priv->lock);
2714 list_add(&mc->list, &id_priv->mc_list); 2713 list_add(&mc->list, &id_priv->mc_list);
2715 spin_unlock(&id_priv->lock); 2714 spin_unlock(&id_priv->lock);
2716 2715
2717 switch (rdma_node_get_transport(id->device->node_type)) { 2716 switch (rdma_node_get_transport(id->device->node_type)) {
2718 case RDMA_TRANSPORT_IB: 2717 case RDMA_TRANSPORT_IB:
2719 ret = cma_join_ib_multicast(id_priv, mc); 2718 ret = cma_join_ib_multicast(id_priv, mc);
2720 break; 2719 break;
2721 default: 2720 default:
2722 ret = -ENOSYS; 2721 ret = -ENOSYS;
2723 break; 2722 break;
2724 } 2723 }
2725 2724
2726 if (ret) { 2725 if (ret) {
2727 spin_lock_irq(&id_priv->lock); 2726 spin_lock_irq(&id_priv->lock);
2728 list_del(&mc->list); 2727 list_del(&mc->list);
2729 spin_unlock_irq(&id_priv->lock); 2728 spin_unlock_irq(&id_priv->lock);
2730 kfree(mc); 2729 kfree(mc);
2731 } 2730 }
2732 return ret; 2731 return ret;
2733 } 2732 }
2734 EXPORT_SYMBOL(rdma_join_multicast); 2733 EXPORT_SYMBOL(rdma_join_multicast);
2735 2734
2736 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) 2735 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
2737 { 2736 {
2738 struct rdma_id_private *id_priv; 2737 struct rdma_id_private *id_priv;
2739 struct cma_multicast *mc; 2738 struct cma_multicast *mc;
2740 2739
2741 id_priv = container_of(id, struct rdma_id_private, id); 2740 id_priv = container_of(id, struct rdma_id_private, id);
2742 spin_lock_irq(&id_priv->lock); 2741 spin_lock_irq(&id_priv->lock);
2743 list_for_each_entry(mc, &id_priv->mc_list, list) { 2742 list_for_each_entry(mc, &id_priv->mc_list, list) {
2744 if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) { 2743 if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
2745 list_del(&mc->list); 2744 list_del(&mc->list);
2746 spin_unlock_irq(&id_priv->lock); 2745 spin_unlock_irq(&id_priv->lock);
2747 2746
2748 if (id->qp) 2747 if (id->qp)
2749 ib_detach_mcast(id->qp, 2748 ib_detach_mcast(id->qp,
2750 &mc->multicast.ib->rec.mgid, 2749 &mc->multicast.ib->rec.mgid,
2751 mc->multicast.ib->rec.mlid); 2750 mc->multicast.ib->rec.mlid);
2752 ib_sa_free_multicast(mc->multicast.ib); 2751 ib_sa_free_multicast(mc->multicast.ib);
2753 kfree(mc); 2752 kfree(mc);
2754 return; 2753 return;
2755 } 2754 }
2756 } 2755 }
2757 spin_unlock_irq(&id_priv->lock); 2756 spin_unlock_irq(&id_priv->lock);
2758 } 2757 }
2759 EXPORT_SYMBOL(rdma_leave_multicast); 2758 EXPORT_SYMBOL(rdma_leave_multicast);
2760 2759
2761 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) 2760 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
2762 { 2761 {
2763 struct rdma_dev_addr *dev_addr; 2762 struct rdma_dev_addr *dev_addr;
2764 struct cma_ndev_work *work; 2763 struct cma_ndev_work *work;
2765 2764
2766 dev_addr = &id_priv->id.route.addr.dev_addr; 2765 dev_addr = &id_priv->id.route.addr.dev_addr;
2767 2766
2768 if ((dev_addr->src_dev == ndev) && 2767 if ((dev_addr->src_dev == ndev) &&
2769 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { 2768 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
2770 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n", 2769 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
2771 ndev->name, &id_priv->id); 2770 ndev->name, &id_priv->id);
2772 work = kzalloc(sizeof *work, GFP_KERNEL); 2771 work = kzalloc(sizeof *work, GFP_KERNEL);
2773 if (!work) 2772 if (!work)
2774 return -ENOMEM; 2773 return -ENOMEM;
2775 2774
2776 INIT_WORK(&work->work, cma_ndev_work_handler); 2775 INIT_WORK(&work->work, cma_ndev_work_handler);
2777 work->id = id_priv; 2776 work->id = id_priv;
2778 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; 2777 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
2779 atomic_inc(&id_priv->refcount); 2778 atomic_inc(&id_priv->refcount);
2780 queue_work(cma_wq, &work->work); 2779 queue_work(cma_wq, &work->work);
2781 } 2780 }
2782 2781
2783 return 0; 2782 return 0;
2784 } 2783 }
2785 2784
2786 static int cma_netdev_callback(struct notifier_block *self, unsigned long event, 2785 static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
2787 void *ctx) 2786 void *ctx)
2788 { 2787 {
2789 struct net_device *ndev = (struct net_device *)ctx; 2788 struct net_device *ndev = (struct net_device *)ctx;
2790 struct cma_device *cma_dev; 2789 struct cma_device *cma_dev;
2791 struct rdma_id_private *id_priv; 2790 struct rdma_id_private *id_priv;
2792 int ret = NOTIFY_DONE; 2791 int ret = NOTIFY_DONE;
2793 2792
2794 if (dev_net(ndev) != &init_net) 2793 if (dev_net(ndev) != &init_net)
2795 return NOTIFY_DONE; 2794 return NOTIFY_DONE;
2796 2795
2797 if (event != NETDEV_BONDING_FAILOVER) 2796 if (event != NETDEV_BONDING_FAILOVER)
2798 return NOTIFY_DONE; 2797 return NOTIFY_DONE;
2799 2798
2800 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING)) 2799 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
2801 return NOTIFY_DONE; 2800 return NOTIFY_DONE;
2802 2801
2803 mutex_lock(&lock); 2802 mutex_lock(&lock);
2804 list_for_each_entry(cma_dev, &dev_list, list) 2803 list_for_each_entry(cma_dev, &dev_list, list)
2805 list_for_each_entry(id_priv, &cma_dev->id_list, list) { 2804 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
2806 ret = cma_netdev_change(ndev, id_priv); 2805 ret = cma_netdev_change(ndev, id_priv);
2807 if (ret) 2806 if (ret)
2808 goto out; 2807 goto out;
2809 } 2808 }
2810 2809
2811 out: 2810 out:
2812 mutex_unlock(&lock); 2811 mutex_unlock(&lock);
2813 return ret; 2812 return ret;
2814 } 2813 }
2815 2814
2816 static struct notifier_block cma_nb = { 2815 static struct notifier_block cma_nb = {
2817 .notifier_call = cma_netdev_callback 2816 .notifier_call = cma_netdev_callback
2818 }; 2817 };
2819 2818
2820 static void cma_add_one(struct ib_device *device) 2819 static void cma_add_one(struct ib_device *device)
2821 { 2820 {
2822 struct cma_device *cma_dev; 2821 struct cma_device *cma_dev;
2823 struct rdma_id_private *id_priv; 2822 struct rdma_id_private *id_priv;
2824 2823
2825 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); 2824 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
2826 if (!cma_dev) 2825 if (!cma_dev)
2827 return; 2826 return;
2828 2827
2829 cma_dev->device = device; 2828 cma_dev->device = device;
2830 2829
2831 init_completion(&cma_dev->comp); 2830 init_completion(&cma_dev->comp);
2832 atomic_set(&cma_dev->refcount, 1); 2831 atomic_set(&cma_dev->refcount, 1);
2833 INIT_LIST_HEAD(&cma_dev->id_list); 2832 INIT_LIST_HEAD(&cma_dev->id_list);
2834 ib_set_client_data(device, &cma_client, cma_dev); 2833 ib_set_client_data(device, &cma_client, cma_dev);
2835 2834
2836 mutex_lock(&lock); 2835 mutex_lock(&lock);
2837 list_add_tail(&cma_dev->list, &dev_list); 2836 list_add_tail(&cma_dev->list, &dev_list);
2838 list_for_each_entry(id_priv, &listen_any_list, list) 2837 list_for_each_entry(id_priv, &listen_any_list, list)
2839 cma_listen_on_dev(id_priv, cma_dev); 2838 cma_listen_on_dev(id_priv, cma_dev);
2840 mutex_unlock(&lock); 2839 mutex_unlock(&lock);
2841 } 2840 }
2842 2841
2843 static int cma_remove_id_dev(struct rdma_id_private *id_priv) 2842 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
2844 { 2843 {
2845 struct rdma_cm_event event; 2844 struct rdma_cm_event event;
2846 enum cma_state state; 2845 enum cma_state state;
2847 int ret = 0; 2846 int ret = 0;
2848 2847
2849 /* Record that we want to remove the device */ 2848 /* Record that we want to remove the device */
2850 state = cma_exch(id_priv, CMA_DEVICE_REMOVAL); 2849 state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
2851 if (state == CMA_DESTROYING) 2850 if (state == CMA_DESTROYING)
2852 return 0; 2851 return 0;
2853 2852
2854 cma_cancel_operation(id_priv, state); 2853 cma_cancel_operation(id_priv, state);
2855 mutex_lock(&id_priv->handler_mutex); 2854 mutex_lock(&id_priv->handler_mutex);
2856 2855
2857 /* Check for destruction from another callback. */ 2856 /* Check for destruction from another callback. */
2858 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) 2857 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
2859 goto out; 2858 goto out;
2860 2859
2861 memset(&event, 0, sizeof event); 2860 memset(&event, 0, sizeof event);
2862 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; 2861 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
2863 ret = id_priv->id.event_handler(&id_priv->id, &event); 2862 ret = id_priv->id.event_handler(&id_priv->id, &event);
2864 out: 2863 out:
2865 mutex_unlock(&id_priv->handler_mutex); 2864 mutex_unlock(&id_priv->handler_mutex);
2866 return ret; 2865 return ret;
2867 } 2866 }
2868 2867
2869 static void cma_process_remove(struct cma_device *cma_dev) 2868 static void cma_process_remove(struct cma_device *cma_dev)
2870 { 2869 {
2871 struct rdma_id_private *id_priv; 2870 struct rdma_id_private *id_priv;
2872 int ret; 2871 int ret;
2873 2872
2874 mutex_lock(&lock); 2873 mutex_lock(&lock);
2875 while (!list_empty(&cma_dev->id_list)) { 2874 while (!list_empty(&cma_dev->id_list)) {
2876 id_priv = list_entry(cma_dev->id_list.next, 2875 id_priv = list_entry(cma_dev->id_list.next,
2877 struct rdma_id_private, list); 2876 struct rdma_id_private, list);
2878 2877
2879 list_del(&id_priv->listen_list); 2878 list_del(&id_priv->listen_list);
2880 list_del_init(&id_priv->list); 2879 list_del_init(&id_priv->list);
2881 atomic_inc(&id_priv->refcount); 2880 atomic_inc(&id_priv->refcount);
2882 mutex_unlock(&lock); 2881 mutex_unlock(&lock);
2883 2882
2884 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); 2883 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
2885 cma_deref_id(id_priv); 2884 cma_deref_id(id_priv);
2886 if (ret) 2885 if (ret)
2887 rdma_destroy_id(&id_priv->id); 2886 rdma_destroy_id(&id_priv->id);
2888 2887
2889 mutex_lock(&lock); 2888 mutex_lock(&lock);
2890 } 2889 }
2891 mutex_unlock(&lock); 2890 mutex_unlock(&lock);
2892 2891
2893 cma_deref_dev(cma_dev); 2892 cma_deref_dev(cma_dev);
2894 wait_for_completion(&cma_dev->comp); 2893 wait_for_completion(&cma_dev->comp);
2895 } 2894 }
2896 2895
2897 static void cma_remove_one(struct ib_device *device) 2896 static void cma_remove_one(struct ib_device *device)
2898 { 2897 {
2899 struct cma_device *cma_dev; 2898 struct cma_device *cma_dev;
2900 2899
2901 cma_dev = ib_get_client_data(device, &cma_client); 2900 cma_dev = ib_get_client_data(device, &cma_client);
2902 if (!cma_dev) 2901 if (!cma_dev)
2903 return; 2902 return;
2904 2903
2905 mutex_lock(&lock); 2904 mutex_lock(&lock);
2906 list_del(&cma_dev->list); 2905 list_del(&cma_dev->list);
2907 mutex_unlock(&lock); 2906 mutex_unlock(&lock);
2908 2907
2909 cma_process_remove(cma_dev); 2908 cma_process_remove(cma_dev);
2910 kfree(cma_dev); 2909 kfree(cma_dev);
2911 } 2910 }
2912 2911
2913 static int cma_init(void) 2912 static int cma_init(void)
2914 { 2913 {
2915 int ret, low, high, remaining; 2914 int ret, low, high, remaining;
2916 2915
2917 get_random_bytes(&next_port, sizeof next_port); 2916 get_random_bytes(&next_port, sizeof next_port);
2918 inet_get_local_port_range(&low, &high); 2917 inet_get_local_port_range(&low, &high);
2919 remaining = (high - low) + 1; 2918 remaining = (high - low) + 1;
2920 next_port = ((unsigned int) next_port % remaining) + low; 2919 next_port = ((unsigned int) next_port % remaining) + low;
2921 2920
2922 cma_wq = create_singlethread_workqueue("rdma_cm"); 2921 cma_wq = create_singlethread_workqueue("rdma_cm");
2923 if (!cma_wq) 2922 if (!cma_wq)
2924 return -ENOMEM; 2923 return -ENOMEM;
2925 2924
2926 ib_sa_register_client(&sa_client); 2925 ib_sa_register_client(&sa_client);
2927 rdma_addr_register_client(&addr_client); 2926 rdma_addr_register_client(&addr_client);
2928 register_netdevice_notifier(&cma_nb); 2927 register_netdevice_notifier(&cma_nb);
2929 2928
2930 ret = ib_register_client(&cma_client); 2929 ret = ib_register_client(&cma_client);
2931 if (ret) 2930 if (ret)
2932 goto err; 2931 goto err;
2933 return 0; 2932 return 0;
2934 2933
2935 err: 2934 err:
2936 unregister_netdevice_notifier(&cma_nb); 2935 unregister_netdevice_notifier(&cma_nb);
2937 rdma_addr_unregister_client(&addr_client); 2936 rdma_addr_unregister_client(&addr_client);
2938 ib_sa_unregister_client(&sa_client); 2937 ib_sa_unregister_client(&sa_client);
2939 destroy_workqueue(cma_wq); 2938 destroy_workqueue(cma_wq);
2940 return ret; 2939 return ret;
2941 } 2940 }
2942 2941
2943 static void cma_cleanup(void) 2942 static void cma_cleanup(void)
2944 { 2943 {
2945 ib_unregister_client(&cma_client); 2944 ib_unregister_client(&cma_client);
2946 unregister_netdevice_notifier(&cma_nb); 2945 unregister_netdevice_notifier(&cma_nb);
2947 rdma_addr_unregister_client(&addr_client); 2946 rdma_addr_unregister_client(&addr_client);
2948 ib_sa_unregister_client(&sa_client); 2947 ib_sa_unregister_client(&sa_client);
2949 destroy_workqueue(cma_wq); 2948 destroy_workqueue(cma_wq);
2950 idr_destroy(&sdp_ps); 2949 idr_destroy(&sdp_ps);
2951 idr_destroy(&tcp_ps); 2950 idr_destroy(&tcp_ps);
2952 idr_destroy(&udp_ps); 2951 idr_destroy(&udp_ps);
2953 idr_destroy(&ipoib_ps); 2952 idr_destroy(&ipoib_ps);
2954 } 2953 }
2955 2954
2956 module_init(cma_init); 2955 module_init(cma_init);
2957 module_exit(cma_cleanup); 2956 module_exit(cma_cleanup);
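
The pattern applied throughout cma.c above is to keep the address in a struct sockaddr_storage and cast to the generic struct sockaddr * at the call sites that need it, as in the cma_any_addr() and cma_get_service_id() hunks. A minimal sketch of that pattern, using rdma_bind_addr() from this file (example_addr and example_bind are illustrative names only, not part of the patch):

	#include <linux/socket.h>	/* struct sockaddr_storage */
	#include <rdma/rdma_cm.h>	/* struct rdma_cm_id, rdma_bind_addr() */

	struct example_addr {
		struct sockaddr_storage	addr;	/* holds sockaddr_in or sockaddr_in6 */
	};

	static int example_bind(struct rdma_cm_id *id, struct example_addr *ea)
	{
		/* rdma_bind_addr() takes the generic struct sockaddr *, so cast */
		return rdma_bind_addr(id, (struct sockaddr *) &ea->addr);
	}

The cast is safe because struct sockaddr_storage is defined to be large enough, and suitably aligned, for any supported address family.
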
drivers/infiniband/core/ucma.c
1 /* 1 /*
2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the 7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below: 8 * OpenIB.org BSD license below:
9 * 9 *
10 * Redistribution and use in source and binary forms, with or 10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following 11 * without modification, are permitted provided that the following
12 * conditions are met: 12 * conditions are met:
13 * 13 *
14 * - Redistributions of source code must retain the above 14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following 15 * copyright notice, this list of conditions and the following
16 * disclaimer. 16 * disclaimer.
17 * 17 *
18 * - Redistributions in binary form must reproduce the above 18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following 19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials 20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution. 21 * provided with the distribution.
22 * 22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32 32
33 #include <linux/completion.h> 33 #include <linux/completion.h>
34 #include <linux/file.h> 34 #include <linux/file.h>
35 #include <linux/mutex.h> 35 #include <linux/mutex.h>
36 #include <linux/poll.h> 36 #include <linux/poll.h>
37 #include <linux/idr.h> 37 #include <linux/idr.h>
38 #include <linux/in.h> 38 #include <linux/in.h>
39 #include <linux/in6.h> 39 #include <linux/in6.h>
40 #include <linux/miscdevice.h> 40 #include <linux/miscdevice.h>
41 41
42 #include <rdma/rdma_user_cm.h> 42 #include <rdma/rdma_user_cm.h>
43 #include <rdma/ib_marshall.h> 43 #include <rdma/ib_marshall.h>
44 #include <rdma/rdma_cm.h> 44 #include <rdma/rdma_cm.h>
45 45
46 MODULE_AUTHOR("Sean Hefty"); 46 MODULE_AUTHOR("Sean Hefty");
47 MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); 47 MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
48 MODULE_LICENSE("Dual BSD/GPL"); 48 MODULE_LICENSE("Dual BSD/GPL");
49 49
50 enum { 50 enum {
51 UCMA_MAX_BACKLOG = 128 51 UCMA_MAX_BACKLOG = 128
52 }; 52 };
53 53
54 struct ucma_file { 54 struct ucma_file {
55 struct mutex mut; 55 struct mutex mut;
56 struct file *filp; 56 struct file *filp;
57 struct list_head ctx_list; 57 struct list_head ctx_list;
58 struct list_head event_list; 58 struct list_head event_list;
59 wait_queue_head_t poll_wait; 59 wait_queue_head_t poll_wait;
60 }; 60 };
61 61
62 struct ucma_context { 62 struct ucma_context {
63 int id; 63 int id;
64 struct completion comp; 64 struct completion comp;
65 atomic_t ref; 65 atomic_t ref;
66 int events_reported; 66 int events_reported;
67 int backlog; 67 int backlog;
68 68
69 struct ucma_file *file; 69 struct ucma_file *file;
70 struct rdma_cm_id *cm_id; 70 struct rdma_cm_id *cm_id;
71 u64 uid; 71 u64 uid;
72 72
73 struct list_head list; 73 struct list_head list;
74 struct list_head mc_list; 74 struct list_head mc_list;
75 }; 75 };
76 76
77 struct ucma_multicast { 77 struct ucma_multicast {
78 struct ucma_context *ctx; 78 struct ucma_context *ctx;
79 int id; 79 int id;
80 int events_reported; 80 int events_reported;
81 81
82 u64 uid; 82 u64 uid;
83 struct list_head list; 83 struct list_head list;
84 struct sockaddr addr; 84 struct sockaddr_storage addr;
85 u8 pad[sizeof(struct sockaddr_in6) -
86 sizeof(struct sockaddr)];
87 }; 85 };
88 86
89 struct ucma_event { 87 struct ucma_event {
90 struct ucma_context *ctx; 88 struct ucma_context *ctx;
91 struct ucma_multicast *mc; 89 struct ucma_multicast *mc;
92 struct list_head list; 90 struct list_head list;
93 struct rdma_cm_id *cm_id; 91 struct rdma_cm_id *cm_id;
94 struct rdma_ucm_event_resp resp; 92 struct rdma_ucm_event_resp resp;
95 }; 93 };
96 94
97 static DEFINE_MUTEX(mut); 95 static DEFINE_MUTEX(mut);
98 static DEFINE_IDR(ctx_idr); 96 static DEFINE_IDR(ctx_idr);
99 static DEFINE_IDR(multicast_idr); 97 static DEFINE_IDR(multicast_idr);
100 98
101 static inline struct ucma_context *_ucma_find_context(int id, 99 static inline struct ucma_context *_ucma_find_context(int id,
102 struct ucma_file *file) 100 struct ucma_file *file)
103 { 101 {
104 struct ucma_context *ctx; 102 struct ucma_context *ctx;
105 103
106 ctx = idr_find(&ctx_idr, id); 104 ctx = idr_find(&ctx_idr, id);
107 if (!ctx) 105 if (!ctx)
108 ctx = ERR_PTR(-ENOENT); 106 ctx = ERR_PTR(-ENOENT);
109 else if (ctx->file != file) 107 else if (ctx->file != file)
110 ctx = ERR_PTR(-EINVAL); 108 ctx = ERR_PTR(-EINVAL);
111 return ctx; 109 return ctx;
112 } 110 }
113 111
114 static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id) 112 static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
115 { 113 {
116 struct ucma_context *ctx; 114 struct ucma_context *ctx;
117 115
118 mutex_lock(&mut); 116 mutex_lock(&mut);
119 ctx = _ucma_find_context(id, file); 117 ctx = _ucma_find_context(id, file);
120 if (!IS_ERR(ctx)) 118 if (!IS_ERR(ctx))
121 atomic_inc(&ctx->ref); 119 atomic_inc(&ctx->ref);
122 mutex_unlock(&mut); 120 mutex_unlock(&mut);
123 return ctx; 121 return ctx;
124 } 122 }
125 123
126 static void ucma_put_ctx(struct ucma_context *ctx) 124 static void ucma_put_ctx(struct ucma_context *ctx)
127 { 125 {
128 if (atomic_dec_and_test(&ctx->ref)) 126 if (atomic_dec_and_test(&ctx->ref))
129 complete(&ctx->comp); 127 complete(&ctx->comp);
130 } 128 }
131 129
132 static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) 130 static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
133 { 131 {
134 struct ucma_context *ctx; 132 struct ucma_context *ctx;
135 int ret; 133 int ret;
136 134
137 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 135 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
138 if (!ctx) 136 if (!ctx)
139 return NULL; 137 return NULL;
140 138
141 atomic_set(&ctx->ref, 1); 139 atomic_set(&ctx->ref, 1);
142 init_completion(&ctx->comp); 140 init_completion(&ctx->comp);
143 INIT_LIST_HEAD(&ctx->mc_list); 141 INIT_LIST_HEAD(&ctx->mc_list);
144 ctx->file = file; 142 ctx->file = file;
145 143
146 do { 144 do {
147 ret = idr_pre_get(&ctx_idr, GFP_KERNEL); 145 ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
148 if (!ret) 146 if (!ret)
149 goto error; 147 goto error;
150 148
151 mutex_lock(&mut); 149 mutex_lock(&mut);
152 ret = idr_get_new(&ctx_idr, ctx, &ctx->id); 150 ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
153 mutex_unlock(&mut); 151 mutex_unlock(&mut);
154 } while (ret == -EAGAIN); 152 } while (ret == -EAGAIN);
155 153
156 if (ret) 154 if (ret)
157 goto error; 155 goto error;
158 156
159 list_add_tail(&ctx->list, &file->ctx_list); 157 list_add_tail(&ctx->list, &file->ctx_list);
160 return ctx; 158 return ctx;
161 159
162 error: 160 error:
163 kfree(ctx); 161 kfree(ctx);
164 return NULL; 162 return NULL;
165 } 163 }
166 164
167 static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) 165 static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
168 { 166 {
169 struct ucma_multicast *mc; 167 struct ucma_multicast *mc;
170 int ret; 168 int ret;
171 169
172 mc = kzalloc(sizeof(*mc), GFP_KERNEL); 170 mc = kzalloc(sizeof(*mc), GFP_KERNEL);
173 if (!mc) 171 if (!mc)
174 return NULL; 172 return NULL;
175 173
176 do { 174 do {
177 ret = idr_pre_get(&multicast_idr, GFP_KERNEL); 175 ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
178 if (!ret) 176 if (!ret)
179 goto error; 177 goto error;
180 178
181 mutex_lock(&mut); 179 mutex_lock(&mut);
182 ret = idr_get_new(&multicast_idr, mc, &mc->id); 180 ret = idr_get_new(&multicast_idr, mc, &mc->id);
183 mutex_unlock(&mut); 181 mutex_unlock(&mut);
184 } while (ret == -EAGAIN); 182 } while (ret == -EAGAIN);
185 183
186 if (ret) 184 if (ret)
187 goto error; 185 goto error;
188 186
189 mc->ctx = ctx; 187 mc->ctx = ctx;
190 list_add_tail(&mc->list, &ctx->mc_list); 188 list_add_tail(&mc->list, &ctx->mc_list);
191 return mc; 189 return mc;
192 190
193 error: 191 error:
194 kfree(mc); 192 kfree(mc);
195 return NULL; 193 return NULL;
196 } 194 }
197 195
198 static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst, 196 static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
199 struct rdma_conn_param *src) 197 struct rdma_conn_param *src)
200 { 198 {
201 if (src->private_data_len) 199 if (src->private_data_len)
202 memcpy(dst->private_data, src->private_data, 200 memcpy(dst->private_data, src->private_data,
203 src->private_data_len); 201 src->private_data_len);
204 dst->private_data_len = src->private_data_len; 202 dst->private_data_len = src->private_data_len;
205 dst->responder_resources =src->responder_resources; 203 dst->responder_resources =src->responder_resources;
206 dst->initiator_depth = src->initiator_depth; 204 dst->initiator_depth = src->initiator_depth;
207 dst->flow_control = src->flow_control; 205 dst->flow_control = src->flow_control;
208 dst->retry_count = src->retry_count; 206 dst->retry_count = src->retry_count;
209 dst->rnr_retry_count = src->rnr_retry_count; 207 dst->rnr_retry_count = src->rnr_retry_count;
210 dst->srq = src->srq; 208 dst->srq = src->srq;
211 dst->qp_num = src->qp_num; 209 dst->qp_num = src->qp_num;
212 } 210 }
213 211
214 static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst, 212 static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
215 struct rdma_ud_param *src) 213 struct rdma_ud_param *src)
216 { 214 {
217 if (src->private_data_len) 215 if (src->private_data_len)
218 memcpy(dst->private_data, src->private_data, 216 memcpy(dst->private_data, src->private_data,
219 src->private_data_len); 217 src->private_data_len);
220 dst->private_data_len = src->private_data_len; 218 dst->private_data_len = src->private_data_len;
221 ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr); 219 ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
222 dst->qp_num = src->qp_num; 220 dst->qp_num = src->qp_num;
223 dst->qkey = src->qkey; 221 dst->qkey = src->qkey;
224 } 222 }
225 223
226 static void ucma_set_event_context(struct ucma_context *ctx, 224 static void ucma_set_event_context(struct ucma_context *ctx,
227 struct rdma_cm_event *event, 225 struct rdma_cm_event *event,
228 struct ucma_event *uevent) 226 struct ucma_event *uevent)
229 { 227 {
230 uevent->ctx = ctx; 228 uevent->ctx = ctx;
231 switch (event->event) { 229 switch (event->event) {
232 case RDMA_CM_EVENT_MULTICAST_JOIN: 230 case RDMA_CM_EVENT_MULTICAST_JOIN:
233 case RDMA_CM_EVENT_MULTICAST_ERROR: 231 case RDMA_CM_EVENT_MULTICAST_ERROR:
234 uevent->mc = (struct ucma_multicast *) 232 uevent->mc = (struct ucma_multicast *)
235 event->param.ud.private_data; 233 event->param.ud.private_data;
236 uevent->resp.uid = uevent->mc->uid; 234 uevent->resp.uid = uevent->mc->uid;
237 uevent->resp.id = uevent->mc->id; 235 uevent->resp.id = uevent->mc->id;
238 break; 236 break;
239 default: 237 default:
240 uevent->resp.uid = ctx->uid; 238 uevent->resp.uid = ctx->uid;
241 uevent->resp.id = ctx->id; 239 uevent->resp.id = ctx->id;
242 break; 240 break;
243 } 241 }
244 } 242 }
245 243
246 static int ucma_event_handler(struct rdma_cm_id *cm_id, 244 static int ucma_event_handler(struct rdma_cm_id *cm_id,
247 struct rdma_cm_event *event) 245 struct rdma_cm_event *event)
248 { 246 {
249 struct ucma_event *uevent; 247 struct ucma_event *uevent;
250 struct ucma_context *ctx = cm_id->context; 248 struct ucma_context *ctx = cm_id->context;
251 int ret = 0; 249 int ret = 0;
252 250
253 uevent = kzalloc(sizeof(*uevent), GFP_KERNEL); 251 uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
254 if (!uevent) 252 if (!uevent)
255 return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; 253 return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
256 254
257 uevent->cm_id = cm_id; 255 uevent->cm_id = cm_id;
258 ucma_set_event_context(ctx, event, uevent); 256 ucma_set_event_context(ctx, event, uevent);
259 uevent->resp.event = event->event; 257 uevent->resp.event = event->event;
260 uevent->resp.status = event->status; 258 uevent->resp.status = event->status;
261 if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB) 259 if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
262 ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud); 260 ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
263 else 261 else
264 ucma_copy_conn_event(&uevent->resp.param.conn, 262 ucma_copy_conn_event(&uevent->resp.param.conn,
265 &event->param.conn); 263 &event->param.conn);
266 264
267 mutex_lock(&ctx->file->mut); 265 mutex_lock(&ctx->file->mut);
268 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { 266 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
269 if (!ctx->backlog) { 267 if (!ctx->backlog) {
270 ret = -ENOMEM; 268 ret = -ENOMEM;
271 kfree(uevent); 269 kfree(uevent);
272 goto out; 270 goto out;
273 } 271 }
274 ctx->backlog--; 272 ctx->backlog--;
275 } else if (!ctx->uid) { 273 } else if (!ctx->uid) {
276 /* 274 /*
277 * We ignore events for new connections until userspace has set 275 * We ignore events for new connections until userspace has set
278 * their context. This can only happen if an error occurs on a 276 * their context. This can only happen if an error occurs on a
279 * new connection before the user accepts it. This is okay, 277 * new connection before the user accepts it. This is okay,
280 * since the accept will just fail later. 278 * since the accept will just fail later.
281 */ 279 */
282 kfree(uevent); 280 kfree(uevent);
283 goto out; 281 goto out;
284 } 282 }
285 283
286 list_add_tail(&uevent->list, &ctx->file->event_list); 284 list_add_tail(&uevent->list, &ctx->file->event_list);
287 wake_up_interruptible(&ctx->file->poll_wait); 285 wake_up_interruptible(&ctx->file->poll_wait);
288 out: 286 out:
289 mutex_unlock(&ctx->file->mut); 287 mutex_unlock(&ctx->file->mut);
290 return ret; 288 return ret;
291 } 289 }
292 290
293 static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, 291 static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
294 int in_len, int out_len) 292 int in_len, int out_len)
295 { 293 {
296 struct ucma_context *ctx; 294 struct ucma_context *ctx;
297 struct rdma_ucm_get_event cmd; 295 struct rdma_ucm_get_event cmd;
298 struct ucma_event *uevent; 296 struct ucma_event *uevent;
299 int ret = 0; 297 int ret = 0;
300 DEFINE_WAIT(wait); 298 DEFINE_WAIT(wait);
301 299
302 if (out_len < sizeof uevent->resp) 300 if (out_len < sizeof uevent->resp)
303 return -ENOSPC; 301 return -ENOSPC;
304 302
305 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 303 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
306 return -EFAULT; 304 return -EFAULT;
307 305
308 mutex_lock(&file->mut); 306 mutex_lock(&file->mut);
309 while (list_empty(&file->event_list)) { 307 while (list_empty(&file->event_list)) {
310 mutex_unlock(&file->mut); 308 mutex_unlock(&file->mut);
311 309
312 if (file->filp->f_flags & O_NONBLOCK) 310 if (file->filp->f_flags & O_NONBLOCK)
313 return -EAGAIN; 311 return -EAGAIN;
314 312
315 if (wait_event_interruptible(file->poll_wait, 313 if (wait_event_interruptible(file->poll_wait,
316 !list_empty(&file->event_list))) 314 !list_empty(&file->event_list)))
317 return -ERESTARTSYS; 315 return -ERESTARTSYS;
318 316
319 mutex_lock(&file->mut); 317 mutex_lock(&file->mut);
320 } 318 }
321 319
322 uevent = list_entry(file->event_list.next, struct ucma_event, list); 320 uevent = list_entry(file->event_list.next, struct ucma_event, list);
323 321
324 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { 322 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
325 ctx = ucma_alloc_ctx(file); 323 ctx = ucma_alloc_ctx(file);
326 if (!ctx) { 324 if (!ctx) {
327 ret = -ENOMEM; 325 ret = -ENOMEM;
328 goto done; 326 goto done;
329 } 327 }
330 uevent->ctx->backlog++; 328 uevent->ctx->backlog++;
331 ctx->cm_id = uevent->cm_id; 329 ctx->cm_id = uevent->cm_id;
332 ctx->cm_id->context = ctx; 330 ctx->cm_id->context = ctx;
333 uevent->resp.id = ctx->id; 331 uevent->resp.id = ctx->id;
334 } 332 }
335 333
336 if (copy_to_user((void __user *)(unsigned long)cmd.response, 334 if (copy_to_user((void __user *)(unsigned long)cmd.response,
337 &uevent->resp, sizeof uevent->resp)) { 335 &uevent->resp, sizeof uevent->resp)) {
338 ret = -EFAULT; 336 ret = -EFAULT;
339 goto done; 337 goto done;
340 } 338 }
341 339
342 list_del(&uevent->list); 340 list_del(&uevent->list);
343 uevent->ctx->events_reported++; 341 uevent->ctx->events_reported++;
344 if (uevent->mc) 342 if (uevent->mc)
345 uevent->mc->events_reported++; 343 uevent->mc->events_reported++;
346 kfree(uevent); 344 kfree(uevent);
347 done: 345 done:
348 mutex_unlock(&file->mut); 346 mutex_unlock(&file->mut);
349 return ret; 347 return ret;
350 } 348 }
351 349
352 static ssize_t ucma_create_id(struct ucma_file *file, 350 static ssize_t ucma_create_id(struct ucma_file *file,
353 const char __user *inbuf, 351 const char __user *inbuf,
354 int in_len, int out_len) 352 int in_len, int out_len)
355 { 353 {
356 struct rdma_ucm_create_id cmd; 354 struct rdma_ucm_create_id cmd;
357 struct rdma_ucm_create_id_resp resp; 355 struct rdma_ucm_create_id_resp resp;
358 struct ucma_context *ctx; 356 struct ucma_context *ctx;
359 int ret; 357 int ret;
360 358
361 if (out_len < sizeof(resp)) 359 if (out_len < sizeof(resp))
362 return -ENOSPC; 360 return -ENOSPC;
363 361
364 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 362 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
365 return -EFAULT; 363 return -EFAULT;
366 364
367 mutex_lock(&file->mut); 365 mutex_lock(&file->mut);
368 ctx = ucma_alloc_ctx(file); 366 ctx = ucma_alloc_ctx(file);
369 mutex_unlock(&file->mut); 367 mutex_unlock(&file->mut);
370 if (!ctx) 368 if (!ctx)
371 return -ENOMEM; 369 return -ENOMEM;
372 370
373 ctx->uid = cmd.uid; 371 ctx->uid = cmd.uid;
374 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps); 372 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
375 if (IS_ERR(ctx->cm_id)) { 373 if (IS_ERR(ctx->cm_id)) {
376 ret = PTR_ERR(ctx->cm_id); 374 ret = PTR_ERR(ctx->cm_id);
377 goto err1; 375 goto err1;
378 } 376 }
379 377
380 resp.id = ctx->id; 378 resp.id = ctx->id;
381 if (copy_to_user((void __user *)(unsigned long)cmd.response, 379 if (copy_to_user((void __user *)(unsigned long)cmd.response,
382 &resp, sizeof(resp))) { 380 &resp, sizeof(resp))) {
383 ret = -EFAULT; 381 ret = -EFAULT;
384 goto err2; 382 goto err2;
385 } 383 }
386 return 0; 384 return 0;
387 385
388 err2: 386 err2:
389 rdma_destroy_id(ctx->cm_id); 387 rdma_destroy_id(ctx->cm_id);
390 err1: 388 err1:
391 mutex_lock(&mut); 389 mutex_lock(&mut);
392 idr_remove(&ctx_idr, ctx->id); 390 idr_remove(&ctx_idr, ctx->id);
393 mutex_unlock(&mut); 391 mutex_unlock(&mut);
394 kfree(ctx); 392 kfree(ctx);
395 return ret; 393 return ret;
396 } 394 }
397 395
398 static void ucma_cleanup_multicast(struct ucma_context *ctx) 396 static void ucma_cleanup_multicast(struct ucma_context *ctx)
399 { 397 {
400 struct ucma_multicast *mc, *tmp; 398 struct ucma_multicast *mc, *tmp;
401 399
402 mutex_lock(&mut); 400 mutex_lock(&mut);
403 list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { 401 list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
404 list_del(&mc->list); 402 list_del(&mc->list);
405 idr_remove(&multicast_idr, mc->id); 403 idr_remove(&multicast_idr, mc->id);
406 kfree(mc); 404 kfree(mc);
407 } 405 }
408 mutex_unlock(&mut); 406 mutex_unlock(&mut);
409 } 407 }
410 408
411 static void ucma_cleanup_events(struct ucma_context *ctx) 409 static void ucma_cleanup_events(struct ucma_context *ctx)
412 { 410 {
413 struct ucma_event *uevent, *tmp; 411 struct ucma_event *uevent, *tmp;
414 412
415 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { 413 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
416 if (uevent->ctx != ctx) 414 if (uevent->ctx != ctx)
417 continue; 415 continue;
418 416
419 list_del(&uevent->list); 417 list_del(&uevent->list);
420 418
421 /* clear incoming connections. */ 419 /* clear incoming connections. */
422 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) 420 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
423 rdma_destroy_id(uevent->cm_id); 421 rdma_destroy_id(uevent->cm_id);
424 422
425 kfree(uevent); 423 kfree(uevent);
426 } 424 }
427 } 425 }
428 426
429 static void ucma_cleanup_mc_events(struct ucma_multicast *mc) 427 static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
430 { 428 {
431 struct ucma_event *uevent, *tmp; 429 struct ucma_event *uevent, *tmp;
432 430
433 list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) { 431 list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
434 if (uevent->mc != mc) 432 if (uevent->mc != mc)
435 continue; 433 continue;
436 434
437 list_del(&uevent->list); 435 list_del(&uevent->list);
438 kfree(uevent); 436 kfree(uevent);
439 } 437 }
440 } 438 }
441 439
442 static int ucma_free_ctx(struct ucma_context *ctx) 440 static int ucma_free_ctx(struct ucma_context *ctx)
443 { 441 {
444 int events_reported; 442 int events_reported;
445 443
446 /* No new events will be generated after destroying the id. */ 444 /* No new events will be generated after destroying the id. */
447 rdma_destroy_id(ctx->cm_id); 445 rdma_destroy_id(ctx->cm_id);
448 446
449 ucma_cleanup_multicast(ctx); 447 ucma_cleanup_multicast(ctx);
450 448
451 /* Cleanup events not yet reported to the user. */ 449 /* Cleanup events not yet reported to the user. */
452 mutex_lock(&ctx->file->mut); 450 mutex_lock(&ctx->file->mut);
453 ucma_cleanup_events(ctx); 451 ucma_cleanup_events(ctx);
454 list_del(&ctx->list); 452 list_del(&ctx->list);
455 mutex_unlock(&ctx->file->mut); 453 mutex_unlock(&ctx->file->mut);
456 454
457 events_reported = ctx->events_reported; 455 events_reported = ctx->events_reported;
458 kfree(ctx); 456 kfree(ctx);
459 return events_reported; 457 return events_reported;
460 } 458 }
461 459
462 static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf, 460 static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
463 int in_len, int out_len) 461 int in_len, int out_len)
464 { 462 {
465 struct rdma_ucm_destroy_id cmd; 463 struct rdma_ucm_destroy_id cmd;
466 struct rdma_ucm_destroy_id_resp resp; 464 struct rdma_ucm_destroy_id_resp resp;
467 struct ucma_context *ctx; 465 struct ucma_context *ctx;
468 int ret = 0; 466 int ret = 0;
469 467
470 if (out_len < sizeof(resp)) 468 if (out_len < sizeof(resp))
471 return -ENOSPC; 469 return -ENOSPC;
472 470
473 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 471 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
474 return -EFAULT; 472 return -EFAULT;
475 473
476 mutex_lock(&mut); 474 mutex_lock(&mut);
477 ctx = _ucma_find_context(cmd.id, file); 475 ctx = _ucma_find_context(cmd.id, file);
478 if (!IS_ERR(ctx)) 476 if (!IS_ERR(ctx))
479 idr_remove(&ctx_idr, ctx->id); 477 idr_remove(&ctx_idr, ctx->id);
480 mutex_unlock(&mut); 478 mutex_unlock(&mut);
481 479
482 if (IS_ERR(ctx)) 480 if (IS_ERR(ctx))
483 return PTR_ERR(ctx); 481 return PTR_ERR(ctx);
484 482
485 ucma_put_ctx(ctx); 483 ucma_put_ctx(ctx);
486 wait_for_completion(&ctx->comp); 484 wait_for_completion(&ctx->comp);
487 resp.events_reported = ucma_free_ctx(ctx); 485 resp.events_reported = ucma_free_ctx(ctx);
488 486
489 if (copy_to_user((void __user *)(unsigned long)cmd.response, 487 if (copy_to_user((void __user *)(unsigned long)cmd.response,
490 &resp, sizeof(resp))) 488 &resp, sizeof(resp)))
491 ret = -EFAULT; 489 ret = -EFAULT;
492 490
493 return ret; 491 return ret;
494 } 492 }
495 493
496 static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf, 494 static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
497 int in_len, int out_len) 495 int in_len, int out_len)
498 { 496 {
499 struct rdma_ucm_bind_addr cmd; 497 struct rdma_ucm_bind_addr cmd;
500 struct ucma_context *ctx; 498 struct ucma_context *ctx;
501 int ret; 499 int ret;
502 500
503 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 501 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
504 return -EFAULT; 502 return -EFAULT;
505 503
506 ctx = ucma_get_ctx(file, cmd.id); 504 ctx = ucma_get_ctx(file, cmd.id);
507 if (IS_ERR(ctx)) 505 if (IS_ERR(ctx))
508 return PTR_ERR(ctx); 506 return PTR_ERR(ctx);
509 507
510 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); 508 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
511 ucma_put_ctx(ctx); 509 ucma_put_ctx(ctx);
512 return ret; 510 return ret;
513 } 511 }
514 512
515 static ssize_t ucma_resolve_addr(struct ucma_file *file, 513 static ssize_t ucma_resolve_addr(struct ucma_file *file,
516 const char __user *inbuf, 514 const char __user *inbuf,
517 int in_len, int out_len) 515 int in_len, int out_len)
518 { 516 {
519 struct rdma_ucm_resolve_addr cmd; 517 struct rdma_ucm_resolve_addr cmd;
520 struct ucma_context *ctx; 518 struct ucma_context *ctx;
521 int ret; 519 int ret;
522 520
523 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 521 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
524 return -EFAULT; 522 return -EFAULT;
525 523
526 ctx = ucma_get_ctx(file, cmd.id); 524 ctx = ucma_get_ctx(file, cmd.id);
527 if (IS_ERR(ctx)) 525 if (IS_ERR(ctx))
528 return PTR_ERR(ctx); 526 return PTR_ERR(ctx);
529 527
530 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, 528 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
531 (struct sockaddr *) &cmd.dst_addr, 529 (struct sockaddr *) &cmd.dst_addr,
532 cmd.timeout_ms); 530 cmd.timeout_ms);
533 ucma_put_ctx(ctx); 531 ucma_put_ctx(ctx);
534 return ret; 532 return ret;
535 } 533 }
536 534
537 static ssize_t ucma_resolve_route(struct ucma_file *file, 535 static ssize_t ucma_resolve_route(struct ucma_file *file,
538 const char __user *inbuf, 536 const char __user *inbuf,
539 int in_len, int out_len) 537 int in_len, int out_len)
540 { 538 {
541 struct rdma_ucm_resolve_route cmd; 539 struct rdma_ucm_resolve_route cmd;
542 struct ucma_context *ctx; 540 struct ucma_context *ctx;
543 int ret; 541 int ret;
544 542
545 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 543 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
546 return -EFAULT; 544 return -EFAULT;
547 545
548 ctx = ucma_get_ctx(file, cmd.id); 546 ctx = ucma_get_ctx(file, cmd.id);
549 if (IS_ERR(ctx)) 547 if (IS_ERR(ctx))
550 return PTR_ERR(ctx); 548 return PTR_ERR(ctx);
551 549
552 ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); 550 ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
553 ucma_put_ctx(ctx); 551 ucma_put_ctx(ctx);
554 return ret; 552 return ret;
555 } 553 }
556 554
557 static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, 555 static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
558 struct rdma_route *route) 556 struct rdma_route *route)
559 { 557 {
560 struct rdma_dev_addr *dev_addr; 558 struct rdma_dev_addr *dev_addr;
561 559
562 resp->num_paths = route->num_paths; 560 resp->num_paths = route->num_paths;
563 switch (route->num_paths) { 561 switch (route->num_paths) {
564 case 0: 562 case 0:
565 dev_addr = &route->addr.dev_addr; 563 dev_addr = &route->addr.dev_addr;
566 ib_addr_get_dgid(dev_addr, 564 ib_addr_get_dgid(dev_addr,
567 (union ib_gid *) &resp->ib_route[0].dgid); 565 (union ib_gid *) &resp->ib_route[0].dgid);
568 ib_addr_get_sgid(dev_addr, 566 ib_addr_get_sgid(dev_addr,
569 (union ib_gid *) &resp->ib_route[0].sgid); 567 (union ib_gid *) &resp->ib_route[0].sgid);
570 resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 568 resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
571 break; 569 break;
572 case 2: 570 case 2:
573 ib_copy_path_rec_to_user(&resp->ib_route[1], 571 ib_copy_path_rec_to_user(&resp->ib_route[1],
574 &route->path_rec[1]); 572 &route->path_rec[1]);
575 /* fall through */ 573 /* fall through */
576 case 1: 574 case 1:
577 ib_copy_path_rec_to_user(&resp->ib_route[0], 575 ib_copy_path_rec_to_user(&resp->ib_route[0],
578 &route->path_rec[0]); 576 &route->path_rec[0]);
579 break; 577 break;
580 default: 578 default:
581 break; 579 break;
582 } 580 }
583 } 581 }
584 582
585 static ssize_t ucma_query_route(struct ucma_file *file, 583 static ssize_t ucma_query_route(struct ucma_file *file,
586 const char __user *inbuf, 584 const char __user *inbuf,
587 int in_len, int out_len) 585 int in_len, int out_len)
588 { 586 {
589 struct rdma_ucm_query_route cmd; 587 struct rdma_ucm_query_route cmd;
590 struct rdma_ucm_query_route_resp resp; 588 struct rdma_ucm_query_route_resp resp;
591 struct ucma_context *ctx; 589 struct ucma_context *ctx;
592 struct sockaddr *addr; 590 struct sockaddr *addr;
593 int ret = 0; 591 int ret = 0;
594 592
595 if (out_len < sizeof(resp)) 593 if (out_len < sizeof(resp))
596 return -ENOSPC; 594 return -ENOSPC;
597 595
598 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 596 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
599 return -EFAULT; 597 return -EFAULT;
600 598
601 ctx = ucma_get_ctx(file, cmd.id); 599 ctx = ucma_get_ctx(file, cmd.id);
602 if (IS_ERR(ctx)) 600 if (IS_ERR(ctx))
603 return PTR_ERR(ctx); 601 return PTR_ERR(ctx);
604 602
605 memset(&resp, 0, sizeof resp); 603 memset(&resp, 0, sizeof resp);
606 addr = &ctx->cm_id->route.addr.src_addr; 604 addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
607 memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? 605 memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
608 sizeof(struct sockaddr_in) : 606 sizeof(struct sockaddr_in) :
609 sizeof(struct sockaddr_in6)); 607 sizeof(struct sockaddr_in6));
610 addr = &ctx->cm_id->route.addr.dst_addr; 608 addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
611 memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? 609 memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
612 sizeof(struct sockaddr_in) : 610 sizeof(struct sockaddr_in) :
613 sizeof(struct sockaddr_in6)); 611 sizeof(struct sockaddr_in6));
614 if (!ctx->cm_id->device) 612 if (!ctx->cm_id->device)
615 goto out; 613 goto out;
616 614
617 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; 615 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
618 resp.port_num = ctx->cm_id->port_num; 616 resp.port_num = ctx->cm_id->port_num;
619 switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) { 617 switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
620 case RDMA_TRANSPORT_IB: 618 case RDMA_TRANSPORT_IB:
621 ucma_copy_ib_route(&resp, &ctx->cm_id->route); 619 ucma_copy_ib_route(&resp, &ctx->cm_id->route);
622 break; 620 break;
623 default: 621 default:
624 break; 622 break;
625 } 623 }
626 624
627 out: 625 out:
628 if (copy_to_user((void __user *)(unsigned long)cmd.response, 626 if (copy_to_user((void __user *)(unsigned long)cmd.response,
629 &resp, sizeof(resp))) 627 &resp, sizeof(resp)))
630 ret = -EFAULT; 628 ret = -EFAULT;
631 629
632 ucma_put_ctx(ctx); 630 ucma_put_ctx(ctx);
633 return ret; 631 return ret;
634 } 632 }
635 633
636 static void ucma_copy_conn_param(struct rdma_conn_param *dst, 634 static void ucma_copy_conn_param(struct rdma_conn_param *dst,
637 struct rdma_ucm_conn_param *src) 635 struct rdma_ucm_conn_param *src)
638 { 636 {
639 dst->private_data = src->private_data; 637 dst->private_data = src->private_data;
640 dst->private_data_len = src->private_data_len; 638 dst->private_data_len = src->private_data_len;
641 dst->responder_resources =src->responder_resources; 639 dst->responder_resources =src->responder_resources;
642 dst->initiator_depth = src->initiator_depth; 640 dst->initiator_depth = src->initiator_depth;
643 dst->flow_control = src->flow_control; 641 dst->flow_control = src->flow_control;
644 dst->retry_count = src->retry_count; 642 dst->retry_count = src->retry_count;
645 dst->rnr_retry_count = src->rnr_retry_count; 643 dst->rnr_retry_count = src->rnr_retry_count;
646 dst->srq = src->srq; 644 dst->srq = src->srq;
647 dst->qp_num = src->qp_num; 645 dst->qp_num = src->qp_num;
648 } 646 }
649 647
650 static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, 648 static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
651 int in_len, int out_len) 649 int in_len, int out_len)
652 { 650 {
653 struct rdma_ucm_connect cmd; 651 struct rdma_ucm_connect cmd;
654 struct rdma_conn_param conn_param; 652 struct rdma_conn_param conn_param;
655 struct ucma_context *ctx; 653 struct ucma_context *ctx;
656 int ret; 654 int ret;
657 655
658 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 656 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
659 return -EFAULT; 657 return -EFAULT;
660 658
661 if (!cmd.conn_param.valid) 659 if (!cmd.conn_param.valid)
662 return -EINVAL; 660 return -EINVAL;
663 661
664 ctx = ucma_get_ctx(file, cmd.id); 662 ctx = ucma_get_ctx(file, cmd.id);
665 if (IS_ERR(ctx)) 663 if (IS_ERR(ctx))
666 return PTR_ERR(ctx); 664 return PTR_ERR(ctx);
667 665
668 ucma_copy_conn_param(&conn_param, &cmd.conn_param); 666 ucma_copy_conn_param(&conn_param, &cmd.conn_param);
669 ret = rdma_connect(ctx->cm_id, &conn_param); 667 ret = rdma_connect(ctx->cm_id, &conn_param);
670 ucma_put_ctx(ctx); 668 ucma_put_ctx(ctx);
671 return ret; 669 return ret;
672 } 670 }
673 671
674 static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, 672 static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
675 int in_len, int out_len) 673 int in_len, int out_len)
676 { 674 {
677 struct rdma_ucm_listen cmd; 675 struct rdma_ucm_listen cmd;
678 struct ucma_context *ctx; 676 struct ucma_context *ctx;
679 int ret; 677 int ret;
680 678
681 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 679 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
682 return -EFAULT; 680 return -EFAULT;
683 681
684 ctx = ucma_get_ctx(file, cmd.id); 682 ctx = ucma_get_ctx(file, cmd.id);
685 if (IS_ERR(ctx)) 683 if (IS_ERR(ctx))
686 return PTR_ERR(ctx); 684 return PTR_ERR(ctx);
687 685
688 ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ? 686 ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
689 cmd.backlog : UCMA_MAX_BACKLOG; 687 cmd.backlog : UCMA_MAX_BACKLOG;
690 ret = rdma_listen(ctx->cm_id, ctx->backlog); 688 ret = rdma_listen(ctx->cm_id, ctx->backlog);
691 ucma_put_ctx(ctx); 689 ucma_put_ctx(ctx);
692 return ret; 690 return ret;
693 } 691 }
694 692
695 static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, 693 static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
696 int in_len, int out_len) 694 int in_len, int out_len)
697 { 695 {
698 struct rdma_ucm_accept cmd; 696 struct rdma_ucm_accept cmd;
699 struct rdma_conn_param conn_param; 697 struct rdma_conn_param conn_param;
700 struct ucma_context *ctx; 698 struct ucma_context *ctx;
701 int ret; 699 int ret;
702 700
703 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 701 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
704 return -EFAULT; 702 return -EFAULT;
705 703
706 ctx = ucma_get_ctx(file, cmd.id); 704 ctx = ucma_get_ctx(file, cmd.id);
707 if (IS_ERR(ctx)) 705 if (IS_ERR(ctx))
708 return PTR_ERR(ctx); 706 return PTR_ERR(ctx);
709 707
710 if (cmd.conn_param.valid) { 708 if (cmd.conn_param.valid) {
711 ctx->uid = cmd.uid; 709 ctx->uid = cmd.uid;
712 ucma_copy_conn_param(&conn_param, &cmd.conn_param); 710 ucma_copy_conn_param(&conn_param, &cmd.conn_param);
713 ret = rdma_accept(ctx->cm_id, &conn_param); 711 ret = rdma_accept(ctx->cm_id, &conn_param);
714 } else 712 } else
715 ret = rdma_accept(ctx->cm_id, NULL); 713 ret = rdma_accept(ctx->cm_id, NULL);
716 714
717 ucma_put_ctx(ctx); 715 ucma_put_ctx(ctx);
718 return ret; 716 return ret;
719 } 717 }
720 718
721 static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, 719 static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
722 int in_len, int out_len) 720 int in_len, int out_len)
723 { 721 {
724 struct rdma_ucm_reject cmd; 722 struct rdma_ucm_reject cmd;
725 struct ucma_context *ctx; 723 struct ucma_context *ctx;
726 int ret; 724 int ret;
727 725
728 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 726 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
729 return -EFAULT; 727 return -EFAULT;
730 728
731 ctx = ucma_get_ctx(file, cmd.id); 729 ctx = ucma_get_ctx(file, cmd.id);
732 if (IS_ERR(ctx)) 730 if (IS_ERR(ctx))
733 return PTR_ERR(ctx); 731 return PTR_ERR(ctx);
734 732
735 ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); 733 ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
736 ucma_put_ctx(ctx); 734 ucma_put_ctx(ctx);
737 return ret; 735 return ret;
738 } 736 }
739 737
740 static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, 738 static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
741 int in_len, int out_len) 739 int in_len, int out_len)
742 { 740 {
743 struct rdma_ucm_disconnect cmd; 741 struct rdma_ucm_disconnect cmd;
744 struct ucma_context *ctx; 742 struct ucma_context *ctx;
745 int ret; 743 int ret;
746 744
747 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 745 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
748 return -EFAULT; 746 return -EFAULT;
749 747
750 ctx = ucma_get_ctx(file, cmd.id); 748 ctx = ucma_get_ctx(file, cmd.id);
751 if (IS_ERR(ctx)) 749 if (IS_ERR(ctx))
752 return PTR_ERR(ctx); 750 return PTR_ERR(ctx);
753 751
754 ret = rdma_disconnect(ctx->cm_id); 752 ret = rdma_disconnect(ctx->cm_id);
755 ucma_put_ctx(ctx); 753 ucma_put_ctx(ctx);
756 return ret; 754 return ret;
757 } 755 }
758 756
759 static ssize_t ucma_init_qp_attr(struct ucma_file *file, 757 static ssize_t ucma_init_qp_attr(struct ucma_file *file,
760 const char __user *inbuf, 758 const char __user *inbuf,
761 int in_len, int out_len) 759 int in_len, int out_len)
762 { 760 {
763 struct rdma_ucm_init_qp_attr cmd; 761 struct rdma_ucm_init_qp_attr cmd;
764 struct ib_uverbs_qp_attr resp; 762 struct ib_uverbs_qp_attr resp;
765 struct ucma_context *ctx; 763 struct ucma_context *ctx;
766 struct ib_qp_attr qp_attr; 764 struct ib_qp_attr qp_attr;
767 int ret; 765 int ret;
768 766
769 if (out_len < sizeof(resp)) 767 if (out_len < sizeof(resp))
770 return -ENOSPC; 768 return -ENOSPC;
771 769
772 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 770 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
773 return -EFAULT; 771 return -EFAULT;
774 772
775 ctx = ucma_get_ctx(file, cmd.id); 773 ctx = ucma_get_ctx(file, cmd.id);
776 if (IS_ERR(ctx)) 774 if (IS_ERR(ctx))
777 return PTR_ERR(ctx); 775 return PTR_ERR(ctx);
778 776
779 resp.qp_attr_mask = 0; 777 resp.qp_attr_mask = 0;
780 memset(&qp_attr, 0, sizeof qp_attr); 778 memset(&qp_attr, 0, sizeof qp_attr);
781 qp_attr.qp_state = cmd.qp_state; 779 qp_attr.qp_state = cmd.qp_state;
782 ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); 780 ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
783 if (ret) 781 if (ret)
784 goto out; 782 goto out;
785 783
786 ib_copy_qp_attr_to_user(&resp, &qp_attr); 784 ib_copy_qp_attr_to_user(&resp, &qp_attr);
787 if (copy_to_user((void __user *)(unsigned long)cmd.response, 785 if (copy_to_user((void __user *)(unsigned long)cmd.response,
788 &resp, sizeof(resp))) 786 &resp, sizeof(resp)))
789 ret = -EFAULT; 787 ret = -EFAULT;
790 788
791 out: 789 out:
792 ucma_put_ctx(ctx); 790 ucma_put_ctx(ctx);
793 return ret; 791 return ret;
794 } 792 }
795 793
796 static int ucma_set_option_id(struct ucma_context *ctx, int optname, 794 static int ucma_set_option_id(struct ucma_context *ctx, int optname,
797 void *optval, size_t optlen) 795 void *optval, size_t optlen)
798 { 796 {
799 int ret = 0; 797 int ret = 0;
800 798
801 switch (optname) { 799 switch (optname) {
802 case RDMA_OPTION_ID_TOS: 800 case RDMA_OPTION_ID_TOS:
803 if (optlen != sizeof(u8)) { 801 if (optlen != sizeof(u8)) {
804 ret = -EINVAL; 802 ret = -EINVAL;
805 break; 803 break;
806 } 804 }
807 rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); 805 rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
808 break; 806 break;
809 default: 807 default:
810 ret = -ENOSYS; 808 ret = -ENOSYS;
811 } 809 }
812 810
813 return ret; 811 return ret;
814 } 812 }
815 813
816 static int ucma_set_option_level(struct ucma_context *ctx, int level, 814 static int ucma_set_option_level(struct ucma_context *ctx, int level,
817 int optname, void *optval, size_t optlen) 815 int optname, void *optval, size_t optlen)
818 { 816 {
819 int ret; 817 int ret;
820 818
821 switch (level) { 819 switch (level) {
822 case RDMA_OPTION_ID: 820 case RDMA_OPTION_ID:
823 ret = ucma_set_option_id(ctx, optname, optval, optlen); 821 ret = ucma_set_option_id(ctx, optname, optval, optlen);
824 break; 822 break;
825 default: 823 default:
826 ret = -ENOSYS; 824 ret = -ENOSYS;
827 } 825 }
828 826
829 return ret; 827 return ret;
830 } 828 }
831 829
832 static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, 830 static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
833 int in_len, int out_len) 831 int in_len, int out_len)
834 { 832 {
835 struct rdma_ucm_set_option cmd; 833 struct rdma_ucm_set_option cmd;
836 struct ucma_context *ctx; 834 struct ucma_context *ctx;
837 void *optval; 835 void *optval;
838 int ret; 836 int ret;
839 837
840 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 838 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
841 return -EFAULT; 839 return -EFAULT;
842 840
843 ctx = ucma_get_ctx(file, cmd.id); 841 ctx = ucma_get_ctx(file, cmd.id);
844 if (IS_ERR(ctx)) 842 if (IS_ERR(ctx))
845 return PTR_ERR(ctx); 843 return PTR_ERR(ctx);
846 844
847 optval = kmalloc(cmd.optlen, GFP_KERNEL); 845 optval = kmalloc(cmd.optlen, GFP_KERNEL);
848 if (!optval) { 846 if (!optval) {
849 ret = -ENOMEM; 847 ret = -ENOMEM;
850 goto out1; 848 goto out1;
851 } 849 }
852 850
853 if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval, 851 if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
854 cmd.optlen)) { 852 cmd.optlen)) {
855 ret = -EFAULT; 853 ret = -EFAULT;
856 goto out2; 854 goto out2;
857 } 855 }
858 856
859 ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval, 857 ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
860 cmd.optlen); 858 cmd.optlen);
861 out2: 859 out2:
862 kfree(optval); 860 kfree(optval);
863 out1: 861 out1:
864 ucma_put_ctx(ctx); 862 ucma_put_ctx(ctx);
865 return ret; 863 return ret;
866 } 864 }
867 865
868 static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, 866 static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
869 int in_len, int out_len) 867 int in_len, int out_len)
870 { 868 {
871 struct rdma_ucm_notify cmd; 869 struct rdma_ucm_notify cmd;
872 struct ucma_context *ctx; 870 struct ucma_context *ctx;
873 int ret; 871 int ret;
874 872
875 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 873 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
876 return -EFAULT; 874 return -EFAULT;
877 875
878 ctx = ucma_get_ctx(file, cmd.id); 876 ctx = ucma_get_ctx(file, cmd.id);
879 if (IS_ERR(ctx)) 877 if (IS_ERR(ctx))
880 return PTR_ERR(ctx); 878 return PTR_ERR(ctx);
881 879
882 ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); 880 ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
883 ucma_put_ctx(ctx); 881 ucma_put_ctx(ctx);
884 return ret; 882 return ret;
885 } 883 }
886 884
887 static ssize_t ucma_join_multicast(struct ucma_file *file, 885 static ssize_t ucma_join_multicast(struct ucma_file *file,
888 const char __user *inbuf, 886 const char __user *inbuf,
889 int in_len, int out_len) 887 int in_len, int out_len)
890 { 888 {
891 struct rdma_ucm_join_mcast cmd; 889 struct rdma_ucm_join_mcast cmd;
892 struct rdma_ucm_create_id_resp resp; 890 struct rdma_ucm_create_id_resp resp;
893 struct ucma_context *ctx; 891 struct ucma_context *ctx;
894 struct ucma_multicast *mc; 892 struct ucma_multicast *mc;
895 int ret; 893 int ret;
896 894
897 if (out_len < sizeof(resp)) 895 if (out_len < sizeof(resp))
898 return -ENOSPC; 896 return -ENOSPC;
899 897
900 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 898 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
901 return -EFAULT; 899 return -EFAULT;
902 900
903 ctx = ucma_get_ctx(file, cmd.id); 901 ctx = ucma_get_ctx(file, cmd.id);
904 if (IS_ERR(ctx)) 902 if (IS_ERR(ctx))
905 return PTR_ERR(ctx); 903 return PTR_ERR(ctx);
906 904
907 mutex_lock(&file->mut); 905 mutex_lock(&file->mut);
908 mc = ucma_alloc_multicast(ctx); 906 mc = ucma_alloc_multicast(ctx);
909 if (IS_ERR(mc)) { 907 if (IS_ERR(mc)) {
910 ret = PTR_ERR(mc); 908 ret = PTR_ERR(mc);
911 goto err1; 909 goto err1;
912 } 910 }
913 911
914 mc->uid = cmd.uid; 912 mc->uid = cmd.uid;
915 memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr); 913 memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
916 ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc); 914 ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
917 if (ret) 915 if (ret)
918 goto err2; 916 goto err2;
919 917
920 resp.id = mc->id; 918 resp.id = mc->id;
921 if (copy_to_user((void __user *)(unsigned long)cmd.response, 919 if (copy_to_user((void __user *)(unsigned long)cmd.response,
922 &resp, sizeof(resp))) { 920 &resp, sizeof(resp))) {
923 ret = -EFAULT; 921 ret = -EFAULT;
924 goto err3; 922 goto err3;
925 } 923 }
926 924
927 mutex_unlock(&file->mut); 925 mutex_unlock(&file->mut);
928 ucma_put_ctx(ctx); 926 ucma_put_ctx(ctx);
929 return 0; 927 return 0;
930 928
931 err3: 929 err3:
932 rdma_leave_multicast(ctx->cm_id, &mc->addr); 930 rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
933 ucma_cleanup_mc_events(mc); 931 ucma_cleanup_mc_events(mc);
934 err2: 932 err2:
935 mutex_lock(&mut); 933 mutex_lock(&mut);
936 idr_remove(&multicast_idr, mc->id); 934 idr_remove(&multicast_idr, mc->id);
937 mutex_unlock(&mut); 935 mutex_unlock(&mut);
938 list_del(&mc->list); 936 list_del(&mc->list);
939 kfree(mc); 937 kfree(mc);
940 err1: 938 err1:
941 mutex_unlock(&file->mut); 939 mutex_unlock(&file->mut);
942 ucma_put_ctx(ctx); 940 ucma_put_ctx(ctx);
943 return ret; 941 return ret;
944 } 942 }
945 943
946 static ssize_t ucma_leave_multicast(struct ucma_file *file, 944 static ssize_t ucma_leave_multicast(struct ucma_file *file,
947 const char __user *inbuf, 945 const char __user *inbuf,
948 int in_len, int out_len) 946 int in_len, int out_len)
949 { 947 {
950 struct rdma_ucm_destroy_id cmd; 948 struct rdma_ucm_destroy_id cmd;
951 struct rdma_ucm_destroy_id_resp resp; 949 struct rdma_ucm_destroy_id_resp resp;
952 struct ucma_multicast *mc; 950 struct ucma_multicast *mc;
953 int ret = 0; 951 int ret = 0;
954 952
955 if (out_len < sizeof(resp)) 953 if (out_len < sizeof(resp))
956 return -ENOSPC; 954 return -ENOSPC;
957 955
958 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 956 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
959 return -EFAULT; 957 return -EFAULT;
960 958
961 mutex_lock(&mut); 959 mutex_lock(&mut);
962 mc = idr_find(&multicast_idr, cmd.id); 960 mc = idr_find(&multicast_idr, cmd.id);
963 if (!mc) 961 if (!mc)
964 mc = ERR_PTR(-ENOENT); 962 mc = ERR_PTR(-ENOENT);
965 else if (mc->ctx->file != file) 963 else if (mc->ctx->file != file)
966 mc = ERR_PTR(-EINVAL); 964 mc = ERR_PTR(-EINVAL);
967 else { 965 else {
968 idr_remove(&multicast_idr, mc->id); 966 idr_remove(&multicast_idr, mc->id);
969 atomic_inc(&mc->ctx->ref); 967 atomic_inc(&mc->ctx->ref);
970 } 968 }
971 mutex_unlock(&mut); 969 mutex_unlock(&mut);
972 970
973 if (IS_ERR(mc)) { 971 if (IS_ERR(mc)) {
974 ret = PTR_ERR(mc); 972 ret = PTR_ERR(mc);
975 goto out; 973 goto out;
976 } 974 }
977 975
978 rdma_leave_multicast(mc->ctx->cm_id, &mc->addr); 976 rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
979 mutex_lock(&mc->ctx->file->mut); 977 mutex_lock(&mc->ctx->file->mut);
980 ucma_cleanup_mc_events(mc); 978 ucma_cleanup_mc_events(mc);
981 list_del(&mc->list); 979 list_del(&mc->list);
982 mutex_unlock(&mc->ctx->file->mut); 980 mutex_unlock(&mc->ctx->file->mut);
983 981
984 ucma_put_ctx(mc->ctx); 982 ucma_put_ctx(mc->ctx);
985 resp.events_reported = mc->events_reported; 983 resp.events_reported = mc->events_reported;
986 kfree(mc); 984 kfree(mc);
987 985
988 if (copy_to_user((void __user *)(unsigned long)cmd.response, 986 if (copy_to_user((void __user *)(unsigned long)cmd.response,
989 &resp, sizeof(resp))) 987 &resp, sizeof(resp)))
990 ret = -EFAULT; 988 ret = -EFAULT;
991 out: 989 out:
992 return ret; 990 return ret;
993 } 991 }
994 992
995 static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2) 993 static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
996 { 994 {
997 /* Acquire mutex's based on pointer comparison to prevent deadlock. */ 995 /* Acquire mutex's based on pointer comparison to prevent deadlock. */
998 if (file1 < file2) { 996 if (file1 < file2) {
999 mutex_lock(&file1->mut); 997 mutex_lock(&file1->mut);
1000 mutex_lock(&file2->mut); 998 mutex_lock(&file2->mut);
1001 } else { 999 } else {
1002 mutex_lock(&file2->mut); 1000 mutex_lock(&file2->mut);
1003 mutex_lock(&file1->mut); 1001 mutex_lock(&file1->mut);
1004 } 1002 }
1005 } 1003 }
1006 1004
1007 static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2) 1005 static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
1008 { 1006 {
1009 if (file1 < file2) { 1007 if (file1 < file2) {
1010 mutex_unlock(&file2->mut); 1008 mutex_unlock(&file2->mut);
1011 mutex_unlock(&file1->mut); 1009 mutex_unlock(&file1->mut);
1012 } else { 1010 } else {
1013 mutex_unlock(&file1->mut); 1011 mutex_unlock(&file1->mut);
1014 mutex_unlock(&file2->mut); 1012 mutex_unlock(&file2->mut);
1015 } 1013 }
1016 } 1014 }
1017 1015
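[ ucma_lock_files()/ucma_unlock_files() above avoid an ABBA deadlock by
  always acquiring the two per-file mutexes in a fixed global order,
  chosen by pointer comparison.  A stripped-down userspace illustration
  of the same idea with POSIX mutexes (names are illustrative; the
  caller must pass two distinct locks, just as ucma_migrate_id() below
  bails out early when cur_file == new_file):

	#include <pthread.h>

	/* Take both locks in address order, so two threads locking the
	 * same pair with swapped arguments can never deadlock. */
	static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		if (a < b) {
			pthread_mutex_lock(a);
			pthread_mutex_lock(b);
		} else {
			pthread_mutex_lock(b);
			pthread_mutex_lock(a);
		}
	}

	static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		/* Release in the reverse of the acquisition order. */
		if (a < b) {
			pthread_mutex_unlock(b);
			pthread_mutex_unlock(a);
		} else {
			pthread_mutex_unlock(a);
			pthread_mutex_unlock(b);
		}
	}
]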
1018 static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file) 1016 static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
1019 { 1017 {
1020 struct ucma_event *uevent, *tmp; 1018 struct ucma_event *uevent, *tmp;
1021 1019
1022 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) 1020 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
1023 if (uevent->ctx == ctx) 1021 if (uevent->ctx == ctx)
1024 list_move_tail(&uevent->list, &file->event_list); 1022 list_move_tail(&uevent->list, &file->event_list);
1025 } 1023 }
1026 1024
1027 static ssize_t ucma_migrate_id(struct ucma_file *new_file, 1025 static ssize_t ucma_migrate_id(struct ucma_file *new_file,
1028 const char __user *inbuf, 1026 const char __user *inbuf,
1029 int in_len, int out_len) 1027 int in_len, int out_len)
1030 { 1028 {
1031 struct rdma_ucm_migrate_id cmd; 1029 struct rdma_ucm_migrate_id cmd;
1032 struct rdma_ucm_migrate_resp resp; 1030 struct rdma_ucm_migrate_resp resp;
1033 struct ucma_context *ctx; 1031 struct ucma_context *ctx;
1034 struct file *filp; 1032 struct file *filp;
1035 struct ucma_file *cur_file; 1033 struct ucma_file *cur_file;
1036 int ret = 0; 1034 int ret = 0;
1037 1035
1038 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1036 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1039 return -EFAULT; 1037 return -EFAULT;
1040 1038
1041 /* Get current fd to protect against it being closed */ 1039 /* Get current fd to protect against it being closed */
1042 filp = fget(cmd.fd); 1040 filp = fget(cmd.fd);
1043 if (!filp) 1041 if (!filp)
1044 return -ENOENT; 1042 return -ENOENT;
1045 1043
1046 /* Validate current fd and prevent destruction of id. */ 1044 /* Validate current fd and prevent destruction of id. */
1047 ctx = ucma_get_ctx(filp->private_data, cmd.id); 1045 ctx = ucma_get_ctx(filp->private_data, cmd.id);
1048 if (IS_ERR(ctx)) { 1046 if (IS_ERR(ctx)) {
1049 ret = PTR_ERR(ctx); 1047 ret = PTR_ERR(ctx);
1050 goto file_put; 1048 goto file_put;
1051 } 1049 }
1052 1050
1053 cur_file = ctx->file; 1051 cur_file = ctx->file;
1054 if (cur_file == new_file) { 1052 if (cur_file == new_file) {
1055 resp.events_reported = ctx->events_reported; 1053 resp.events_reported = ctx->events_reported;
1056 goto response; 1054 goto response;
1057 } 1055 }
1058 1056
1059 /* 1057 /*
1060 * Migrate events between fd's, maintaining order, and avoiding new 1058 * Migrate events between fd's, maintaining order, and avoiding new
1061 * events being added before existing events. 1059 * events being added before existing events.
1062 */ 1060 */
1063 ucma_lock_files(cur_file, new_file); 1061 ucma_lock_files(cur_file, new_file);
1064 mutex_lock(&mut); 1062 mutex_lock(&mut);
1065 1063
1066 list_move_tail(&ctx->list, &new_file->ctx_list); 1064 list_move_tail(&ctx->list, &new_file->ctx_list);
1067 ucma_move_events(ctx, new_file); 1065 ucma_move_events(ctx, new_file);
1068 ctx->file = new_file; 1066 ctx->file = new_file;
1069 resp.events_reported = ctx->events_reported; 1067 resp.events_reported = ctx->events_reported;
1070 1068
1071 mutex_unlock(&mut); 1069 mutex_unlock(&mut);
1072 ucma_unlock_files(cur_file, new_file); 1070 ucma_unlock_files(cur_file, new_file);
1073 1071
1074 response: 1072 response:
1075 if (copy_to_user((void __user *)(unsigned long)cmd.response, 1073 if (copy_to_user((void __user *)(unsigned long)cmd.response,
1076 &resp, sizeof(resp))) 1074 &resp, sizeof(resp)))
1077 ret = -EFAULT; 1075 ret = -EFAULT;
1078 1076
1079 ucma_put_ctx(ctx); 1077 ucma_put_ctx(ctx);
1080 file_put: 1078 file_put:
1081 fput(filp); 1079 fput(filp);
1082 return ret; 1080 return ret;
1083 } 1081 }
1084 1082
1085 static ssize_t (*ucma_cmd_table[])(struct ucma_file *file, 1083 static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
1086 const char __user *inbuf, 1084 const char __user *inbuf,
1087 int in_len, int out_len) = { 1085 int in_len, int out_len) = {
1088 [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id, 1086 [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id,
1089 [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id, 1087 [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id,
1090 [RDMA_USER_CM_CMD_BIND_ADDR] = ucma_bind_addr, 1088 [RDMA_USER_CM_CMD_BIND_ADDR] = ucma_bind_addr,
1091 [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr, 1089 [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
1092 [RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route, 1090 [RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
1093 [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route, 1091 [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route,
1094 [RDMA_USER_CM_CMD_CONNECT] = ucma_connect, 1092 [RDMA_USER_CM_CMD_CONNECT] = ucma_connect,
1095 [RDMA_USER_CM_CMD_LISTEN] = ucma_listen, 1093 [RDMA_USER_CM_CMD_LISTEN] = ucma_listen,
1096 [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept, 1094 [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept,
1097 [RDMA_USER_CM_CMD_REJECT] = ucma_reject, 1095 [RDMA_USER_CM_CMD_REJECT] = ucma_reject,
1098 [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect, 1096 [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect,
1099 [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr, 1097 [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
1100 [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event, 1098 [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event,
1101 [RDMA_USER_CM_CMD_GET_OPTION] = NULL, 1099 [RDMA_USER_CM_CMD_GET_OPTION] = NULL,
1102 [RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option, 1100 [RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option,
1103 [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, 1101 [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify,
1104 [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast, 1102 [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast,
1105 [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast, 1103 [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast,
1106 [RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id 1104 [RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id
1107 }; 1105 };
1108 1106
1109 static ssize_t ucma_write(struct file *filp, const char __user *buf, 1107 static ssize_t ucma_write(struct file *filp, const char __user *buf,
1110 size_t len, loff_t *pos) 1108 size_t len, loff_t *pos)
1111 { 1109 {
1112 struct ucma_file *file = filp->private_data; 1110 struct ucma_file *file = filp->private_data;
1113 struct rdma_ucm_cmd_hdr hdr; 1111 struct rdma_ucm_cmd_hdr hdr;
1114 ssize_t ret; 1112 ssize_t ret;
1115 1113
1116 if (len < sizeof(hdr)) 1114 if (len < sizeof(hdr))
1117 return -EINVAL; 1115 return -EINVAL;
1118 1116
1119 if (copy_from_user(&hdr, buf, sizeof(hdr))) 1117 if (copy_from_user(&hdr, buf, sizeof(hdr)))
1120 return -EFAULT; 1118 return -EFAULT;
1121 1119
1122 if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) 1120 if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
1123 return -EINVAL; 1121 return -EINVAL;
1124 1122
1125 if (hdr.in + sizeof(hdr) > len) 1123 if (hdr.in + sizeof(hdr) > len)
1126 return -EINVAL; 1124 return -EINVAL;
1127 1125
1128 if (!ucma_cmd_table[hdr.cmd]) 1126 if (!ucma_cmd_table[hdr.cmd])
1129 return -ENOSYS; 1127 return -ENOSYS;
1130 1128
1131 ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out); 1129 ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
1132 if (!ret) 1130 if (!ret)
1133 ret = len; 1131 ret = len;
1134 1132
1135 return ret; 1133 return ret;
1136 } 1134 }
1137 1135
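[ ucma_write() above is the single entry point for all userspace
  commands: each write() starts with a small header giving the command
  index and the sizes of the request and response payloads, and the
  matching handler from ucma_cmd_table[] is invoked on the bytes that
  follow.  A hypothetical userspace sketch of driving that ABI; the
  struct layout and helper below are illustrative stand-ins, real code
  uses the definitions from <rdma/rdma_user_cm.h>:

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	struct cmd_hdr {         /* stand-in for struct rdma_ucm_cmd_hdr */
		uint32_t cmd;    /* index into ucma_cmd_table[] */
		uint16_t in;     /* bytes of request payload that follow */
		uint16_t out;    /* size of the caller's response buffer */
	};

	/* Hypothetical helper: prepend the header and issue the write().
	 * ucma_write() rejects the call unless len >= sizeof(hdr) and
	 * hdr.in + sizeof(hdr) <= len. */
	static int send_cmd(int fd, uint32_t cmd, const void *req,
			    uint16_t in, uint16_t out)
	{
		char buf[256];
		struct cmd_hdr hdr = { .cmd = cmd, .in = in, .out = out };

		if (sizeof(hdr) + in > sizeof(buf))
			return -1;
		memcpy(buf, &hdr, sizeof(hdr));
		memcpy(buf + sizeof(hdr), req, in);
		return write(fd, buf, sizeof(hdr) + in) < 0 ? -1 : 0;
	}
]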
1138 static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait) 1136 static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
1139 { 1137 {
1140 struct ucma_file *file = filp->private_data; 1138 struct ucma_file *file = filp->private_data;
1141 unsigned int mask = 0; 1139 unsigned int mask = 0;
1142 1140
1143 poll_wait(filp, &file->poll_wait, wait); 1141 poll_wait(filp, &file->poll_wait, wait);
1144 1142
1145 if (!list_empty(&file->event_list)) 1143 if (!list_empty(&file->event_list))
1146 mask = POLLIN | POLLRDNORM; 1144 mask = POLLIN | POLLRDNORM;
1147 1145
1148 return mask; 1146 return mask;
1149 } 1147 }
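
ucma_poll() only ever reports readability: POLLIN | POLLRDNORM when the per-file event_list is non-empty, nothing otherwise. That is enough for a user process to block in poll()/select() and fetch queued events with RDMA_USER_CM_CMD_GET_EVENT once woken. A hedged userspace sketch (the helper is hypothetical):

        #include <poll.h>

        /* Hypothetical helper: wait until the rdma_cm fd has a queued event.
         * Returns 1 when an event is ready, 0 on timeout, -1 on error. */
        static int wait_for_cm_event(int fd, int timeout_ms)
        {
                struct pollfd pfd = { .fd = fd, .events = POLLIN };
                int n = poll(&pfd, 1, timeout_ms);

                if (n < 0)
                        return -1;
                if (n == 0)
                        return 0;
                return (pfd.revents & POLLIN) ? 1 : -1;
        }
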
1150 1148
1151 /* 1149 /*
1152 * ucma_open() does not need the BKL: 1150 * ucma_open() does not need the BKL:
1153 * 1151 *
1154 * - no global state is referred to; 1152 * - no global state is referred to;
1155 * - there is no ioctl method to race against; 1153 * - there is no ioctl method to race against;
1156 * - no further module initialization is required for open to work 1154 * - no further module initialization is required for open to work
1157 * after the device is registered. 1155 * after the device is registered.
1158 */ 1156 */
1159 static int ucma_open(struct inode *inode, struct file *filp) 1157 static int ucma_open(struct inode *inode, struct file *filp)
1160 { 1158 {
1161 struct ucma_file *file; 1159 struct ucma_file *file;
1162 1160
1163 file = kmalloc(sizeof *file, GFP_KERNEL); 1161 file = kmalloc(sizeof *file, GFP_KERNEL);
1164 if (!file) 1162 if (!file)
1165 return -ENOMEM; 1163 return -ENOMEM;
1166 1164
1167 INIT_LIST_HEAD(&file->event_list); 1165 INIT_LIST_HEAD(&file->event_list);
1168 INIT_LIST_HEAD(&file->ctx_list); 1166 INIT_LIST_HEAD(&file->ctx_list);
1169 init_waitqueue_head(&file->poll_wait); 1167 init_waitqueue_head(&file->poll_wait);
1170 mutex_init(&file->mut); 1168 mutex_init(&file->mut);
1171 1169
1172 filp->private_data = file; 1170 filp->private_data = file;
1173 file->filp = filp; 1171 file->filp = filp;
1174 return 0; 1172 return 0;
1175 } 1173 }
1176 1174
1177 static int ucma_close(struct inode *inode, struct file *filp) 1175 static int ucma_close(struct inode *inode, struct file *filp)
1178 { 1176 {
1179 struct ucma_file *file = filp->private_data; 1177 struct ucma_file *file = filp->private_data;
1180 struct ucma_context *ctx, *tmp; 1178 struct ucma_context *ctx, *tmp;
1181 1179
1182 mutex_lock(&file->mut); 1180 mutex_lock(&file->mut);
1183 list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) { 1181 list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
1184 mutex_unlock(&file->mut); 1182 mutex_unlock(&file->mut);
1185 1183
1186 mutex_lock(&mut); 1184 mutex_lock(&mut);
1187 idr_remove(&ctx_idr, ctx->id); 1185 idr_remove(&ctx_idr, ctx->id);
1188 mutex_unlock(&mut); 1186 mutex_unlock(&mut);
1189 1187
1190 ucma_free_ctx(ctx); 1188 ucma_free_ctx(ctx);
1191 mutex_lock(&file->mut); 1189 mutex_lock(&file->mut);
1192 } 1190 }
1193 mutex_unlock(&file->mut); 1191 mutex_unlock(&file->mut);
1194 kfree(file); 1192 kfree(file);
1195 return 0; 1193 return 0;
1196 } 1194 }
1197 1195
1198 static const struct file_operations ucma_fops = { 1196 static const struct file_operations ucma_fops = {
1199 .owner = THIS_MODULE, 1197 .owner = THIS_MODULE,
1200 .open = ucma_open, 1198 .open = ucma_open,
1201 .release = ucma_close, 1199 .release = ucma_close,
1202 .write = ucma_write, 1200 .write = ucma_write,
1203 .poll = ucma_poll, 1201 .poll = ucma_poll,
1204 }; 1202 };
1205 1203
1206 static struct miscdevice ucma_misc = { 1204 static struct miscdevice ucma_misc = {
1207 .minor = MISC_DYNAMIC_MINOR, 1205 .minor = MISC_DYNAMIC_MINOR,
1208 .name = "rdma_cm", 1206 .name = "rdma_cm",
1209 .fops = &ucma_fops, 1207 .fops = &ucma_fops,
1210 }; 1208 };
1211 1209
1212 static ssize_t show_abi_version(struct device *dev, 1210 static ssize_t show_abi_version(struct device *dev,
1213 struct device_attribute *attr, 1211 struct device_attribute *attr,
1214 char *buf) 1212 char *buf)
1215 { 1213 {
1216 return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION); 1214 return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
1217 } 1215 }
1218 static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); 1216 static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
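
show_abi_version() gives userspace (librdmacm, typically) a way to verify that its idea of RDMA_USER_CM_ABI_VERSION matches the kernel's before it starts writing commands. A small sketch of reading it; the sysfs path assumes the default misc-device layout and is not taken from this patch.

        #include <stdio.h>

        /* Hypothetical check of the ABI version exported by show_abi_version(). */
        static int read_ucma_abi_version(void)
        {
                FILE *f = fopen("/sys/class/misc/rdma_cm/abi_version", "r");
                int ver = -1;

                if (f) {
                        if (fscanf(f, "%d", &ver) != 1)
                                ver = -1;
                        fclose(f);
                }
                return ver;
        }
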
1219 1217
1220 static int __init ucma_init(void) 1218 static int __init ucma_init(void)
1221 { 1219 {
1222 int ret; 1220 int ret;
1223 1221
1224 ret = misc_register(&ucma_misc); 1222 ret = misc_register(&ucma_misc);
1225 if (ret) 1223 if (ret)
1226 return ret; 1224 return ret;
1227 1225
1228 ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version); 1226 ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
1229 if (ret) { 1227 if (ret) {
1230 printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n"); 1228 printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
1231 goto err; 1229 goto err;
1232 } 1230 }
1233 return 0; 1231 return 0;
1234 err: 1232 err:
1235 misc_deregister(&ucma_misc); 1233 misc_deregister(&ucma_misc);
1236 return ret; 1234 return ret;
1237 } 1235 }
1238 1236
1239 static void __exit ucma_cleanup(void) 1237 static void __exit ucma_cleanup(void)
1240 { 1238 {
1241 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); 1239 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1242 misc_deregister(&ucma_misc); 1240 misc_deregister(&ucma_misc);
1243 idr_destroy(&ctx_idr); 1241 idr_destroy(&ctx_idr);
1244 } 1242 }
1245 1243
1246 module_init(ucma_init); 1244 module_init(ucma_init);
1247 module_exit(ucma_cleanup); 1245 module_exit(ucma_cleanup);
1248 1246
include/rdma/rdma_cm.h
1 /* 1 /*
2 * Copyright (c) 2005 Voltaire Inc. All rights reserved. 2 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file 7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the 8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below: 9 * OpenIB.org BSD license below:
10 * 10 *
11 * Redistribution and use in source and binary forms, with or 11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following 12 * without modification, are permitted provided that the following
13 * conditions are met: 13 * conditions are met:
14 * 14 *
15 * - Redistributions of source code must retain the above 15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following 16 * copyright notice, this list of conditions and the following
17 * disclaimer. 17 * disclaimer.
18 * 18 *
19 * - Redistributions in binary form must reproduce the above 19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following 20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials 21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution. 22 * provided with the distribution.
23 * 23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34 #if !defined(RDMA_CM_H) 34 #if !defined(RDMA_CM_H)
35 #define RDMA_CM_H 35 #define RDMA_CM_H
36 36
37 #include <linux/socket.h> 37 #include <linux/socket.h>
38 #include <linux/in6.h> 38 #include <linux/in6.h>
39 #include <rdma/ib_addr.h> 39 #include <rdma/ib_addr.h>
40 #include <rdma/ib_sa.h> 40 #include <rdma/ib_sa.h>
41 41
42 /* 42 /*
43 * Upon receiving a device removal event, users must destroy the associated 43 * Upon receiving a device removal event, users must destroy the associated
44 * RDMA identifier and release all resources allocated with the device. 44 * RDMA identifier and release all resources allocated with the device.
45 */ 45 */
46 enum rdma_cm_event_type { 46 enum rdma_cm_event_type {
47 RDMA_CM_EVENT_ADDR_RESOLVED, 47 RDMA_CM_EVENT_ADDR_RESOLVED,
48 RDMA_CM_EVENT_ADDR_ERROR, 48 RDMA_CM_EVENT_ADDR_ERROR,
49 RDMA_CM_EVENT_ROUTE_RESOLVED, 49 RDMA_CM_EVENT_ROUTE_RESOLVED,
50 RDMA_CM_EVENT_ROUTE_ERROR, 50 RDMA_CM_EVENT_ROUTE_ERROR,
51 RDMA_CM_EVENT_CONNECT_REQUEST, 51 RDMA_CM_EVENT_CONNECT_REQUEST,
52 RDMA_CM_EVENT_CONNECT_RESPONSE, 52 RDMA_CM_EVENT_CONNECT_RESPONSE,
53 RDMA_CM_EVENT_CONNECT_ERROR, 53 RDMA_CM_EVENT_CONNECT_ERROR,
54 RDMA_CM_EVENT_UNREACHABLE, 54 RDMA_CM_EVENT_UNREACHABLE,
55 RDMA_CM_EVENT_REJECTED, 55 RDMA_CM_EVENT_REJECTED,
56 RDMA_CM_EVENT_ESTABLISHED, 56 RDMA_CM_EVENT_ESTABLISHED,
57 RDMA_CM_EVENT_DISCONNECTED, 57 RDMA_CM_EVENT_DISCONNECTED,
58 RDMA_CM_EVENT_DEVICE_REMOVAL, 58 RDMA_CM_EVENT_DEVICE_REMOVAL,
59 RDMA_CM_EVENT_MULTICAST_JOIN, 59 RDMA_CM_EVENT_MULTICAST_JOIN,
60 RDMA_CM_EVENT_MULTICAST_ERROR, 60 RDMA_CM_EVENT_MULTICAST_ERROR,
61 RDMA_CM_EVENT_ADDR_CHANGE, 61 RDMA_CM_EVENT_ADDR_CHANGE,
62 RDMA_CM_EVENT_TIMEWAIT_EXIT 62 RDMA_CM_EVENT_TIMEWAIT_EXIT
63 }; 63 };
64 64
65 enum rdma_port_space { 65 enum rdma_port_space {
66 RDMA_PS_SDP = 0x0001, 66 RDMA_PS_SDP = 0x0001,
67 RDMA_PS_IPOIB = 0x0002, 67 RDMA_PS_IPOIB = 0x0002,
68 RDMA_PS_TCP = 0x0106, 68 RDMA_PS_TCP = 0x0106,
69 RDMA_PS_UDP = 0x0111, 69 RDMA_PS_UDP = 0x0111,
70 RDMA_PS_SCTP = 0x0183 70 RDMA_PS_SCTP = 0x0183
71 }; 71 };
72 72
73 struct rdma_addr { 73 struct rdma_addr {
74 struct sockaddr src_addr; 74 struct sockaddr_storage src_addr;
75 u8 src_pad[sizeof(struct sockaddr_in6) - 75 struct sockaddr_storage dst_addr;
76 sizeof(struct sockaddr)];
77 struct sockaddr dst_addr;
78 u8 dst_pad[sizeof(struct sockaddr_in6) -
79 sizeof(struct sockaddr)];
80 struct rdma_dev_addr dev_addr; 76 struct rdma_dev_addr dev_addr;
81 }; 77 };
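
This hunk is the point of the patch: struct sockaddr_storage is defined to be large enough, and suitably aligned, for every supported address family, including sockaddr_in6, so the hand-rolled src_pad/dst_pad arrays become unnecessary while the struct keeps the same usable size. Code that consumes rdma_addr keeps casting to the concrete family as before; a hypothetical helper (not part of this commit) showing the usual dispatch:

        #include <linux/socket.h>
        #include <linux/in.h>
        #include <linux/in6.h>
        #include <rdma/rdma_cm.h>

        /* Hypothetical helper: length of the bound source address.  Because
         * sockaddr_storage can hold either family, only sa_family is examined. */
        static int rdma_src_addr_len(struct rdma_addr *addr)
        {
                struct sockaddr *sa = (struct sockaddr *) &addr->src_addr;

                switch (sa->sa_family) {
                case AF_INET:
                        return sizeof(struct sockaddr_in);
                case AF_INET6:
                        return sizeof(struct sockaddr_in6);
                default:
                        return 0;
                }
        }
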
82 78
83 struct rdma_route { 79 struct rdma_route {
84 struct rdma_addr addr; 80 struct rdma_addr addr;
85 struct ib_sa_path_rec *path_rec; 81 struct ib_sa_path_rec *path_rec;
86 int num_paths; 82 int num_paths;
87 }; 83 };
88 84
89 struct rdma_conn_param { 85 struct rdma_conn_param {
90 const void *private_data; 86 const void *private_data;
91 u8 private_data_len; 87 u8 private_data_len;
92 u8 responder_resources; 88 u8 responder_resources;
93 u8 initiator_depth; 89 u8 initiator_depth;
94 u8 flow_control; 90 u8 flow_control;
95 u8 retry_count; /* ignored when accepting */ 91 u8 retry_count; /* ignored when accepting */
96 u8 rnr_retry_count; 92 u8 rnr_retry_count;
97 /* Fields below ignored if a QP is created on the rdma_cm_id. */ 93 /* Fields below ignored if a QP is created on the rdma_cm_id. */
98 u8 srq; 94 u8 srq;
99 u32 qp_num; 95 u32 qp_num;
100 }; 96 };
101 97
102 struct rdma_ud_param { 98 struct rdma_ud_param {
103 const void *private_data; 99 const void *private_data;
104 u8 private_data_len; 100 u8 private_data_len;
105 struct ib_ah_attr ah_attr; 101 struct ib_ah_attr ah_attr;
106 u32 qp_num; 102 u32 qp_num;
107 u32 qkey; 103 u32 qkey;
108 }; 104 };
109 105
110 struct rdma_cm_event { 106 struct rdma_cm_event {
111 enum rdma_cm_event_type event; 107 enum rdma_cm_event_type event;
112 int status; 108 int status;
113 union { 109 union {
114 struct rdma_conn_param conn; 110 struct rdma_conn_param conn;
115 struct rdma_ud_param ud; 111 struct rdma_ud_param ud;
116 } param; 112 } param;
117 }; 113 };
118 114
119 struct rdma_cm_id; 115 struct rdma_cm_id;
120 116
121 /** 117 /**
122 * rdma_cm_event_handler - Callback used to report user events. 118 * rdma_cm_event_handler - Callback used to report user events.
123 * 119 *
124 * Notes: Users may not call rdma_destroy_id from this callback to destroy 120 * Notes: Users may not call rdma_destroy_id from this callback to destroy
125 * the passed in id, or a corresponding listen id. Returning a 121 * the passed in id, or a corresponding listen id. Returning a
126 * non-zero value from the callback will destroy the passed in id. 122 * non-zero value from the callback will destroy the passed in id.
127 */ 123 */
128 typedef int (*rdma_cm_event_handler)(struct rdma_cm_id *id, 124 typedef int (*rdma_cm_event_handler)(struct rdma_cm_id *id,
129 struct rdma_cm_event *event); 125 struct rdma_cm_event *event);
130 126
131 struct rdma_cm_id { 127 struct rdma_cm_id {
132 struct ib_device *device; 128 struct ib_device *device;
133 void *context; 129 void *context;
134 struct ib_qp *qp; 130 struct ib_qp *qp;
135 rdma_cm_event_handler event_handler; 131 rdma_cm_event_handler event_handler;
136 struct rdma_route route; 132 struct rdma_route route;
137 enum rdma_port_space ps; 133 enum rdma_port_space ps;
138 u8 port_num; 134 u8 port_num;
139 }; 135 };
140 136
141 /** 137 /**
142 * rdma_create_id - Create an RDMA identifier. 138 * rdma_create_id - Create an RDMA identifier.
143 * 139 *
144 * @event_handler: User callback invoked to report events associated with the 140 * @event_handler: User callback invoked to report events associated with the
145 * returned rdma_id. 141 * returned rdma_id.
146 * @context: User specified context associated with the id. 142 * @context: User specified context associated with the id.
147 * @ps: RDMA port space. 143 * @ps: RDMA port space.
148 */ 144 */
149 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, 145 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
150 void *context, enum rdma_port_space ps); 146 void *context, enum rdma_port_space ps);
151 147
152 /** 148 /**
153 * rdma_destroy_id - Destroys an RDMA identifier. 149 * rdma_destroy_id - Destroys an RDMA identifier.
154 * 150 *
155 * @id: RDMA identifier. 151 * @id: RDMA identifier.
156 * 152 *
157 * Note: calling this function has the effect of canceling in-flight 153 * Note: calling this function has the effect of canceling in-flight
158 * asynchronous operations associated with the id. 154 * asynchronous operations associated with the id.
159 */ 155 */
160 void rdma_destroy_id(struct rdma_cm_id *id); 156 void rdma_destroy_id(struct rdma_cm_id *id);
161 157
162 /** 158 /**
163 * rdma_bind_addr - Bind an RDMA identifier to a source address and 159 * rdma_bind_addr - Bind an RDMA identifier to a source address and
164 * associated RDMA device, if needed. 160 * associated RDMA device, if needed.
165 * 161 *
166 * @id: RDMA identifier. 162 * @id: RDMA identifier.
167 * @addr: Local address information. Wildcard values are permitted. 163 * @addr: Local address information. Wildcard values are permitted.
168 * 164 *
169 * This associates a source address with the RDMA identifier before calling 165 * This associates a source address with the RDMA identifier before calling
170 * rdma_listen. If a specific local address is given, the RDMA identifier will 166 * rdma_listen. If a specific local address is given, the RDMA identifier will
171 * be bound to a local RDMA device. 167 * be bound to a local RDMA device.
172 */ 168 */
173 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr); 169 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr);
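
rdma_bind_addr() takes a bare struct sockaddr pointer, so callers fill in a sockaddr_in or sockaddr_in6 and cast, much as they would for a socket bind. A hedged kernel-side sketch (helper name and port handling are illustrative) of binding to the IPv4 wildcard:

        #include <linux/types.h>
        #include <linux/in.h>
        #include <rdma/rdma_cm.h>

        /* Hypothetical sketch: bind an id to 0.0.0.0:<port> before rdma_listen(). */
        static int bind_any_ipv4(struct rdma_cm_id *id, u16 port)
        {
                struct sockaddr_in sin = {
                        .sin_family = AF_INET,
                        .sin_port   = htons(port),
                        .sin_addr   = { .s_addr = htonl(INADDR_ANY) },
                };

                return rdma_bind_addr(id, (struct sockaddr *) &sin);
        }
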
174 170
175 /** 171 /**
176 * rdma_resolve_addr - Resolve destination and optional source addresses 172 * rdma_resolve_addr - Resolve destination and optional source addresses
177 * from IP addresses to an RDMA address. If successful, the specified 173 * from IP addresses to an RDMA address. If successful, the specified
178 * rdma_cm_id will be bound to a local device. 174 * rdma_cm_id will be bound to a local device.
179 * 175 *
180 * @id: RDMA identifier. 176 * @id: RDMA identifier.
181 * @src_addr: Source address information. This parameter may be NULL. 177 * @src_addr: Source address information. This parameter may be NULL.
182 * @dst_addr: Destination address information. 178 * @dst_addr: Destination address information.
183 * @timeout_ms: Time to wait for resolution to complete. 179 * @timeout_ms: Time to wait for resolution to complete.
184 */ 180 */
185 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 181 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
186 struct sockaddr *dst_addr, int timeout_ms); 182 struct sockaddr *dst_addr, int timeout_ms);
187 183
188 /** 184 /**
189 * rdma_resolve_route - Resolve the RDMA address bound to the RDMA identifier 185 * rdma_resolve_route - Resolve the RDMA address bound to the RDMA identifier
190 * into route information needed to establish a connection. 186 * into route information needed to establish a connection.
191 * 187 *
192 * This is called on the client side of a connection. 188 * This is called on the client side of a connection.
193 * Users must have first called rdma_resolve_addr to resolve a dst_addr 189 * Users must have first called rdma_resolve_addr to resolve a dst_addr
194 * into an RDMA address before calling this routine. 190 * into an RDMA address before calling this routine.
195 */ 191 */
196 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms); 192 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms);
197 193
198 /** 194 /**
199 * rdma_create_qp - Allocate a QP and associate it with the specified RDMA 195 * rdma_create_qp - Allocate a QP and associate it with the specified RDMA
200 * identifier. 196 * identifier.
201 * 197 *
202 * QPs allocated to an rdma_cm_id will automatically be transitioned by the CMA 198 * QPs allocated to an rdma_cm_id will automatically be transitioned by the CMA
203 * through their states. 199 * through their states.
204 */ 200 */
205 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, 201 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
206 struct ib_qp_init_attr *qp_init_attr); 202 struct ib_qp_init_attr *qp_init_attr);
207 203
208 /** 204 /**
209 * rdma_destroy_qp - Deallocate the QP associated with the specified RDMA 205 * rdma_destroy_qp - Deallocate the QP associated with the specified RDMA
210 * identifier. 206 * identifier.
211 * 207 *
212 * Users must destroy any QP associated with an RDMA identifier before 208 * Users must destroy any QP associated with an RDMA identifier before
213 * destroying the RDMA ID. 209 * destroying the RDMA ID.
214 */ 210 */
215 void rdma_destroy_qp(struct rdma_cm_id *id); 211 void rdma_destroy_qp(struct rdma_cm_id *id);
216 212
217 /** 213 /**
218 * rdma_init_qp_attr - Initializes the QP attributes for use in transitioning 214 * rdma_init_qp_attr - Initializes the QP attributes for use in transitioning
219 * to a specified QP state. 215 * to a specified QP state.
220 * @id: Communication identifier associated with the QP attributes to 216 * @id: Communication identifier associated with the QP attributes to
221 * initialize. 217 * initialize.
222 * @qp_attr: On input, specifies the desired QP state. On output, the 218 * @qp_attr: On input, specifies the desired QP state. On output, the
223 * mandatory and desired optional attributes will be set in order to 219 * mandatory and desired optional attributes will be set in order to
224 * modify the QP to the specified state. 220 * modify the QP to the specified state.
225 * @qp_attr_mask: The QP attribute mask that may be used to transition the 221 * @qp_attr_mask: The QP attribute mask that may be used to transition the
226 * QP to the specified state. 222 * QP to the specified state.
227 * 223 *
228 * Users must set the @qp_attr->qp_state to the desired QP state. This call 224 * Users must set the @qp_attr->qp_state to the desired QP state. This call
229 * will set all required attributes for the given transition, along with 225 * will set all required attributes for the given transition, along with
230 * known optional attributes. Users may override the attributes returned from 226 * known optional attributes. Users may override the attributes returned from
231 * this call before calling ib_modify_qp. 227 * this call before calling ib_modify_qp.
232 * 228 *
233 * Users that wish to have their QP automatically transitioned through its 229 * Users that wish to have their QP automatically transitioned through its
234 * states can associate a QP with the rdma_cm_id by calling rdma_create_qp(). 230 * states can associate a QP with the rdma_cm_id by calling rdma_create_qp().
235 */ 231 */
236 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 232 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
237 int *qp_attr_mask); 233 int *qp_attr_mask);
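
For callers that manage their own QP instead of using rdma_create_qp(), the pattern described above is: set qp_attr.qp_state to the target state, let rdma_init_qp_attr() fill in the required attributes and mask, optionally adjust them, then apply with ib_modify_qp(). A hedged sketch of one such transition (the helper is hypothetical):

        #include <rdma/ib_verbs.h>
        #include <rdma/rdma_cm.h>

        /* Hypothetical sketch: move a caller-owned QP to RTR using the
         * attributes the CM computes for this connection. */
        static int move_qp_to_rtr(struct rdma_cm_id *id, struct ib_qp *qp)
        {
                struct ib_qp_attr qp_attr;
                int qp_attr_mask;
                int ret;

                qp_attr.qp_state = IB_QPS_RTR;  /* desired state on input */
                ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
                if (ret)
                        return ret;

                /* Optional attributes may be overridden here before applying. */
                return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        }
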
238 234
239 /** 235 /**
240 * rdma_connect - Initiate an active connection request. 236 * rdma_connect - Initiate an active connection request.
241 * @id: Connection identifier to connect. 237 * @id: Connection identifier to connect.
242 * @conn_param: Connection information used for connected QPs. 238 * @conn_param: Connection information used for connected QPs.
243 * 239 *
244 * Users must have resolved a route for the rdma_cm_id to connect with 240 * Users must have resolved a route for the rdma_cm_id to connect with
245 * by having called rdma_resolve_route before calling this routine. 241 * by having called rdma_resolve_route before calling this routine.
246 * 242 *
247 * This call will either connect to a remote QP or obtain remote QP 243 * This call will either connect to a remote QP or obtain remote QP
248 * information for unconnected rdma_cm_id's. The actual operation is 244 * information for unconnected rdma_cm_id's. The actual operation is
249 * based on the rdma_cm_id's port space. 245 * based on the rdma_cm_id's port space.
250 */ 246 */
251 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 247 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
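
The active side drives the id through address resolution, route resolution and then rdma_connect(), with each step completing asynchronously through the event handler. The sketch below compresses that into one function purely for illustration; real callers return to the event loop and continue from RDMA_CM_EVENT_ADDR_RESOLVED / ROUTE_RESOLVED, and the timeout values are arbitrary.

        #include <rdma/rdma_cm.h>

        /* Hypothetical, heavily simplified active-side sequence. */
        static int start_connect(struct rdma_cm_id *id,
                                 struct sockaddr *src, struct sockaddr *dst,
                                 struct rdma_conn_param *param)
        {
                int ret;

                ret = rdma_resolve_addr(id, src, dst, 2000 /* ms */);
                if (ret)
                        return ret;
                /* ...wait for RDMA_CM_EVENT_ADDR_RESOLVED... */

                ret = rdma_resolve_route(id, 2000 /* ms */);
                if (ret)
                        return ret;
                /* ...wait for RDMA_CM_EVENT_ROUTE_RESOLVED, create the QP... */

                return rdma_connect(id, param);
                /* RDMA_CM_EVENT_ESTABLISHED (or REJECTED/ERROR) follows. */
        }
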
252 248
253 /** 249 /**
254 * rdma_listen - This function is called by the passive side to 250 * rdma_listen - This function is called by the passive side to
255 * listen for incoming connection requests. 251 * listen for incoming connection requests.
256 * 252 *
257 * Users must have bound the rdma_cm_id to a local address by calling 253 * Users must have bound the rdma_cm_id to a local address by calling
258 * rdma_bind_addr before calling this routine. 254 * rdma_bind_addr before calling this routine.
259 */ 255 */
260 int rdma_listen(struct rdma_cm_id *id, int backlog); 256 int rdma_listen(struct rdma_cm_id *id, int backlog);
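
The passive side mirrors this: bind, listen, then accept each RDMA_CM_EVENT_CONNECT_REQUEST from the event handler, where the event is delivered on a new child id rather than on the listener. A hedged outline (handler and helper names are made up for the example, and QP setup is elided):

        #include <linux/err.h>
        #include <rdma/rdma_cm.h>

        /* Hypothetical event handler: accept every incoming request. */
        static int cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
        {
                if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                        struct rdma_conn_param param = event->param.conn;

                        /* A QP would normally be created on 'id' first. */
                        return rdma_accept(id, &param);
                }
                return 0;       /* returning non-zero destroys 'id' */
        }

        /* Hypothetical listener setup. */
        static int start_listen(struct sockaddr *addr)
        {
                struct rdma_cm_id *listen_id;
                int ret;

                listen_id = rdma_create_id(cm_handler, NULL, RDMA_PS_TCP);
                if (IS_ERR(listen_id))
                        return PTR_ERR(listen_id);

                ret = rdma_bind_addr(listen_id, addr);
                if (!ret)
                        ret = rdma_listen(listen_id, 10 /* backlog */);
                if (ret)
                        rdma_destroy_id(listen_id);
                return ret;
        }
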
261 257
262 /** 258 /**
263 * rdma_accept - Called to accept a connection request or response. 259 * rdma_accept - Called to accept a connection request or response.
264 * @id: Connection identifier associated with the request. 260 * @id: Connection identifier associated with the request.
265 * @conn_param: Information needed to establish the connection. This must be 261 * @conn_param: Information needed to establish the connection. This must be
266 * provided if accepting a connection request. If accepting a connection 262 * provided if accepting a connection request. If accepting a connection
267 * response, this parameter must be NULL. 263 * response, this parameter must be NULL.
268 * 264 *
269 * Typically, this routine is only called by the listener to accept a connection 265 * Typically, this routine is only called by the listener to accept a connection
270 * request. It must also be called on the active side of a connection if the 266 * request. It must also be called on the active side of a connection if the
271 * user is performing their own QP transitions. 267 * user is performing their own QP transitions.
272 * 268 *
273 * In the case of error, a reject message is sent to the remote side and the 269 * In the case of error, a reject message is sent to the remote side and the
274 * state of the qp associated with the id is modified to error, such that any 270 * state of the qp associated with the id is modified to error, such that any
275 * previously posted receive buffers would be flushed. 271 * previously posted receive buffers would be flushed.
276 */ 272 */
277 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 273 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
278 274
279 /** 275 /**
280 * rdma_notify - Notifies the RDMA CM of an asynchronous event that has 276 * rdma_notify - Notifies the RDMA CM of an asynchronous event that has
281 * occurred on the connection. 277 * occurred on the connection.
282 * @id: Connection identifier to transition to established. 278 * @id: Connection identifier to transition to established.
283 * @event: Asynchronous event. 279 * @event: Asynchronous event.
284 * 280 *
285 * This routine should be invoked by users to notify the CM of relevant 281 * This routine should be invoked by users to notify the CM of relevant
286 * communication events. Events that should be reported to the CM and 282 * communication events. Events that should be reported to the CM and
287 * when to report them are: 283 * when to report them are:
288 * 284 *
289 * IB_EVENT_COMM_EST - Used when a message is received on a connected 285 * IB_EVENT_COMM_EST - Used when a message is received on a connected
290 * QP before an RTU has been received. 286 * QP before an RTU has been received.
291 */ 287 */
292 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event); 288 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
293 289
294 /** 290 /**
295 * rdma_reject - Called to reject a connection request or response. 291 * rdma_reject - Called to reject a connection request or response.
296 */ 292 */
297 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 293 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
298 u8 private_data_len); 294 u8 private_data_len);
299 295
300 /** 296 /**
301 * rdma_disconnect - This function disconnects the associated QP and 297 * rdma_disconnect - This function disconnects the associated QP and
302 * transitions it into the error state. 298 * transitions it into the error state.
303 */ 299 */
304 int rdma_disconnect(struct rdma_cm_id *id); 300 int rdma_disconnect(struct rdma_cm_id *id);
305 301
306 /** 302 /**
307 * rdma_join_multicast - Join the multicast group specified by the given 303 * rdma_join_multicast - Join the multicast group specified by the given
308 * address. 304 * address.
309 * @id: Communication identifier associated with the request. 305 * @id: Communication identifier associated with the request.
310 * @addr: Multicast address identifying the group to join. 306 * @addr: Multicast address identifying the group to join.
311 * @context: User-defined context associated with the join request, returned 307 * @context: User-defined context associated with the join request, returned
312 * to the user through the private_data pointer in multicast events. 308 * to the user through the private_data pointer in multicast events.
313 */ 309 */
314 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, 310 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
315 void *context); 311 void *context);
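
Joins are asynchronous as well: the id must already be bound to a device (via rdma_bind_addr or rdma_resolve_addr), and completion arrives as RDMA_CM_EVENT_MULTICAST_JOIN with event->param.ud carrying the AH attributes, QPN and Q_Key needed to send to the group. A hedged sketch for an IPv4 group address (the helper name is illustrative):

        #include <linux/types.h>
        #include <linux/in.h>
        #include <rdma/rdma_cm.h>

        /* Hypothetical sketch: request a join; the result is reported later as
         * RDMA_CM_EVENT_MULTICAST_JOIN or RDMA_CM_EVENT_MULTICAST_ERROR. */
        static int join_group(struct rdma_cm_id *id, __be32 group_ip, void *ctx)
        {
                struct sockaddr_in mcast = {
                        .sin_family = AF_INET,
                        .sin_addr   = { .s_addr = group_ip },
                };

                return rdma_join_multicast(id, (struct sockaddr *) &mcast, ctx);
        }
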
316 312
317 /** 313 /**
318 * rdma_leave_multicast - Leave the multicast group specified by the given 314 * rdma_leave_multicast - Leave the multicast group specified by the given
319 * address. 315 * address.
320 */ 316 */
321 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr); 317 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr);
322 318
323 /** 319 /**
324 * rdma_set_service_type - Set the type of service associated with a 320 * rdma_set_service_type - Set the type of service associated with a
325 * connection identifier. 321 * connection identifier.
326 * @id: Communication identifier to associate with the service type. 322 * @id: Communication identifier to associate with the service type.
327 * @tos: Type of service. 323 * @tos: Type of service.
328 * 324 *
329 * The type of service is interpreted as a differentiated service 325 * The type of service is interpreted as a differentiated service
330 * field (RFC 2474). The service type should be specified before 326 * field (RFC 2474). The service type should be specified before
331 * performing route resolution, as existing communication on the 327 * performing route resolution, as existing communication on the
332 * connection identifier may be unaffected. The type of service 328 * connection identifier may be unaffected. The type of service
333 * requested may not be supported by the network to all destinations. 329 * requested may not be supported by the network to all destinations.
334 */ 330 */
335 void rdma_set_service_type(struct rdma_cm_id *id, int tos); 331 void rdma_set_service_type(struct rdma_cm_id *id, int tos);
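
The tos argument is the full 8-bit IP type-of-service byte, so a DSCP codepoint goes in its upper six bits, and it must be set before rdma_resolve_route() to influence the path that gets selected. A one-line hedged example (the codepoint choice is arbitrary):

        #include <rdma/rdma_cm.h>

        /* Hypothetical: request DSCP CS1 (codepoint 8); DSCP occupies the top
         * six bits of the ToS byte, hence the shift.  Call before route
         * resolution. */
        static void set_background_class(struct rdma_cm_id *id)
        {
                rdma_set_service_type(id, 8 << 2);
        }
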
336 332
337 #endif /* RDMA_CM_H */ 333 #endif /* RDMA_CM_H */
338 334