Commit 9fe66dfd8846706ff11ed7990d06c92644973bd8

Authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

Showing 17 changed files

drivers/infiniband/Kconfig
... ... @@ -8,15 +8,26 @@
8 8 any protocols you wish to use as well as drivers for your
9 9 InfiniBand hardware.
10 10  
11   -config INFINIBAND_USER_VERBS
12   - tristate "InfiniBand userspace verbs support"
  11 +config INFINIBAND_USER_MAD
  12 + tristate "InfiniBand userspace MAD support"
13 13 depends on INFINIBAND
14 14 ---help---
15   - Userspace InfiniBand verbs support. This is the kernel side
16   - of userspace verbs, which allows userspace processes to
17   - directly access InfiniBand hardware for fast-path
18   - operations. You will also need libibverbs and a hardware
19   - driver library from <http://www.openib.org>.
  15 + Userspace InfiniBand Management Datagram (MAD) support. This
  16 + is the kernel side of the userspace MAD support, which allows
  17 + userspace processes to send and receive MADs. You will also
  18 + need libibumad from <http://www.openib.org>.
  19 +
  20 +config INFINIBAND_USER_ACCESS
  21 + tristate "InfiniBand userspace access (verbs and CM)"
  22 + depends on INFINIBAND
  23 + ---help---
  24 + Userspace InfiniBand access support. This enables the
  25 + kernel side of userspace verbs and the userspace
  26 + communication manager (CM). This allows userspace processes
  27 + to set up connections and directly access InfiniBand
  28 + hardware for fast-path operations. You will also need
  29 + libibverbs, libibcm and a hardware driver library from
  30 + <http://www.openib.org>.
20 31  
21 32 source "drivers/infiniband/hw/mthca/Kconfig"
22 33  
drivers/infiniband/core/Makefile
1 1 obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
2   - ib_cm.o ib_umad.o ib_ucm.o
3   -obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o
  2 + ib_cm.o
  3 +obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
  4 +obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o
4 5  
5 6 ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
6 7 device.o fmr_pool.o cache.o
drivers/infiniband/core/cm.c
... ... @@ -173,7 +173,8 @@
173 173 if (IS_ERR(ah))
174 174 return PTR_ERR(ah);
175 175  
176   - m = ib_create_send_mad(mad_agent, 1, cm_id_priv->av.pkey_index,
  176 + m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
  177 + cm_id_priv->av.pkey_index,
177 178 ah, 0, sizeof(struct ib_mad_hdr),
178 179 sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
179 180 GFP_ATOMIC);
... ... @@ -536,6 +537,7 @@
536 537 cm_id_priv->id.state = IB_CM_IDLE;
537 538 cm_id_priv->id.cm_handler = cm_handler;
538 539 cm_id_priv->id.context = context;
  540 + cm_id_priv->id.remote_cm_qpn = 1;
539 541 ret = cm_alloc_id(cm_id_priv);
540 542 if (ret)
541 543 goto error;
... ... @@ -1313,6 +1315,7 @@
1313 1315 cm_deref_id(listen_cm_id_priv);
1314 1316 cm_cleanup_timewait(cm_id_priv->timewait_info);
1315 1317 error2: kfree(cm_id_priv->timewait_info);
  1318 + cm_id_priv->timewait_info = NULL;
1316 1319 error1: ib_destroy_cm_id(&cm_id_priv->id);
1317 1320 return ret;
1318 1321 }
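
The error2 hunk above clears timewait_info right after freeing it because the ib_destroy_cm_id() call on the next label frees the same pointer again whenever it is non-NULL. A minimal userspace C sketch of that unwind pattern (hypothetical names, not this driver's code):

#include <stdlib.h>

struct ctx {
        void *timewait_info;
};

/* shared teardown: frees timewait_info when it is non-NULL */
static void destroy_ctx(struct ctx *c)
{
        free(c->timewait_info);     /* free(NULL) is a no-op */
        free(c);
}

int main(void)
{
        struct ctx *c = calloc(1, sizeof *c);

        if (!c)
                return 1;
        c->timewait_info = malloc(32);

        /* error path: release the resource here ... */
        free(c->timewait_info);
        /* ... and clear the pointer so the shared teardown
         * below cannot free it a second time */
        c->timewait_info = NULL;

        destroy_ctx(c);
        return 0;
}
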
drivers/infiniband/core/mad_rmpp.c
... ... @@ -593,7 +593,8 @@
593 593 rmpp_mad->rmpp_hdr.paylen_newwin =
594 594 cpu_to_be32(mad_send_wr->total_seg *
595 595 (sizeof(struct ib_rmpp_mad) -
596   - offsetof(struct ib_rmpp_mad, data)));
  596 + offsetof(struct ib_rmpp_mad, data)) -
  597 + mad_send_wr->pad);
597 598 mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
598 599 } else {
599 600 mad_send_wr->send_wr.num_sge = 2;
... ... @@ -602,6 +603,7 @@
602 603 mad_send_wr->sg_list[1].length = sizeof(struct ib_rmpp_mad) -
603 604 mad_send_wr->data_offset;
604 605 mad_send_wr->sg_list[1].lkey = mad_send_wr->sg_list[0].lkey;
  606 + rmpp_mad->rmpp_hdr.paylen_newwin = 0;
605 607 }
606 608  
607 609 if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
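
The first hunk above subtracts mad_send_wr->pad so that the PayloadLength advertised in the first RMPP segment counts only the bytes actually sent, while the second hunk zeroes paylen_newwin on the middle segments handled by the else branch. A small standalone sketch of the arithmetic (all figures assumed for illustration only):

#include <stdio.h>

int main(void)
{
        unsigned seg_size  = 220;  /* payload bytes per RMPP segment   */
        unsigned total_seg = 3;    /* segments in the transfer         */
        unsigned pad       = 50;   /* unused bytes in the last segment */

        /* first-segment PayloadLength: bytes actually carried, i.e.
         * full segments minus the padding of the final one */
        unsigned paylen = total_seg * seg_size - pad;

        printf("paylen_newwin = %u\n", paylen);   /* prints 610 */
        return 0;
}
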
drivers/infiniband/core/sa_query.c
... ... @@ -113,32 +113,6 @@
113 113 static spinlock_t tid_lock;
114 114 static u32 tid;
115 115  
116   -enum {
117   - IB_SA_ATTR_CLASS_PORTINFO = 0x01,
118   - IB_SA_ATTR_NOTICE = 0x02,
119   - IB_SA_ATTR_INFORM_INFO = 0x03,
120   - IB_SA_ATTR_NODE_REC = 0x11,
121   - IB_SA_ATTR_PORT_INFO_REC = 0x12,
122   - IB_SA_ATTR_SL2VL_REC = 0x13,
123   - IB_SA_ATTR_SWITCH_REC = 0x14,
124   - IB_SA_ATTR_LINEAR_FDB_REC = 0x15,
125   - IB_SA_ATTR_RANDOM_FDB_REC = 0x16,
126   - IB_SA_ATTR_MCAST_FDB_REC = 0x17,
127   - IB_SA_ATTR_SM_INFO_REC = 0x18,
128   - IB_SA_ATTR_LINK_REC = 0x20,
129   - IB_SA_ATTR_GUID_INFO_REC = 0x30,
130   - IB_SA_ATTR_SERVICE_REC = 0x31,
131   - IB_SA_ATTR_PARTITION_REC = 0x33,
132   - IB_SA_ATTR_RANGE_REC = 0x34,
133   - IB_SA_ATTR_PATH_REC = 0x35,
134   - IB_SA_ATTR_VL_ARB_REC = 0x36,
135   - IB_SA_ATTR_MC_GROUP_REC = 0x37,
136   - IB_SA_ATTR_MC_MEMBER_REC = 0x38,
137   - IB_SA_ATTR_TRACE_REC = 0x39,
138   - IB_SA_ATTR_MULTI_PATH_REC = 0x3a,
139   - IB_SA_ATTR_SERVICE_ASSOC_REC = 0x3b
140   -};
141   -
142 116 #define PATH_REC_FIELD(field) \
143 117 .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field), \
144 118 .struct_size_bytes = sizeof ((struct ib_sa_path_rec *) 0)->field, \
... ... @@ -431,8 +405,8 @@
431 405 event->event == IB_EVENT_LID_CHANGE ||
432 406 event->event == IB_EVENT_PKEY_CHANGE ||
433 407 event->event == IB_EVENT_SM_CHANGE) {
434   - struct ib_sa_device *sa_dev =
435   - ib_get_client_data(event->device, &sa_client);
  408 + struct ib_sa_device *sa_dev;
  409 + sa_dev = container_of(handler, typeof(*sa_dev), event_handler);
436 410  
437 411 schedule_work(&sa_dev->port[event->element.port_num -
438 412 sa_dev->start_port].update_task);
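
The hunk above recovers the ib_sa_device from its embedded event handler with container_of() instead of a client-data lookup. A self-contained userspace illustration of that macro (structure names hypothetical):

#include <stddef.h>
#include <stdio.h>

/* userspace re-statement of the kernel macro */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct event_handler {
        void (*fn)(void);
};

struct sa_device {
        int start_port;
        struct event_handler event_handler;   /* embedded member */
};

int main(void)
{
        struct sa_device dev = { .start_port = 1 };
        struct event_handler *h = &dev.event_handler;

        /* recover the enclosing object from the embedded member:
         * pure pointer arithmetic, no lookup table, no locking */
        struct sa_device *sa =
                container_of(h, struct sa_device, event_handler);

        printf("start_port = %d\n", sa->start_port);   /* prints 1 */
        return 0;
}
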
drivers/infiniband/core/ucm.c
... ... @@ -72,7 +72,6 @@
72 72  
73 73 static struct semaphore ctx_id_mutex;
74 74 static struct idr ctx_id_table;
75   -static int ctx_id_rover = 0;
76 75  
77 76 static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
78 77 {
79 78  
80 79  
... ... @@ -97,33 +96,16 @@
97 96 wake_up(&ctx->wait);
98 97 }
99 98  
100   -static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id)
  99 +static inline int ib_ucm_new_cm_id(int event)
101 100 {
102   - struct ib_ucm_context *ctx;
  101 + return event == IB_CM_REQ_RECEIVED || event == IB_CM_SIDR_REQ_RECEIVED;
  102 +}
  103 +
  104 +static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
  105 +{
103 106 struct ib_ucm_event *uevent;
104 107  
105   - down(&ctx_id_mutex);
106   - ctx = idr_find(&ctx_id_table, id);
107   - if (!ctx)
108   - ctx = ERR_PTR(-ENOENT);
109   - else if (ctx->file != file)
110   - ctx = ERR_PTR(-EINVAL);
111   - else
112   - idr_remove(&ctx_id_table, ctx->id);
113   - up(&ctx_id_mutex);
114   -
115   - if (IS_ERR(ctx))
116   - return PTR_ERR(ctx);
117   -
118   - atomic_dec(&ctx->ref);
119   - wait_event(ctx->wait, !atomic_read(&ctx->ref));
120   -
121   - /* No new events will be generated after destroying the cm_id. */
122   - if (!IS_ERR(ctx->cm_id))
123   - ib_destroy_cm_id(ctx->cm_id);
124   -
125   - /* Cleanup events not yet reported to the user. */
126   - down(&file->mutex);
  108 + down(&ctx->file->mutex);
127 109 list_del(&ctx->file_list);
128 110 while (!list_empty(&ctx->events)) {
129 111  
130 112  
... ... @@ -133,15 +115,12 @@
133 115 list_del(&uevent->ctx_list);
134 116  
135 117 /* clear incoming connections. */
136   - if (uevent->cm_id)
  118 + if (ib_ucm_new_cm_id(uevent->resp.event))
137 119 ib_destroy_cm_id(uevent->cm_id);
138 120  
139 121 kfree(uevent);
140 122 }
141   - up(&file->mutex);
142   -
143   - kfree(ctx);
144   - return 0;
  123 + up(&ctx->file->mutex);
145 124 }
146 125  
147 126 static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
... ... @@ -153,36 +132,31 @@
153 132 if (!ctx)
154 133 return NULL;
155 134  
  135 + memset(ctx, 0, sizeof *ctx);
156 136 atomic_set(&ctx->ref, 1);
157 137 init_waitqueue_head(&ctx->wait);
158 138 ctx->file = file;
159   -
160 139 INIT_LIST_HEAD(&ctx->events);
161 140  
162   - list_add_tail(&ctx->file_list, &file->ctxs);
  141 + do {
  142 + result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
  143 + if (!result)
  144 + goto error;
163 145  
164   - ctx_id_rover = (ctx_id_rover + 1) & INT_MAX;
165   -retry:
166   - result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
167   - if (!result)
168   - goto error;
  146 + down(&ctx_id_mutex);
  147 + result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
  148 + up(&ctx_id_mutex);
  149 + } while (result == -EAGAIN);
169 150  
170   - down(&ctx_id_mutex);
171   - result = idr_get_new_above(&ctx_id_table, ctx, ctx_id_rover, &ctx->id);
172   - up(&ctx_id_mutex);
173   -
174   - if (result == -EAGAIN)
175   - goto retry;
176 151 if (result)
177 152 goto error;
178 153  
  154 + list_add_tail(&ctx->file_list, &file->ctxs);
179 155 ucm_dbg("Allocated CM ID <%d>\n", ctx->id);
180   -
181 156 return ctx;
  157 +
182 158 error:
183   - list_del(&ctx->file_list);
184 159 kfree(ctx);
185   -
186 160 return NULL;
187 161 }
188 162 /*
189 163  
... ... @@ -219,12 +193,9 @@
219 193 kpath->packet_life_time_selector;
220 194 }
221 195  
222   -static void ib_ucm_event_req_get(struct ib_ucm_context *ctx,
223   - struct ib_ucm_req_event_resp *ureq,
  196 +static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
224 197 struct ib_cm_req_event_param *kreq)
225 198 {
226   - ureq->listen_id = ctx->id;
227   -
228 199 ureq->remote_ca_guid = kreq->remote_ca_guid;
229 200 ureq->remote_qkey = kreq->remote_qkey;
230 201 ureq->remote_qpn = kreq->remote_qpn;
... ... @@ -259,14 +230,6 @@
259 230 urep->srq = krep->srq;
260 231 }
261 232  
262   -static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx,
263   - struct ib_ucm_sidr_req_event_resp *ureq,
264   - struct ib_cm_sidr_req_event_param *kreq)
265   -{
266   - ureq->listen_id = ctx->id;
267   - ureq->pkey = kreq->pkey;
268   -}
269   -
270 233 static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
271 234 struct ib_cm_sidr_rep_event_param *krep)
272 235 {
273 236  
... ... @@ -275,15 +238,14 @@
275 238 urep->qpn = krep->qpn;
276 239 };
277 240  
278   -static int ib_ucm_event_process(struct ib_ucm_context *ctx,
279   - struct ib_cm_event *evt,
  241 +static int ib_ucm_event_process(struct ib_cm_event *evt,
280 242 struct ib_ucm_event *uvt)
281 243 {
282 244 void *info = NULL;
283 245  
284 246 switch (evt->event) {
285 247 case IB_CM_REQ_RECEIVED:
286   - ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp,
  248 + ib_ucm_event_req_get(&uvt->resp.u.req_resp,
287 249 &evt->param.req_rcvd);
288 250 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE;
289 251 uvt->resp.present = IB_UCM_PRES_PRIMARY;
... ... @@ -331,8 +293,8 @@
331 293 info = evt->param.apr_rcvd.apr_info;
332 294 break;
333 295 case IB_CM_SIDR_REQ_RECEIVED:
334   - ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp,
335   - &evt->param.sidr_req_rcvd);
  296 + uvt->resp.u.sidr_req_resp.pkey =
  297 + evt->param.sidr_req_rcvd.pkey;
336 298 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
337 299 break;
338 300 case IB_CM_SIDR_REP_RECEIVED:
... ... @@ -378,31 +340,24 @@
378 340 struct ib_ucm_event *uevent;
379 341 struct ib_ucm_context *ctx;
380 342 int result = 0;
381   - int id;
382 343  
383 344 ctx = cm_id->context;
384 345  
385   - if (event->event == IB_CM_REQ_RECEIVED ||
386   - event->event == IB_CM_SIDR_REQ_RECEIVED)
387   - id = IB_UCM_CM_ID_INVALID;
388   - else
389   - id = ctx->id;
390   -
391 346 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
392 347 if (!uevent)
393 348 goto err1;
394 349  
395 350 memset(uevent, 0, sizeof(*uevent));
396   - uevent->resp.id = id;
  351 + uevent->ctx = ctx;
  352 + uevent->cm_id = cm_id;
  353 + uevent->resp.uid = ctx->uid;
  354 + uevent->resp.id = ctx->id;
397 355 uevent->resp.event = event->event;
398 356  
399   - result = ib_ucm_event_process(ctx, event, uevent);
  357 + result = ib_ucm_event_process(event, uevent);
400 358 if (result)
401 359 goto err2;
402 360  
403   - uevent->ctx = ctx;
404   - uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL;
405   -
406 361 down(&ctx->file->mutex);
407 362 list_add_tail(&uevent->file_list, &ctx->file->events);
408 363 list_add_tail(&uevent->ctx_list, &ctx->events);
... ... @@ -414,7 +369,7 @@
414 369 kfree(uevent);
415 370 err1:
416 371 /* Destroy new cm_id's */
417   - return (id == IB_UCM_CM_ID_INVALID);
  372 + return ib_ucm_new_cm_id(event->event);
418 373 }
419 374  
420 375 static ssize_t ib_ucm_event(struct ib_ucm_file *file,
... ... @@ -423,7 +378,7 @@
423 378 {
424 379 struct ib_ucm_context *ctx;
425 380 struct ib_ucm_event_get cmd;
426   - struct ib_ucm_event *uevent = NULL;
  381 + struct ib_ucm_event *uevent;
427 382 int result = 0;
428 383 DEFINE_WAIT(wait);
429 384  
... ... @@ -436,7 +391,6 @@
436 391 * wait
437 392 */
438 393 down(&file->mutex);
439   -
440 394 while (list_empty(&file->events)) {
441 395  
442 396 if (file->filp->f_flags & O_NONBLOCK) {
443 397  
444 398  
... ... @@ -463,21 +417,18 @@
463 417  
464 418 uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);
465 419  
466   - if (!uevent->cm_id)
467   - goto user;
  420 + if (ib_ucm_new_cm_id(uevent->resp.event)) {
  421 + ctx = ib_ucm_ctx_alloc(file);
  422 + if (!ctx) {
  423 + result = -ENOMEM;
  424 + goto done;
  425 + }
468 426  
469   - ctx = ib_ucm_ctx_alloc(file);
470   - if (!ctx) {
471   - result = -ENOMEM;
472   - goto done;
  427 + ctx->cm_id = uevent->cm_id;
  428 + ctx->cm_id->context = ctx;
  429 + uevent->resp.id = ctx->id;
473 430 }
474 431  
475   - ctx->cm_id = uevent->cm_id;
476   - ctx->cm_id->context = ctx;
477   -
478   - uevent->resp.id = ctx->id;
479   -
480   -user:
481 432 if (copy_to_user((void __user *)(unsigned long)cmd.response,
482 433 &uevent->resp, sizeof(uevent->resp))) {
483 434 result = -EFAULT;
484 435  
... ... @@ -485,12 +436,10 @@
485 436 }
486 437  
487 438 if (uevent->data) {
488   -
489 439 if (cmd.data_len < uevent->data_len) {
490 440 result = -ENOMEM;
491 441 goto done;
492 442 }
493   -
494 443 if (copy_to_user((void __user *)(unsigned long)cmd.data,
495 444 uevent->data, uevent->data_len)) {
496 445 result = -EFAULT;
497 446  
... ... @@ -499,12 +448,10 @@
499 448 }
500 449  
501 450 if (uevent->info) {
502   -
503 451 if (cmd.info_len < uevent->info_len) {
504 452 result = -ENOMEM;
505 453 goto done;
506 454 }
507   -
508 455 if (copy_to_user((void __user *)(unsigned long)cmd.info,
509 456 uevent->info, uevent->info_len)) {
510 457 result = -EFAULT;
... ... @@ -514,6 +461,7 @@
514 461  
515 462 list_del(&uevent->file_list);
516 463 list_del(&uevent->ctx_list);
  464 + uevent->ctx->events_reported++;
517 465  
518 466 kfree(uevent->data);
519 467 kfree(uevent->info);
... ... @@ -545,6 +493,7 @@
545 493 if (!ctx)
546 494 return -ENOMEM;
547 495  
  496 + ctx->uid = cmd.uid;
548 497 ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx);
549 498 if (IS_ERR(ctx->cm_id)) {
550 499 result = PTR_ERR(ctx->cm_id);
... ... @@ -561,7 +510,14 @@
561 510 return 0;
562 511  
563 512 err:
564   - ib_ucm_destroy_ctx(file, ctx->id);
  513 + down(&ctx_id_mutex);
  514 + idr_remove(&ctx_id_table, ctx->id);
  515 + up(&ctx_id_mutex);
  516 +
  517 + if (!IS_ERR(ctx->cm_id))
  518 + ib_destroy_cm_id(ctx->cm_id);
  519 +
  520 + kfree(ctx);
565 521 return result;
566 522 }
567 523  
568 524  
569 525  
... ... @@ -570,11 +526,44 @@
570 526 int in_len, int out_len)
571 527 {
572 528 struct ib_ucm_destroy_id cmd;
  529 + struct ib_ucm_destroy_id_resp resp;
  530 + struct ib_ucm_context *ctx;
  531 + int result = 0;
573 532  
  533 + if (out_len < sizeof(resp))
  534 + return -ENOSPC;
  535 +
574 536 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
575 537 return -EFAULT;
576 538  
577   - return ib_ucm_destroy_ctx(file, cmd.id);
  539 + down(&ctx_id_mutex);
  540 + ctx = idr_find(&ctx_id_table, cmd.id);
  541 + if (!ctx)
  542 + ctx = ERR_PTR(-ENOENT);
  543 + else if (ctx->file != file)
  544 + ctx = ERR_PTR(-EINVAL);
  545 + else
  546 + idr_remove(&ctx_id_table, ctx->id);
  547 + up(&ctx_id_mutex);
  548 +
  549 + if (IS_ERR(ctx))
  550 + return PTR_ERR(ctx);
  551 +
  552 + atomic_dec(&ctx->ref);
  553 + wait_event(ctx->wait, !atomic_read(&ctx->ref));
  554 +
  555 + /* No new events will be generated after destroying the cm_id. */
  556 + ib_destroy_cm_id(ctx->cm_id);
  557 + /* Cleanup events not yet reported to the user. */
  558 + ib_ucm_cleanup_events(ctx);
  559 +
  560 + resp.events_reported = ctx->events_reported;
  561 + if (copy_to_user((void __user *)(unsigned long)cmd.response,
  562 + &resp, sizeof(resp)))
  563 + result = -EFAULT;
  564 +
  565 + kfree(ctx);
  566 + return result;
578 567 }
579 568  
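
The rewritten ib_ucm_destroy_id() above drops its own reference and then blocks until every concurrent event handler has dropped theirs, so the final kfree() cannot race an in-flight callback. A kernel-style sketch of that reference/wait pairing (helper names hypothetical):

static void ucm_ctx_put(struct ib_ucm_context *ctx)
{
        if (atomic_dec_and_test(&ctx->ref))
                wake_up(&ctx->wait);
}

static void ucm_ctx_teardown(struct ib_ucm_context *ctx)
{
        atomic_dec(&ctx->ref);          /* drop our own reference */
        wait_event(ctx->wait, !atomic_read(&ctx->ref));
        kfree(ctx);                     /* now provably unshared */
}
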
580 569 static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
... ... @@ -609,6 +598,98 @@
609 598 return result;
610 599 }
611 600  
  601 +static void ib_ucm_copy_ah_attr(struct ib_ucm_ah_attr *dest_attr,
  602 + struct ib_ah_attr *src_attr)
  603 +{
  604 + memcpy(dest_attr->grh_dgid, src_attr->grh.dgid.raw,
  605 + sizeof src_attr->grh.dgid);
  606 + dest_attr->grh_flow_label = src_attr->grh.flow_label;
  607 + dest_attr->grh_sgid_index = src_attr->grh.sgid_index;
  608 + dest_attr->grh_hop_limit = src_attr->grh.hop_limit;
  609 + dest_attr->grh_traffic_class = src_attr->grh.traffic_class;
  610 +
  611 + dest_attr->dlid = src_attr->dlid;
  612 + dest_attr->sl = src_attr->sl;
  613 + dest_attr->src_path_bits = src_attr->src_path_bits;
  614 + dest_attr->static_rate = src_attr->static_rate;
  615 + dest_attr->is_global = (src_attr->ah_flags & IB_AH_GRH);
  616 + dest_attr->port_num = src_attr->port_num;
  617 +}
  618 +
  619 +static void ib_ucm_copy_qp_attr(struct ib_ucm_init_qp_attr_resp *dest_attr,
  620 + struct ib_qp_attr *src_attr)
  621 +{
  622 + dest_attr->cur_qp_state = src_attr->cur_qp_state;
  623 + dest_attr->path_mtu = src_attr->path_mtu;
  624 + dest_attr->path_mig_state = src_attr->path_mig_state;
  625 + dest_attr->qkey = src_attr->qkey;
  626 + dest_attr->rq_psn = src_attr->rq_psn;
  627 + dest_attr->sq_psn = src_attr->sq_psn;
  628 + dest_attr->dest_qp_num = src_attr->dest_qp_num;
  629 + dest_attr->qp_access_flags = src_attr->qp_access_flags;
  630 +
  631 + dest_attr->max_send_wr = src_attr->cap.max_send_wr;
  632 + dest_attr->max_recv_wr = src_attr->cap.max_recv_wr;
  633 + dest_attr->max_send_sge = src_attr->cap.max_send_sge;
  634 + dest_attr->max_recv_sge = src_attr->cap.max_recv_sge;
  635 + dest_attr->max_inline_data = src_attr->cap.max_inline_data;
  636 +
  637 + ib_ucm_copy_ah_attr(&dest_attr->ah_attr, &src_attr->ah_attr);
  638 + ib_ucm_copy_ah_attr(&dest_attr->alt_ah_attr, &src_attr->alt_ah_attr);
  639 +
  640 + dest_attr->pkey_index = src_attr->pkey_index;
  641 + dest_attr->alt_pkey_index = src_attr->alt_pkey_index;
  642 + dest_attr->en_sqd_async_notify = src_attr->en_sqd_async_notify;
  643 + dest_attr->sq_draining = src_attr->sq_draining;
  644 + dest_attr->max_rd_atomic = src_attr->max_rd_atomic;
  645 + dest_attr->max_dest_rd_atomic = src_attr->max_dest_rd_atomic;
  646 + dest_attr->min_rnr_timer = src_attr->min_rnr_timer;
  647 + dest_attr->port_num = src_attr->port_num;
  648 + dest_attr->timeout = src_attr->timeout;
  649 + dest_attr->retry_cnt = src_attr->retry_cnt;
  650 + dest_attr->rnr_retry = src_attr->rnr_retry;
  651 + dest_attr->alt_port_num = src_attr->alt_port_num;
  652 + dest_attr->alt_timeout = src_attr->alt_timeout;
  653 +}
  654 +
  655 +static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file,
  656 + const char __user *inbuf,
  657 + int in_len, int out_len)
  658 +{
  659 + struct ib_ucm_init_qp_attr_resp resp;
  660 + struct ib_ucm_init_qp_attr cmd;
  661 + struct ib_ucm_context *ctx;
  662 + struct ib_qp_attr qp_attr;
  663 + int result = 0;
  664 +
  665 + if (out_len < sizeof(resp))
  666 + return -ENOSPC;
  667 +
  668 + if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
  669 + return -EFAULT;
  670 +
  671 + ctx = ib_ucm_ctx_get(file, cmd.id);
  672 + if (IS_ERR(ctx))
  673 + return PTR_ERR(ctx);
  674 +
  675 + resp.qp_attr_mask = 0;
  676 + memset(&qp_attr, 0, sizeof qp_attr);
  677 + qp_attr.qp_state = cmd.qp_state;
  678 + result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
  679 + if (result)
  680 + goto out;
  681 +
  682 + ib_ucm_copy_qp_attr(&resp, &qp_attr);
  683 +
  684 + if (copy_to_user((void __user *)(unsigned long)cmd.response,
  685 + &resp, sizeof(resp)))
  686 + result = -EFAULT;
  687 +
  688 +out:
  689 + ib_ucm_ctx_put(ctx);
  690 + return result;
  691 +}
  692 +
612 693 static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
613 694 const char __user *inbuf,
614 695 int in_len, int out_len)
... ... @@ -808,6 +889,7 @@
808 889  
809 890 ctx = ib_ucm_ctx_get(file, cmd.id);
810 891 if (!IS_ERR(ctx)) {
  892 + ctx->uid = cmd.uid;
811 893 result = ib_send_cm_rep(ctx->cm_id, &param);
812 894 ib_ucm_ctx_put(ctx);
813 895 } else
... ... @@ -1086,6 +1168,7 @@
1086 1168 [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req,
1087 1169 [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep,
1088 1170 [IB_USER_CM_CMD_EVENT] = ib_ucm_event,
  1171 + [IB_USER_CM_CMD_INIT_QP_ATTR] = ib_ucm_init_qp_attr,
1089 1172 };
1090 1173  
1091 1174 static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
1092 1175  
1093 1176  
... ... @@ -1161,12 +1244,18 @@
1161 1244  
1162 1245 down(&file->mutex);
1163 1246 while (!list_empty(&file->ctxs)) {
1164   -
1165 1247 ctx = list_entry(file->ctxs.next,
1166 1248 struct ib_ucm_context, file_list);
1167   -
1168 1249 up(&file->mutex);
1169   - ib_ucm_destroy_ctx(file, ctx->id);
  1250 +
  1251 + down(&ctx_id_mutex);
  1252 + idr_remove(&ctx_id_table, ctx->id);
  1253 + up(&ctx_id_mutex);
  1254 +
  1255 + ib_destroy_cm_id(ctx->cm_id);
  1256 + ib_ucm_cleanup_events(ctx);
  1257 + kfree(ctx);
  1258 +
1170 1259 down(&file->mutex);
1171 1260 }
1172 1261 up(&file->mutex);
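
The allocation rewrite earlier in this file replaces the hand-rolled id-rover logic with the stock 2.6-era idr idiom: preallocate with idr_pre_get() outside the lock, then retry idr_get_new() for as long as it reports -EAGAIN. As a kernel-style sketch (wrapper name hypothetical):

static int ucm_alloc_id(struct idr *table, struct semaphore *lock,
                        void *ptr, int *id)
{
        int ret;

        do {
                /* preallocate outside the lock; returns 0 on failure */
                if (!idr_pre_get(table, GFP_KERNEL))
                        return -ENOMEM;

                down(lock);
                ret = idr_get_new(table, ptr, id);
                up(lock);
        } while (ret == -EAGAIN);   /* raced with another allocator; retry */

        return ret;
}
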
drivers/infiniband/core/ucm.h
1 1 /*
2 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
  3 + * Copyright (c) 2005 Intel Corporation. All rights reserved.
3 4 *
4 5 * This software is available to you under a choice of one of two
5 6 * licenses. You may choose to be licensed under the terms of the GNU
... ... @@ -43,8 +44,6 @@
43 44 #include <rdma/ib_cm.h>
44 45 #include <rdma/ib_user_cm.h>
45 46  
46   -#define IB_UCM_CM_ID_INVALID 0xffffffff
47   -
48 47 struct ib_ucm_file {
49 48 struct semaphore mutex;
50 49 struct file *filp;
51 50  
... ... @@ -58,9 +57,11 @@
58 57 int id;
59 58 wait_queue_head_t wait;
60 59 atomic_t ref;
  60 + int events_reported;
61 61  
62 62 struct ib_ucm_file *file;
63 63 struct ib_cm_id *cm_id;
  64 + __u64 uid;
64 65  
65 66 struct list_head events; /* list of pending events. */
66 67 struct list_head file_list; /* member in file ctx list */
67 68  
... ... @@ -71,16 +72,12 @@
71 72 struct list_head file_list; /* member in file event list */
72 73 struct list_head ctx_list; /* member in ctx event list */
73 74  
  75 + struct ib_cm_id *cm_id;
74 76 struct ib_ucm_event_resp resp;
75 77 void *data;
76 78 void *info;
77 79 int data_len;
78 80 int info_len;
79   - /*
80   - * new connection identifiers needs to be saved until
81   - * userspace can get a handle on them.
82   - */
83   - struct ib_cm_id *cm_id;
84 81 };
85 82  
86 83 #endif /* UCM_H */
drivers/infiniband/core/uverbs.h
... ... @@ -76,20 +76,28 @@
76 76 struct ib_uverbs_event_file comp_file[1];
77 77 };
78 78  
79   -struct ib_uverbs_async_event {
80   - struct ib_uverbs_async_event_desc desc;
  79 +struct ib_uverbs_event {
  80 + union {
  81 + struct ib_uverbs_async_event_desc async;
  82 + struct ib_uverbs_comp_event_desc comp;
  83 + } desc;
81 84 struct list_head list;
  85 + struct list_head obj_list;
  86 + u32 *counter;
82 87 };
83 88  
84   -struct ib_uverbs_comp_event {
85   - struct ib_uverbs_comp_event_desc desc;
86   - struct list_head list;
  89 +struct ib_uevent_object {
  90 + struct ib_uobject uobject;
  91 + struct list_head event_list;
  92 + u32 events_reported;
87 93 };
88 94  
89   -struct ib_uobject_mr {
90   - struct ib_uobject uobj;
91   - struct page *page_list;
92   - struct scatterlist *sg_list;
  95 +struct ib_ucq_object {
  96 + struct ib_uobject uobject;
  97 + struct list_head comp_list;
  98 + struct list_head async_list;
  99 + u32 comp_events_reported;
  100 + u32 async_events_reported;
93 101 };
94 102  
95 103 extern struct semaphore ib_uverbs_idr_mutex;
drivers/infiniband/core/uverbs_cmd.c
... ... @@ -590,7 +590,7 @@
590 590 struct ib_uverbs_create_cq cmd;
591 591 struct ib_uverbs_create_cq_resp resp;
592 592 struct ib_udata udata;
593   - struct ib_uobject *uobj;
  593 + struct ib_ucq_object *uobj;
594 594 struct ib_cq *cq;
595 595 int ret;
596 596  
... ... @@ -611,8 +611,12 @@
611 611 if (!uobj)
612 612 return -ENOMEM;
613 613  
614   - uobj->user_handle = cmd.user_handle;
615   - uobj->context = file->ucontext;
  614 + uobj->uobject.user_handle = cmd.user_handle;
  615 + uobj->uobject.context = file->ucontext;
  616 + uobj->comp_events_reported = 0;
  617 + uobj->async_events_reported = 0;
  618 + INIT_LIST_HEAD(&uobj->comp_list);
  619 + INIT_LIST_HEAD(&uobj->async_list);
616 620  
617 621 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
618 622 file->ucontext, &udata);
... ... @@ -622,7 +626,7 @@
622 626 }
623 627  
624 628 cq->device = file->device->ib_dev;
625   - cq->uobject = uobj;
  629 + cq->uobject = &uobj->uobject;
626 630 cq->comp_handler = ib_uverbs_comp_handler;
627 631 cq->event_handler = ib_uverbs_cq_event_handler;
628 632 cq->cq_context = file;
... ... @@ -635,7 +639,7 @@
635 639 }
636 640  
637 641 down(&ib_uverbs_idr_mutex);
638   - ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->id);
  642 + ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);
639 643 up(&ib_uverbs_idr_mutex);
640 644  
641 645 if (ret == -EAGAIN)
642 646  
... ... @@ -644,11 +648,11 @@
644 648 goto err_cq;
645 649  
646 650 spin_lock_irq(&file->ucontext->lock);
647   - list_add_tail(&uobj->list, &file->ucontext->cq_list);
  651 + list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
648 652 spin_unlock_irq(&file->ucontext->lock);
649 653  
650 654 memset(&resp, 0, sizeof resp);
651   - resp.cq_handle = uobj->id;
  655 + resp.cq_handle = uobj->uobject.id;
652 656 resp.cqe = cq->cqe;
653 657  
654 658 if (copy_to_user((void __user *) (unsigned long) cmd.response,
655 659  
... ... @@ -661,11 +665,11 @@
661 665  
662 666 err_list:
663 667 spin_lock_irq(&file->ucontext->lock);
664   - list_del(&uobj->list);
  668 + list_del(&uobj->uobject.list);
665 669 spin_unlock_irq(&file->ucontext->lock);
666 670  
667 671 down(&ib_uverbs_idr_mutex);
668   - idr_remove(&ib_uverbs_cq_idr, uobj->id);
  672 + idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
669 673 up(&ib_uverbs_idr_mutex);
670 674  
671 675 err_cq:
672 676  
673 677  
... ... @@ -680,21 +684,27 @@
680 684 const char __user *buf, int in_len,
681 685 int out_len)
682 686 {
683   - struct ib_uverbs_destroy_cq cmd;
684   - struct ib_cq *cq;
685   - struct ib_uobject *uobj;
686   - int ret = -EINVAL;
  687 + struct ib_uverbs_destroy_cq cmd;
  688 + struct ib_uverbs_destroy_cq_resp resp;
  689 + struct ib_cq *cq;
  690 + struct ib_ucq_object *uobj;
  691 + struct ib_uverbs_event *evt, *tmp;
  692 + u64 user_handle;
  693 + int ret = -EINVAL;
687 694  
688 695 if (copy_from_user(&cmd, buf, sizeof cmd))
689 696 return -EFAULT;
690 697  
  698 + memset(&resp, 0, sizeof resp);
  699 +
691 700 down(&ib_uverbs_idr_mutex);
692 701  
693 702 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
694 703 if (!cq || cq->uobject->context != file->ucontext)
695 704 goto out;
696 705  
697   - uobj = cq->uobject;
  706 + user_handle = cq->uobject->user_handle;
  707 + uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
698 708  
699 709 ret = ib_destroy_cq(cq);
700 710 if (ret)
701 711  
702 712  
... ... @@ -703,11 +713,32 @@
703 713 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
704 714  
705 715 spin_lock_irq(&file->ucontext->lock);
706   - list_del(&uobj->list);
  716 + list_del(&uobj->uobject.list);
707 717 spin_unlock_irq(&file->ucontext->lock);
708 718  
  719 + spin_lock_irq(&file->comp_file[0].lock);
  720 + list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
  721 + list_del(&evt->list);
  722 + kfree(evt);
  723 + }
  724 + spin_unlock_irq(&file->comp_file[0].lock);
  725 +
  726 + spin_lock_irq(&file->async_file.lock);
  727 + list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
  728 + list_del(&evt->list);
  729 + kfree(evt);
  730 + }
  731 + spin_unlock_irq(&file->async_file.lock);
  732 +
  733 + resp.comp_events_reported = uobj->comp_events_reported;
  734 + resp.async_events_reported = uobj->async_events_reported;
  735 +
709 736 kfree(uobj);
710 737  
  738 + if (copy_to_user((void __user *) (unsigned long) cmd.response,
  739 + &resp, sizeof resp))
  740 + ret = -EFAULT;
  741 +
711 742 out:
712 743 up(&ib_uverbs_idr_mutex);
713 744  
... ... @@ -721,7 +752,7 @@
721 752 struct ib_uverbs_create_qp cmd;
722 753 struct ib_uverbs_create_qp_resp resp;
723 754 struct ib_udata udata;
724   - struct ib_uobject *uobj;
  755 + struct ib_uevent_object *uobj;
725 756 struct ib_pd *pd;
726 757 struct ib_cq *scq, *rcq;
727 758 struct ib_srq *srq;
... ... @@ -772,8 +803,10 @@
772 803 attr.cap.max_recv_sge = cmd.max_recv_sge;
773 804 attr.cap.max_inline_data = cmd.max_inline_data;
774 805  
775   - uobj->user_handle = cmd.user_handle;
776   - uobj->context = file->ucontext;
  806 + uobj->uobject.user_handle = cmd.user_handle;
  807 + uobj->uobject.context = file->ucontext;
  808 + uobj->events_reported = 0;
  809 + INIT_LIST_HEAD(&uobj->event_list);
777 810  
778 811 qp = pd->device->create_qp(pd, &attr, &udata);
779 812 if (IS_ERR(qp)) {
... ... @@ -786,7 +819,7 @@
786 819 qp->send_cq = attr.send_cq;
787 820 qp->recv_cq = attr.recv_cq;
788 821 qp->srq = attr.srq;
789   - qp->uobject = uobj;
  822 + qp->uobject = &uobj->uobject;
790 823 qp->event_handler = attr.event_handler;
791 824 qp->qp_context = attr.qp_context;
792 825 qp->qp_type = attr.qp_type;
793 826  
794 827  
... ... @@ -805,17 +838,17 @@
805 838 goto err_destroy;
806 839 }
807 840  
808   - ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->id);
  841 + ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
809 842  
810 843 if (ret == -EAGAIN)
811 844 goto retry;
812 845 if (ret)
813 846 goto err_destroy;
814 847  
815   - resp.qp_handle = uobj->id;
  848 + resp.qp_handle = uobj->uobject.id;
816 849  
817 850 spin_lock_irq(&file->ucontext->lock);
818   - list_add_tail(&uobj->list, &file->ucontext->qp_list);
  851 + list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
819 852 spin_unlock_irq(&file->ucontext->lock);
820 853  
821 854 if (copy_to_user((void __user *) (unsigned long) cmd.response,
... ... @@ -830,7 +863,7 @@
830 863  
831 864 err_list:
832 865 spin_lock_irq(&file->ucontext->lock);
833   - list_del(&uobj->list);
  866 + list_del(&uobj->uobject.list);
834 867 spin_unlock_irq(&file->ucontext->lock);
835 868  
836 869 err_destroy:
837 870  
838 871  
... ... @@ -930,21 +963,25 @@
930 963 const char __user *buf, int in_len,
931 964 int out_len)
932 965 {
933   - struct ib_uverbs_destroy_qp cmd;
934   - struct ib_qp *qp;
935   - struct ib_uobject *uobj;
936   - int ret = -EINVAL;
  966 + struct ib_uverbs_destroy_qp cmd;
  967 + struct ib_uverbs_destroy_qp_resp resp;
  968 + struct ib_qp *qp;
  969 + struct ib_uevent_object *uobj;
  970 + struct ib_uverbs_event *evt, *tmp;
  971 + int ret = -EINVAL;
937 972  
938 973 if (copy_from_user(&cmd, buf, sizeof cmd))
939 974 return -EFAULT;
940 975  
  976 + memset(&resp, 0, sizeof resp);
  977 +
941 978 down(&ib_uverbs_idr_mutex);
942 979  
943 980 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
944 981 if (!qp || qp->uobject->context != file->ucontext)
945 982 goto out;
946 983  
947   - uobj = qp->uobject;
  984 + uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);
948 985  
949 986 ret = ib_destroy_qp(qp);
950 987 if (ret)
951 988  
952 989  
... ... @@ -953,11 +990,24 @@
953 990 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
954 991  
955 992 spin_lock_irq(&file->ucontext->lock);
956   - list_del(&uobj->list);
  993 + list_del(&uobj->uobject.list);
957 994 spin_unlock_irq(&file->ucontext->lock);
958 995  
  996 + spin_lock_irq(&file->async_file.lock);
  997 + list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
  998 + list_del(&evt->list);
  999 + kfree(evt);
  1000 + }
  1001 + spin_unlock_irq(&file->async_file.lock);
  1002 +
  1003 + resp.events_reported = uobj->events_reported;
  1004 +
959 1005 kfree(uobj);
960 1006  
  1007 + if (copy_to_user((void __user *) (unsigned long) cmd.response,
  1008 + &resp, sizeof resp))
  1009 + ret = -EFAULT;
  1010 +
961 1011 out:
962 1012 up(&ib_uverbs_idr_mutex);
963 1013  
... ... @@ -1015,7 +1065,7 @@
1015 1065 struct ib_uverbs_create_srq cmd;
1016 1066 struct ib_uverbs_create_srq_resp resp;
1017 1067 struct ib_udata udata;
1018   - struct ib_uobject *uobj;
  1068 + struct ib_uevent_object *uobj;
1019 1069 struct ib_pd *pd;
1020 1070 struct ib_srq *srq;
1021 1071 struct ib_srq_init_attr attr;
... ... @@ -1050,8 +1100,10 @@
1050 1100 attr.attr.max_sge = cmd.max_sge;
1051 1101 attr.attr.srq_limit = cmd.srq_limit;
1052 1102  
1053   - uobj->user_handle = cmd.user_handle;
1054   - uobj->context = file->ucontext;
  1103 + uobj->uobject.user_handle = cmd.user_handle;
  1104 + uobj->uobject.context = file->ucontext;
  1105 + uobj->events_reported = 0;
  1106 + INIT_LIST_HEAD(&uobj->event_list);
1055 1107  
1056 1108 srq = pd->device->create_srq(pd, &attr, &udata);
1057 1109 if (IS_ERR(srq)) {
... ... @@ -1061,7 +1113,7 @@
1061 1113  
1062 1114 srq->device = pd->device;
1063 1115 srq->pd = pd;
1064   - srq->uobject = uobj;
  1116 + srq->uobject = &uobj->uobject;
1065 1117 srq->event_handler = attr.event_handler;
1066 1118 srq->srq_context = attr.srq_context;
1067 1119 atomic_inc(&pd->usecnt);
1068 1120  
1069 1121  
... ... @@ -1075,17 +1127,17 @@
1075 1127 goto err_destroy;
1076 1128 }
1077 1129  
1078   - ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id);
  1130 + ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);
1079 1131  
1080 1132 if (ret == -EAGAIN)
1081 1133 goto retry;
1082 1134 if (ret)
1083 1135 goto err_destroy;
1084 1136  
1085   - resp.srq_handle = uobj->id;
  1137 + resp.srq_handle = uobj->uobject.id;
1086 1138  
1087 1139 spin_lock_irq(&file->ucontext->lock);
1088   - list_add_tail(&uobj->list, &file->ucontext->srq_list);
  1140 + list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
1089 1141 spin_unlock_irq(&file->ucontext->lock);
1090 1142  
1091 1143 if (copy_to_user((void __user *) (unsigned long) cmd.response,
... ... @@ -1100,7 +1152,7 @@
1100 1152  
1101 1153 err_list:
1102 1154 spin_lock_irq(&file->ucontext->lock);
1103   - list_del(&uobj->list);
  1155 + list_del(&uobj->uobject.list);
1104 1156 spin_unlock_irq(&file->ucontext->lock);
1105 1157  
1106 1158 err_destroy:
1107 1159  
1108 1160  
... ... @@ -1149,21 +1201,25 @@
1149 1201 const char __user *buf, int in_len,
1150 1202 int out_len)
1151 1203 {
1152   - struct ib_uverbs_destroy_srq cmd;
1153   - struct ib_srq *srq;
1154   - struct ib_uobject *uobj;
1155   - int ret = -EINVAL;
  1204 + struct ib_uverbs_destroy_srq cmd;
  1205 + struct ib_uverbs_destroy_srq_resp resp;
  1206 + struct ib_srq *srq;
  1207 + struct ib_uevent_object *uobj;
  1208 + struct ib_uverbs_event *evt, *tmp;
  1209 + int ret = -EINVAL;
1156 1210  
1157 1211 if (copy_from_user(&cmd, buf, sizeof cmd))
1158 1212 return -EFAULT;
1159 1213  
1160 1214 down(&ib_uverbs_idr_mutex);
1161 1215  
  1216 + memset(&resp, 0, sizeof resp);
  1217 +
1162 1218 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
1163 1219 if (!srq || srq->uobject->context != file->ucontext)
1164 1220 goto out;
1165 1221  
1166   - uobj = srq->uobject;
  1222 + uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);
1167 1223  
1168 1224 ret = ib_destroy_srq(srq);
1169 1225 if (ret)
1170 1226  
1171 1227  
... ... @@ -1172,10 +1228,23 @@
1172 1228 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
1173 1229  
1174 1230 spin_lock_irq(&file->ucontext->lock);
1175   - list_del(&uobj->list);
  1231 + list_del(&uobj->uobject.list);
1176 1232 spin_unlock_irq(&file->ucontext->lock);
1177 1233  
  1234 + spin_lock_irq(&file->async_file.lock);
  1235 + list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
  1236 + list_del(&evt->list);
  1237 + kfree(evt);
  1238 + }
  1239 + spin_unlock_irq(&file->async_file.lock);
  1240 +
  1241 + resp.events_reported = uobj->events_reported;
  1242 +
1178 1243 kfree(uobj);
  1244 +
  1245 + if (copy_to_user((void __user *) (unsigned long) cmd.response,
  1246 + &resp, sizeof resp))
  1247 + ret = -EFAULT;
1179 1248  
1180 1249 out:
1181 1250 up(&ib_uverbs_idr_mutex);
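
Each destroy path above drains the per-object event list under the event file's lock before freeing the uobject. The _safe list iterator is what makes unlinking-and-freeing legal while walking; a kernel-style sketch of the shared pattern (helper name hypothetical):

static void drain_object_events(struct list_head *obj_events,
                                spinlock_t *event_file_lock)
{
        struct ib_uverbs_event *evt, *tmp;

        spin_lock_irq(event_file_lock);
        list_for_each_entry_safe(evt, tmp, obj_events, obj_list) {
                list_del(&evt->list);   /* unhook from the file's queue */
                kfree(evt);             /* obj_events dies with the uobject */
        }
        spin_unlock_irq(event_file_lock);
}
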
drivers/infiniband/core/uverbs_main.c
... ... @@ -120,7 +120,7 @@
120 120 idr_remove(&ib_uverbs_qp_idr, uobj->id);
121 121 ib_destroy_qp(qp);
122 122 list_del(&uobj->list);
123   - kfree(uobj);
  123 + kfree(container_of(uobj, struct ib_uevent_object, uobject));
124 124 }
125 125  
126 126 list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
... ... @@ -128,7 +128,7 @@
128 128 idr_remove(&ib_uverbs_cq_idr, uobj->id);
129 129 ib_destroy_cq(cq);
130 130 list_del(&uobj->list);
131   - kfree(uobj);
  131 + kfree(container_of(uobj, struct ib_ucq_object, uobject));
132 132 }
133 133  
134 134 list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
... ... @@ -136,7 +136,7 @@
136 136 idr_remove(&ib_uverbs_srq_idr, uobj->id);
137 137 ib_destroy_srq(srq);
138 138 list_del(&uobj->list);
139   - kfree(uobj);
  139 + kfree(container_of(uobj, struct ib_uevent_object, uobject));
140 140 }
141 141  
142 142 /* XXX Free MWs */
... ... @@ -182,7 +182,7 @@
182 182 size_t count, loff_t *pos)
183 183 {
184 184 struct ib_uverbs_event_file *file = filp->private_data;
185   - void *event;
  185 + struct ib_uverbs_event *event;
186 186 int eventsz;
187 187 int ret = 0;
... ... @@ -207,21 +207,23 @@
207 207 return -ENODEV;
208 208 }
209 209  
210   - if (file->is_async) {
211   - event = list_entry(file->event_list.next,
212   - struct ib_uverbs_async_event, list);
  210 + event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
  211 +
  212 + if (file->is_async)
213 213 eventsz = sizeof (struct ib_uverbs_async_event_desc);
214   - } else {
215   - event = list_entry(file->event_list.next,
216   - struct ib_uverbs_comp_event, list);
  214 + else
217 215 eventsz = sizeof (struct ib_uverbs_comp_event_desc);
218   - }
219 216  
220 217 if (eventsz > count) {
221 218 ret = -EINVAL;
222 219 event = NULL;
223   - } else
  220 + } else {
224 221 list_del(file->event_list.next);
  222 + if (event->counter) {
  223 + ++(*event->counter);
  224 + list_del(&event->obj_list);
  225 + }
  226 + }
225 227  
226 228 spin_unlock_irq(&file->lock);
227 229  
228 230  
... ... @@ -257,16 +259,13 @@
257 259  
258 260 static void ib_uverbs_event_release(struct ib_uverbs_event_file *file)
259 261 {
260   - struct list_head *entry, *tmp;
  262 + struct ib_uverbs_event *entry, *tmp;
261 263  
262 264 spin_lock_irq(&file->lock);
263 265 if (file->fd != -1) {
264 266 file->fd = -1;
265   - list_for_each_safe(entry, tmp, &file->event_list)
266   - if (file->is_async)
267   - kfree(list_entry(entry, struct ib_uverbs_async_event, list));
268   - else
269   - kfree(list_entry(entry, struct ib_uverbs_comp_event, list));
  267 + list_for_each_entry_safe(entry, tmp, &file->event_list, list)
  268 + kfree(entry);
270 269 }
271 270 spin_unlock_irq(&file->lock);
272 271 }
... ... @@ -304,18 +303,23 @@
304 303  
305 304 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
306 305 {
307   - struct ib_uverbs_file *file = cq_context;
308   - struct ib_uverbs_comp_event *entry;
309   - unsigned long flags;
  306 + struct ib_uverbs_file *file = cq_context;
  307 + struct ib_ucq_object *uobj;
  308 + struct ib_uverbs_event *entry;
  309 + unsigned long flags;
310 310  
311 311 entry = kmalloc(sizeof *entry, GFP_ATOMIC);
312 312 if (!entry)
313 313 return;
314 314  
315   - entry->desc.cq_handle = cq->uobject->user_handle;
  315 + uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
316 316  
  317 + entry->desc.comp.cq_handle = cq->uobject->user_handle;
  318 + entry->counter = &uobj->comp_events_reported;
  319 +
317 320 spin_lock_irqsave(&file->comp_file[0].lock, flags);
318 321 list_add_tail(&entry->list, &file->comp_file[0].event_list);
  322 + list_add_tail(&entry->obj_list, &uobj->comp_list);
319 323 spin_unlock_irqrestore(&file->comp_file[0].lock, flags);
320 324  
321 325 wake_up_interruptible(&file->comp_file[0].poll_wait);
... ... @@ -323,20 +327,25 @@
323 327 }
324 328  
325 329 static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
326   - __u64 element, __u64 event)
  330 + __u64 element, __u64 event,
  331 + struct list_head *obj_list,
  332 + u32 *counter)
327 333 {
328   - struct ib_uverbs_async_event *entry;
  334 + struct ib_uverbs_event *entry;
329 335 unsigned long flags;
330 336  
331 337 entry = kmalloc(sizeof *entry, GFP_ATOMIC);
332 338 if (!entry)
333 339 return;
334 340  
335   - entry->desc.element = element;
336   - entry->desc.event_type = event;
  341 + entry->desc.async.element = element;
  342 + entry->desc.async.event_type = event;
  343 + entry->counter = counter;
337 344  
338 345 spin_lock_irqsave(&file->async_file.lock, flags);
339 346 list_add_tail(&entry->list, &file->async_file.event_list);
  347 + if (obj_list)
  348 + list_add_tail(&entry->obj_list, obj_list);
340 349 spin_unlock_irqrestore(&file->async_file.lock, flags);
341 350  
342 351 wake_up_interruptible(&file->async_file.poll_wait);
343 352  
344 353  
... ... @@ -345,23 +354,39 @@
345 354  
346 355 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
347 356 {
348   - ib_uverbs_async_handler(context_ptr,
349   - event->element.cq->uobject->user_handle,
350   - event->event);
  357 + struct ib_ucq_object *uobj;
  358 +
  359 + uobj = container_of(event->element.cq->uobject,
  360 + struct ib_ucq_object, uobject);
  361 +
  362 + ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
  363 + event->event, &uobj->async_list,
  364 + &uobj->async_events_reported);
  365 +
351 366 }
352 367  
353 368 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
354 369 {
355   - ib_uverbs_async_handler(context_ptr,
356   - event->element.qp->uobject->user_handle,
357   - event->event);
  370 + struct ib_uevent_object *uobj;
  371 +
  372 + uobj = container_of(event->element.qp->uobject,
  373 + struct ib_uevent_object, uobject);
  374 +
  375 + ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
  376 + event->event, &uobj->event_list,
  377 + &uobj->events_reported);
358 378 }
359 379  
360 380 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
361 381 {
362   - ib_uverbs_async_handler(context_ptr,
363   - event->element.srq->uobject->user_handle,
364   - event->event);
  382 + struct ib_uevent_object *uobj;
  383 +
  384 + uobj = container_of(event->element.srq->uobject,
  385 + struct ib_uevent_object, uobject);
  386 +
  387 + ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
  388 + event->event, &uobj->event_list,
  389 + &uobj->events_reported);
365 390 }
366 391  
367 392 static void ib_uverbs_event_handler(struct ib_event_handler *handler,
... ... @@ -370,7 +395,8 @@
370 395 struct ib_uverbs_file *file =
371 396 container_of(handler, struct ib_uverbs_file, event_handler);
372 397  
373   - ib_uverbs_async_handler(file, event->element.port_num, event->event);
  398 + ib_uverbs_async_handler(file, event->element.port_num, event->event,
  399 + NULL, NULL);
374 400 }
375 401  
376 402 static int ib_uverbs_event_init(struct ib_uverbs_event_file *file,
drivers/infiniband/hw/mthca/mthca_qp.c
... ... @@ -220,6 +220,16 @@
220 220 (PAGE_SIZE - 1));
221 221 }
222 222  
  223 +static void mthca_wq_init(struct mthca_wq *wq)
  224 +{
  225 + spin_lock_init(&wq->lock);
  226 + wq->next_ind = 0;
  227 + wq->last_comp = wq->max - 1;
  228 + wq->head = 0;
  229 + wq->tail = 0;
  230 + wq->last = NULL;
  231 +}
  232 +
223 233 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
224 234 enum ib_event_type event_type)
225 235 {
... ... @@ -833,8 +843,8 @@
833 843 store_attrs(to_msqp(qp), attr, attr_mask);
834 844  
835 845 /*
836   - * If we are moving QP0 to RTR, bring the IB link up; if we
837   - * are moving QP0 to RESET or ERROR, bring the link back down.
  846 + * If we moved QP0 to RTR, bring the IB link up; if we moved
  847 + * QP0 to RESET or ERROR, bring the link back down.
838 848 */
839 849 if (is_qp0(dev, qp)) {
840 850 if (cur_state != IB_QPS_RTR &&
... ... @@ -848,6 +858,26 @@
848 858 mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
849 859 }
850 860  
  861 + /*
  862 + * If we moved a kernel QP to RESET, clean up all old CQ
  863 + * entries and reinitialize the QP.
  864 + */
  865 + if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
  866 + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
  867 + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
  868 + if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
  869 + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
  870 + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
  871 +
  872 + mthca_wq_init(&qp->sq);
  873 + mthca_wq_init(&qp->rq);
  874 +
  875 + if (mthca_is_memfree(dev)) {
  876 + *qp->sq.db = 0;
  877 + *qp->rq.db = 0;
  878 + }
  879 + }
  880 +
851 881 return err;
852 882 }
853 883  
... ... @@ -1003,16 +1033,6 @@
1003 1033 }
1004 1034 }
1005 1035  
1006   -static void mthca_wq_init(struct mthca_wq* wq)
1007   -{
1008   - spin_lock_init(&wq->lock);
1009   - wq->next_ind = 0;
1010   - wq->last_comp = wq->max - 1;
1011   - wq->head = 0;
1012   - wq->tail = 0;
1013   - wq->last = NULL;
1014   -}
1015   -
1016 1036 static int mthca_alloc_qp_common(struct mthca_dev *dev,
1017 1037 struct mthca_pd *pd,
1018 1038 struct mthca_cq *send_cq,
... ... @@ -1024,6 +1044,7 @@
1024 1044 int i;
1025 1045  
1026 1046 atomic_set(&qp->refcount, 1);
  1047 + init_waitqueue_head(&qp->wait);
1027 1048 qp->state = IB_QPS_RESET;
1028 1049 qp->atomic_rd_en = 0;
1029 1050 qp->resp_depth = 0;
drivers/infiniband/ulp/ipoib/ipoib_main.c
... ... @@ -1062,6 +1062,8 @@
1062 1062 ipoib_dev_cleanup(priv->dev);
1063 1063 free_netdev(priv->dev);
1064 1064 }
  1065 +
  1066 + kfree(dev_list);
1065 1067 }
1066 1068  
1067 1069 static int __init ipoib_init_module(void)
include/rdma/ib_cm.h
... ... @@ -290,6 +290,7 @@
290 290 enum ib_cm_lap_state lap_state; /* internal CM/debug use */
291 291 __be32 local_id;
292 292 __be32 remote_id;
  293 + u32 remote_cm_qpn; /* 1 unless redirected */
293 294 };
294 295  
295 296 /**
include/rdma/ib_mad.h
... ... @@ -173,6 +173,27 @@
173 173 u8 data[216];
174 174 };
175 175  
  176 +struct ib_class_port_info
  177 +{
  178 + u8 base_version;
  179 + u8 class_version;
  180 + __be16 capability_mask;
  181 + u8 reserved[3];
  182 + u8 resp_time_value;
  183 + u8 redirect_gid[16];
  184 + __be32 redirect_tcslfl;
  185 + __be16 redirect_lid;
  186 + __be16 redirect_pkey;
  187 + __be32 redirect_qp;
  188 + __be32 redirect_qkey;
  189 + u8 trap_gid[16];
  190 + __be32 trap_tcslfl;
  191 + __be16 trap_lid;
  192 + __be16 trap_pkey;
  193 + __be32 trap_hlqp;
  194 + __be32 trap_qkey;
  195 +};
  196 +
176 197 /**
177 198 * ib_mad_send_buf - MAD data buffer and work request for sends.
178 199 * @mad: References an allocated MAD data buffer. The size of the data
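
The multi-byte fields of the new ib_class_port_info are declared __be16/__be32 because MADs travel big-endian on the wire, so CPU values must pass through the byte-order helpers. A kernel-style sketch (function name hypothetical; the 24-bit QPN mask is an assumption about the field layout):

static void fill_redirect(struct ib_class_port_info *cpi, u16 lid, u32 qpn)
{
        u16 host_lid;

        cpi->redirect_lid = cpu_to_be16(lid);
        cpi->redirect_qp  = cpu_to_be32(qpn & 0xffffff); /* assumed 24-bit QPN */

        /* and converting back when reading a received MAD: */
        host_lid = be16_to_cpu(cpi->redirect_lid);
        (void) host_lid;
}
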
include/rdma/ib_sa.h
... ... @@ -46,7 +46,36 @@
46 46  
47 47 IB_SA_METHOD_GET_TABLE = 0x12,
48 48 IB_SA_METHOD_GET_TABLE_RESP = 0x92,
49   - IB_SA_METHOD_DELETE = 0x15
  49 + IB_SA_METHOD_DELETE = 0x15,
  50 + IB_SA_METHOD_DELETE_RESP = 0x95,
  51 + IB_SA_METHOD_GET_MULTI = 0x14,
  52 + IB_SA_METHOD_GET_MULTI_RESP = 0x94,
  53 + IB_SA_METHOD_GET_TRACE_TBL = 0x13
  54 +};
  55 +
  56 +enum {
  57 + IB_SA_ATTR_CLASS_PORTINFO = 0x01,
  58 + IB_SA_ATTR_NOTICE = 0x02,
  59 + IB_SA_ATTR_INFORM_INFO = 0x03,
  60 + IB_SA_ATTR_NODE_REC = 0x11,
  61 + IB_SA_ATTR_PORT_INFO_REC = 0x12,
  62 + IB_SA_ATTR_SL2VL_REC = 0x13,
  63 + IB_SA_ATTR_SWITCH_REC = 0x14,
  64 + IB_SA_ATTR_LINEAR_FDB_REC = 0x15,
  65 + IB_SA_ATTR_RANDOM_FDB_REC = 0x16,
  66 + IB_SA_ATTR_MCAST_FDB_REC = 0x17,
  67 + IB_SA_ATTR_SM_INFO_REC = 0x18,
  68 + IB_SA_ATTR_LINK_REC = 0x20,
  69 + IB_SA_ATTR_GUID_INFO_REC = 0x30,
  70 + IB_SA_ATTR_SERVICE_REC = 0x31,
  71 + IB_SA_ATTR_PARTITION_REC = 0x33,
  72 + IB_SA_ATTR_PATH_REC = 0x35,
  73 + IB_SA_ATTR_VL_ARB_REC = 0x36,
  74 + IB_SA_ATTR_MC_MEMBER_REC = 0x38,
  75 + IB_SA_ATTR_TRACE_REC = 0x39,
  76 + IB_SA_ATTR_MULTI_PATH_REC = 0x3a,
  77 + IB_SA_ATTR_SERVICE_ASSOC_REC = 0x3b,
  78 + IB_SA_ATTR_INFORM_INFO_REC = 0xf3
50 79 };
51 80  
52 81 enum ib_sa_selector {
include/rdma/ib_user_cm.h
1 1 /*
2 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
  3 + * Copyright (c) 2005 Intel Corporation. All rights reserved.
3 4 *
4 5 * This software is available to you under a choice of one of two
5 6 * licenses. You may choose to be licensed under the terms of the GNU
... ... @@ -37,7 +38,7 @@
37 38  
38 39 #include <linux/types.h>
39 40  
40   -#define IB_USER_CM_ABI_VERSION 1
  41 +#define IB_USER_CM_ABI_VERSION 2
41 42  
42 43 enum {
43 44 IB_USER_CM_CMD_CREATE_ID,
... ... @@ -60,6 +61,7 @@
60 61 IB_USER_CM_CMD_SEND_SIDR_REP,
61 62  
62 63 IB_USER_CM_CMD_EVENT,
  64 + IB_USER_CM_CMD_INIT_QP_ATTR,
63 65 };
64 66 /*
65 67 * command ABI structures.
... ... @@ -71,6 +73,7 @@
71 73 };
72 74  
73 75 struct ib_ucm_create_id {
  76 + __u64 uid;
74 77 __u64 response;
75 78 };
76 79  
77 80  
... ... @@ -79,9 +82,14 @@
79 82 };
80 83  
81 84 struct ib_ucm_destroy_id {
  85 + __u64 response;
82 86 __u32 id;
83 87 };
84 88  
  89 +struct ib_ucm_destroy_id_resp {
  90 + __u32 events_reported;
  91 +};
  92 +
85 93 struct ib_ucm_attr_id {
86 94 __u64 response;
87 95 __u32 id;
... ... @@ -94,6 +102,64 @@
94 102 __be32 remote_id;
95 103 };
96 104  
  105 +struct ib_ucm_init_qp_attr {
  106 + __u64 response;
  107 + __u32 id;
  108 + __u32 qp_state;
  109 +};
  110 +
  111 +struct ib_ucm_ah_attr {
  112 + __u8 grh_dgid[16];
  113 + __u32 grh_flow_label;
  114 + __u16 dlid;
  115 + __u16 reserved;
  116 + __u8 grh_sgid_index;
  117 + __u8 grh_hop_limit;
  118 + __u8 grh_traffic_class;
  119 + __u8 sl;
  120 + __u8 src_path_bits;
  121 + __u8 static_rate;
  122 + __u8 is_global;
  123 + __u8 port_num;
  124 +};
  125 +
  126 +struct ib_ucm_init_qp_attr_resp {
  127 + __u32 qp_attr_mask;
  128 + __u32 qp_state;
  129 + __u32 cur_qp_state;
  130 + __u32 path_mtu;
  131 + __u32 path_mig_state;
  132 + __u32 qkey;
  133 + __u32 rq_psn;
  134 + __u32 sq_psn;
  135 + __u32 dest_qp_num;
  136 + __u32 qp_access_flags;
  137 +
  138 + struct ib_ucm_ah_attr ah_attr;
  139 + struct ib_ucm_ah_attr alt_ah_attr;
  140 +
  141 + /* ib_qp_cap */
  142 + __u32 max_send_wr;
  143 + __u32 max_recv_wr;
  144 + __u32 max_send_sge;
  145 + __u32 max_recv_sge;
  146 + __u32 max_inline_data;
  147 +
  148 + __u16 pkey_index;
  149 + __u16 alt_pkey_index;
  150 + __u8 en_sqd_async_notify;
  151 + __u8 sq_draining;
  152 + __u8 max_rd_atomic;
  153 + __u8 max_dest_rd_atomic;
  154 + __u8 min_rnr_timer;
  155 + __u8 port_num;
  156 + __u8 timeout;
  157 + __u8 retry_cnt;
  158 + __u8 rnr_retry;
  159 + __u8 alt_port_num;
  160 + __u8 alt_timeout;
  161 +};
  162 +
97 163 struct ib_ucm_listen {
98 164 __be64 service_id;
99 165 __be64 service_mask;
... ... @@ -157,6 +223,7 @@
157 223 };
158 224  
159 225 struct ib_ucm_rep {
  226 + __u64 uid;
160 227 __u64 data;
161 228 __u32 id;
162 229 __u32 qpn;
... ... @@ -232,7 +299,6 @@
232 299 };
233 300  
234 301 struct ib_ucm_req_event_resp {
235   - __u32 listen_id;
236 302 /* device */
237 303 /* port */
238 304 struct ib_ucm_path_rec primary_path;
... ... @@ -287,7 +353,6 @@
287 353 };
288 354  
289 355 struct ib_ucm_sidr_req_event_resp {
290   - __u32 listen_id;
291 356 /* device */
292 357 /* port */
293 358 __u16 pkey;
... ... @@ -307,6 +372,7 @@
307 372 #define IB_UCM_PRES_ALTERNATE 0x08
308 373  
309 374 struct ib_ucm_event_resp {
  375 + __u64 uid;
310 376 __u32 id;
311 377 __u32 event;
312 378 __u32 present;
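
Response buffers in these command structures travel as a __u64 rather than a pointer so the layout is identical for 32- and 64-bit userspace; the kernel side casts back with (void __user *)(unsigned long)cmd.response, as seen throughout ucm.c above. A userspace-side sketch of filling one of the new commands (illustrative only):

#include <stdint.h>
#include <string.h>

struct ib_ucm_destroy_id {
        uint64_t response;
        uint32_t id;
};

struct ib_ucm_destroy_id_resp {
        uint32_t events_reported;
};

static void build_destroy_cmd(struct ib_ucm_destroy_id *cmd,
                              struct ib_ucm_destroy_id_resp *resp,
                              uint32_t id)
{
        memset(cmd, 0, sizeof *cmd);
        cmd->id = id;
        /* pointer smuggled through a fixed-width integer */
        cmd->response = (uintptr_t) resp;
}

int main(void)
{
        struct ib_ucm_destroy_id cmd;
        struct ib_ucm_destroy_id_resp resp = { 0 };

        build_destroy_cmd(&cmd, &resp, 42);
        /* a write(fd, &cmd, sizeof cmd) to the ucm device would follow */
        return 0;
}
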
include/rdma/ib_user_verbs.h
... ... @@ -42,7 +42,7 @@
42 42 * Increment this value if any changes that break userspace ABI
43 43 * compatibility are made.
44 44 */
45   -#define IB_USER_VERBS_ABI_VERSION 1
  45 +#define IB_USER_VERBS_ABI_VERSION 2
46 46  
47 47 enum {
48 48 IB_USER_VERBS_CMD_QUERY_PARAMS,
49 49  
50 50  
... ... @@ -292,9 +292,16 @@
292 292 };
293 293  
294 294 struct ib_uverbs_destroy_cq {
  295 + __u64 response;
295 296 __u32 cq_handle;
  297 + __u32 reserved;
296 298 };
297 299  
  300 +struct ib_uverbs_destroy_cq_resp {
  301 + __u32 comp_events_reported;
  302 + __u32 async_events_reported;
  303 +};
  304 +
298 305 struct ib_uverbs_create_qp {
299 306 __u64 response;
300 307 __u64 user_handle;
301 308  
302 309  
... ... @@ -372,9 +379,15 @@
372 379 };
373 380  
374 381 struct ib_uverbs_destroy_qp {
  382 + __u64 response;
375 383 __u32 qp_handle;
  384 + __u32 reserved;
376 385 };
377 386  
  387 +struct ib_uverbs_destroy_qp_resp {
  388 + __u32 events_reported;
  389 +};
  390 +
378 391 struct ib_uverbs_attach_mcast {
379 392 __u8 gid[16];
380 393 __u32 qp_handle;
381 394  
... ... @@ -416,7 +429,13 @@
416 429 };
417 430  
418 431 struct ib_uverbs_destroy_srq {
  432 + __u64 response;
419 433 __u32 srq_handle;
  434 + __u32 reserved;
  435 +};
  436 +
  437 +struct ib_uverbs_destroy_srq_resp {
  438 + __u32 events_reported;
420 439 };
421 440  
422 441 #endif /* IB_USER_VERBS_H */