Commit 3f44675439b136d51179d31eb5a498383cb38624

Authored by Roland Dreier
1 parent 6e86841d05

RDMA/cma: Remove padding arrays by using struct sockaddr_storage

There are a few places where the RDMA CM code handles IPv6 by doing

	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];

This is fragile and ugly; handle this in a better way with just

	struct sockaddr_storage	addr;

[ Also roll in patch from Aleksey Senin <alekseys@voltaire.com> to
  switch to struct sockaddr_storage and get rid of padding arrays in
  struct rdma_addr. ]

Signed-off-by: Roland Dreier <rolandd@cisco.com>

Showing 3 changed files with 26 additions and 33 deletions
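
The common thread in the hunks below is that struct sockaddr_storage is
defined to be large enough (and suitably aligned) for any protocol-specific
address, including struct sockaddr_in6, so the hand-rolled padding arrays
become unnecessary.  The cost, visible at nearly every call site, is an
explicit cast back to struct sockaddr * when the field is passed to a
generic consumer.  A minimal userspace sketch of the pattern (hypothetical
names, libc headers rather than the kernel's, not code from this commit):

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	struct example_addr {
		struct sockaddr_storage src_addr;	/* room for IPv4 or IPv6 */
	};

	/* stands in for a consumer such as cma_get_service_id(), which
	 * takes a generic struct sockaddr * */
	static int example_family(struct sockaddr *sa)
	{
		return sa->sa_family;
	}

	int main(void)
	{
		struct example_addr a;
		struct sockaddr_in6 *sin6;

		/* an IPv6 address fits without any padding tricks ... */
		sin6 = (struct sockaddr_in6 *) &a.src_addr;
		memset(sin6, 0, sizeof *sin6);
		sin6->sin6_family = AF_INET6;

		/* ... but generic consumers now need an explicit cast */
		return example_family((struct sockaddr *) &a.src_addr) == AF_INET6 ? 0 : 1;
	}
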

drivers/infiniband/core/cma.c
... ... @@ -155,9 +155,7 @@
155 155 } multicast;
156 156 struct list_head list;
157 157 void *context;
158   - struct sockaddr addr;
159   - u8 pad[sizeof(struct sockaddr_in6) -
160   - sizeof(struct sockaddr)];
  158 + struct sockaddr_storage addr;
161 159 };
162 160  
163 161 struct cma_work {
... ... @@ -786,8 +784,8 @@
786 784 cma_cancel_route(id_priv);
787 785 break;
788 786 case CMA_LISTEN:
789   - if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
790   - !id_priv->cma_dev)
  787 + if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
  788 + && !id_priv->cma_dev)
791 789 cma_cancel_listens(id_priv);
792 790 break;
793 791 default:
... ... @@ -1026,7 +1024,7 @@
1026 1024 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
1027 1025  
1028 1026 ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
1029   - ret = rdma_translate_ip(&id->route.addr.src_addr,
  1027 + ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
1030 1028 &id->route.addr.dev_addr);
1031 1029 if (ret)
1032 1030 goto destroy_id;
... ... @@ -1064,7 +1062,7 @@
1064 1062 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1065 1063 ip_ver, port, src, dst);
1066 1064  
1067   - ret = rdma_translate_ip(&id->route.addr.src_addr,
  1065 + ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
1068 1066 &id->route.addr.dev_addr);
1069 1067 if (ret)
1070 1068 goto err;
... ... @@ -1377,7 +1375,7 @@
1377 1375 if (IS_ERR(id_priv->cm_id.ib))
1378 1376 return PTR_ERR(id_priv->cm_id.ib);
1379 1377  
1380   - addr = &id_priv->id.route.addr.src_addr;
  1378 + addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
1381 1379 svc_id = cma_get_service_id(id_priv->id.ps, addr);
1382 1380 if (cma_any_addr(addr))
1383 1381 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
... ... @@ -1443,7 +1441,7 @@
1443 1441  
1444 1442 dev_id_priv->state = CMA_ADDR_BOUND;
1445 1443 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
1446   - ip_addr_size(&id_priv->id.route.addr.src_addr));
  1444 + ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
1447 1445  
1448 1446 cma_attach_to_dev(dev_id_priv, cma_dev);
1449 1447 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
1450 1448  
... ... @@ -1563,13 +1561,14 @@
1563 1561 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
1564 1562 path_rec.numb_path = 1;
1565 1563 path_rec.reversible = 1;
1566   - path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
  1564 + path_rec.service_id = cma_get_service_id(id_priv->id.ps,
  1565 + (struct sockaddr *) &addr->dst_addr);
1567 1566  
1568 1567 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
1569 1568 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
1570 1569 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
1571 1570  
1572   - if (addr->src_addr.sa_family == AF_INET) {
  1571 + if (addr->src_addr.ss_family == AF_INET) {
1573 1572 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
1574 1573 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
1575 1574 } else {
... ... @@ -1848,7 +1847,7 @@
1848 1847 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1849 1848 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1850 1849  
1851   - if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
  1850 + if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
1852 1851 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
1853 1852 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
1854 1853 src_in->sin_family = dst_in->sin_family;
... ... @@ -1897,7 +1896,7 @@
1897 1896 if (cma_any_addr(dst_addr))
1898 1897 ret = cma_resolve_loopback(id_priv);
1899 1898 else
1900   - ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
  1899 + ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
1901 1900 dst_addr, &id->route.addr.dev_addr,
1902 1901 timeout_ms, addr_handler, id_priv);
1903 1902 if (ret)
1904 1903  
... ... @@ -2021,11 +2020,11 @@
2021 2020 * We don't support binding to any address if anyone is bound to
2022 2021 * a specific address on the same port.
2023 2022 */
2024   - if (cma_any_addr(&id_priv->id.route.addr.src_addr))
  2023 + if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
2025 2024 return -EADDRNOTAVAIL;
2026 2025  
2027 2026 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
2028   - if (cma_any_addr(&cur_id->id.route.addr.src_addr))
  2027 + if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
2029 2028 return -EADDRNOTAVAIL;
2030 2029  
2031 2030 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
... ... @@ -2060,7 +2059,7 @@
2060 2059 }
2061 2060  
2062 2061 mutex_lock(&lock);
2063   - if (cma_any_port(&id_priv->id.route.addr.src_addr))
  2062 + if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
2064 2063 ret = cma_alloc_any_port(ps, id_priv);
2065 2064 else
2066 2065 ret = cma_use_port(ps, id_priv);
... ... @@ -2232,7 +2231,7 @@
2232 2231  
2233 2232 req.path = route->path_rec;
2234 2233 req.service_id = cma_get_service_id(id_priv->id.ps,
2235   - &route->addr.dst_addr);
  2234 + (struct sockaddr *) &route->addr.dst_addr);
2236 2235 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
2237 2236 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2238 2237  
... ... @@ -2283,7 +2282,7 @@
2283 2282 req.alternate_path = &route->path_rec[1];
2284 2283  
2285 2284 req.service_id = cma_get_service_id(id_priv->id.ps,
2286   - &route->addr.dst_addr);
  2285 + (struct sockaddr *) &route->addr.dst_addr);
2287 2286 req.qp_num = id_priv->qp_num;
2288 2287 req.qp_type = IB_QPT_RC;
2289 2288 req.starting_psn = id_priv->seq_num;
... ... @@ -2667,7 +2666,7 @@
2667 2666 if (ret)
2668 2667 return ret;
2669 2668  
2670   - cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
  2669 + cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
2671 2670 if (id_priv->id.ps == RDMA_PS_UDP)
2672 2671 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
2673 2672 ib_addr_get_sgid(dev_addr, &rec.port_gid);
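
One hunk above (old line 1572) changes more than a cast: struct
sockaddr_storage names its address-family member ss_family rather than
sa_family, so the field access changes along with the type.  Both names
refer to the same family value, which is what keeps the casts elsewhere in
the diff safe.  A small illustrative check, again assuming ordinary libc
headers rather than kernel code:

	#include <assert.h>
	#include <sys/socket.h>

	int main(void)
	{
		struct sockaddr_storage ss = { .ss_family = AF_INET };
		struct sockaddr *sa = (struct sockaddr *) &ss;

		/* the same address family is reachable under either name */
		assert(ss.ss_family == AF_INET);
		assert(sa->sa_family == AF_INET);
		return 0;
	}
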
drivers/infiniband/core/ucma.c
... ... @@ -81,9 +81,7 @@
81 81  
82 82 u64 uid;
83 83 struct list_head list;
84   - struct sockaddr addr;
85   - u8 pad[sizeof(struct sockaddr_in6) -
86   - sizeof(struct sockaddr)];
  84 + struct sockaddr_storage addr;
87 85 };
88 86  
89 87 struct ucma_event {
90 88  
... ... @@ -603,11 +601,11 @@
603 601 return PTR_ERR(ctx);
604 602  
605 603 memset(&resp, 0, sizeof resp);
606   - addr = &ctx->cm_id->route.addr.src_addr;
  604 + addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
607 605 memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
608 606 sizeof(struct sockaddr_in) :
609 607 sizeof(struct sockaddr_in6));
610   - addr = &ctx->cm_id->route.addr.dst_addr;
  608 + addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
611 609 memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
612 610 sizeof(struct sockaddr_in) :
613 611 sizeof(struct sockaddr_in6));
... ... @@ -913,7 +911,7 @@
913 911  
914 912 mc->uid = cmd.uid;
915 913 memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
916   - ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
  914 + ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
917 915 if (ret)
918 916 goto err2;
919 917  
... ... @@ -929,7 +927,7 @@
929 927 return 0;
930 928  
931 929 err3:
932   - rdma_leave_multicast(ctx->cm_id, &mc->addr);
  930 + rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
933 931 ucma_cleanup_mc_events(mc);
934 932 err2:
935 933 mutex_lock(&mut);
... ... @@ -975,7 +973,7 @@
975 973 goto out;
976 974 }
977 975  
978   - rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
  976 + rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
979 977 mutex_lock(&mc->ctx->file->mut);
980 978 ucma_cleanup_mc_events(mc);
981 979 list_del(&mc->list);
include/rdma/rdma_cm.h
... ... @@ -71,12 +71,8 @@
71 71 };
72 72  
73 73 struct rdma_addr {
74   - struct sockaddr src_addr;
75   - u8 src_pad[sizeof(struct sockaddr_in6) -
76   - sizeof(struct sockaddr)];
77   - struct sockaddr dst_addr;
78   - u8 dst_pad[sizeof(struct sockaddr_in6) -
79   - sizeof(struct sockaddr)];
  74 + struct sockaddr_storage src_addr;
  75 + struct sockaddr_storage dst_addr;
80 76 struct rdma_dev_addr dev_addr;
81 77 };
82 78
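
The removed src_pad/dst_pad arrays were only there to reserve
sizeof(struct sockaddr_in6) bytes after each struct sockaddr; struct
sockaddr_storage already guarantees at least that much room, which is what
makes them safe to drop.  A compile-time sketch of that invariant, using
userspace headers purely for illustration:

	#include <sys/socket.h>
	#include <netinet/in.h>

	/* build breaks (negative array size) if sockaddr_storage were ever
	 * smaller than the space the old padding arrays reserved */
	typedef char storage_big_enough[
		sizeof(struct sockaddr_storage) >= sizeof(struct sockaddr_in6) ? 1 : -1];
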