Commit 0eae88f31ca2b88911ce843452054139e028771f

Authored by Eric Dumazet
Committed by David S. Miller
1 parent cb903bf4ee

net: Fix various endianness glitches

Sparse can help us find endianness bugs, but we need to make some
cleanups first so that real bugs are easier to spot.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 17 changed files with 65 additions and 61 deletions
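Editor's note: the diff below repeatedly adds __force casts and __be32/__be16 types. As a minimal standalone sketch (not part of the commit), the following shows how sparse's endianness checking works and why those casts are used. The __bitwise/__force macro definitions mirror what the kernel sets up when sparse runs with __CHECKER__ defined; with a regular compiler they expand to nothing, so the file still builds. toy_hash(), its multiplier constant, and main() are hypothetical, purely for illustration.

#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int u32;
typedef u32 __bitwise __be32;	/* a 32-bit value known to be big-endian */

static unsigned int toy_hash(__be32 addr, unsigned int mask)
{
	/*
	 * Mixing a __be32 into ordinary integer arithmetic makes sparse warn
	 * "restricted __be32 degrades to integer".  For a hash the byte order
	 * does not matter, so the (__force u32) cast documents that the
	 * conversion is intentional and silences the warning -- the same
	 * pattern used throughout the diff below.
	 */
	return ((__force u32)addr * 2654435761u) & mask;
}

int main(void)
{
	__be32 addr = (__force __be32)0x0100000aU;	/* pretend on-wire value */

	return toy_hash(addr, 0xff) == 0;		/* 0 unless the hash lands on 0 */
}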

net/bridge/br_multicast.c
... ... @@ -29,7 +29,7 @@
29 29  
30 30 static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
31 31 {
32   - return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1);
  32 + return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
33 33 }
34 34  
35 35 static struct net_bridge_mdb_entry *__br_mdb_ip_get(
net/bridge/br_private.h
... ... @@ -130,19 +130,20 @@
130 130 #endif
131 131 };
132 132  
  133 +struct br_cpu_netstats {
  134 + unsigned long rx_packets;
  135 + unsigned long rx_bytes;
  136 + unsigned long tx_packets;
  137 + unsigned long tx_bytes;
  138 +};
  139 +
133 140 struct net_bridge
134 141 {
135 142 spinlock_t lock;
136 143 struct list_head port_list;
137 144 struct net_device *dev;
138 145  
139   - struct br_cpu_netstats __percpu {
140   - unsigned long rx_packets;
141   - unsigned long rx_bytes;
142   - unsigned long tx_packets;
143   - unsigned long tx_bytes;
144   - } *stats;
145   -
  146 + struct br_cpu_netstats __percpu *stats;
146 147 spinlock_t hash_lock;
147 148 struct hlist_head hash[BR_HASH_SIZE];
148 149 unsigned long feature_mask;
net/ethernet/eth.c
... ... @@ -136,7 +136,7 @@
136 136 default:
137 137 printk(KERN_DEBUG
138 138 "%s: unable to resolve type %X addresses.\n",
139   - dev->name, (int)eth->h_proto);
  139 + dev->name, (__force int)eth->h_proto);
140 140  
141 141 memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
142 142 break;
net/ipv4/af_inet.c
... ... @@ -1323,8 +1323,8 @@
1323 1323 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1324 1324 goto out_unlock;
1325 1325  
1326   - id = ntohl(*(u32 *)&iph->id);
1327   - flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
  1326 + id = ntohl(*(__be32 *)&iph->id);
  1327 + flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
1328 1328 id >>= 16;
1329 1329  
1330 1330 for (p = *head; p; p = p->next) {
... ... @@ -1337,8 +1337,8 @@
1337 1337  
1338 1338 if ((iph->protocol ^ iph2->protocol) |
1339 1339 (iph->tos ^ iph2->tos) |
1340   - (iph->saddr ^ iph2->saddr) |
1341   - (iph->daddr ^ iph2->daddr)) {
  1340 + ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
  1341 + ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1342 1342 NAPI_GRO_CB(p)->same_flow = 0;
1343 1343 continue;
1344 1344 }
net/ipv4/ipmr.c
... ... @@ -1772,10 +1772,10 @@
1772 1772  
1773 1773 vif = ipmr_find_vif(mrt, skb->dev);
1774 1774 if (vif >= 0) {
1775   - int err = ipmr_cache_unresolved(mrt, vif, skb);
  1775 + int err2 = ipmr_cache_unresolved(mrt, vif, skb);
1776 1776 read_unlock(&mrt_lock);
1777 1777  
1778   - return err;
  1778 + return err2;
1779 1779 }
1780 1780 read_unlock(&mrt_lock);
1781 1781 kfree_skb(skb);
... ... @@ -2227,9 +2227,9 @@
2227 2227 const struct ipmr_mfc_iter *it = seq->private;
2228 2228 const struct mr_table *mrt = it->mrt;
2229 2229  
2230   - seq_printf(seq, "%08lX %08lX %-3hd",
2231   - (unsigned long) mfc->mfc_mcastgrp,
2232   - (unsigned long) mfc->mfc_origin,
  2230 + seq_printf(seq, "%08X %08X %-3hd",
  2231 + (__force u32) mfc->mfc_mcastgrp,
  2232 + (__force u32) mfc->mfc_origin,
2233 2233 mfc->mfc_parent);
2234 2234  
2235 2235 if (it->cache != &mrt->mfc_unres_queue) {
net/ipv4/route.c
... ... @@ -258,10 +258,9 @@
258 258 (__raw_get_cpu_var(rt_cache_stat).field++)
259 259  
260 260 static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
261   - int genid)
  261 + int genid)
262 262 {
263   - return jhash_3words((__force u32)(__be32)(daddr),
264   - (__force u32)(__be32)(saddr),
  263 + return jhash_3words((__force u32)daddr, (__force u32)saddr,
265 264 idx, genid)
266 265 & rt_hash_mask;
267 266 }
268 267  
269 268  
... ... @@ -378,12 +377,13 @@
378 377 struct rtable *r = v;
379 378 int len;
380 379  
381   - seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
382   - "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
  380 + seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
  381 + "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
383 382 r->u.dst.dev ? r->u.dst.dev->name : "*",
384   - (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
  383 + (__force u32)r->rt_dst,
  384 + (__force u32)r->rt_gateway,
385 385 r->rt_flags, atomic_read(&r->u.dst.__refcnt),
386   - r->u.dst.__use, 0, (unsigned long)r->rt_src,
  386 + r->u.dst.__use, 0, (__force u32)r->rt_src,
387 387 (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
388 388 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
389 389 dst_metric(&r->u.dst, RTAX_WINDOW),
390 390  
391 391  
... ... @@ -685,18 +685,17 @@
685 685 static inline bool compare_hash_inputs(const struct flowi *fl1,
686 686 const struct flowi *fl2)
687 687 {
688   - return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
689   - (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
  688 + return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
  689 + ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
690 690 (fl1->iif ^ fl2->iif)) == 0);
691 691 }
692 692  
693 693 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
694 694 {
695   - return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
696   - (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
  695 + return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
  696 + ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
697 697 (fl1->mark ^ fl2->mark) |
698   - (*(u16 *)&fl1->nl_u.ip4_u.tos ^
699   - *(u16 *)&fl2->nl_u.ip4_u.tos) |
  698 + (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) |
700 699 (fl1->oif ^ fl2->oif) |
701 700 (fl1->iif ^ fl2->iif)) == 0;
702 701 }
... ... @@ -2319,8 +2318,8 @@
2319 2318 rcu_read_lock();
2320 2319 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2321 2320 rth = rcu_dereference(rth->u.dst.rt_next)) {
2322   - if (((rth->fl.fl4_dst ^ daddr) |
2323   - (rth->fl.fl4_src ^ saddr) |
  2321 + if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
  2322 + ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
2324 2323 (rth->fl.iif ^ iif) |
2325 2324 rth->fl.oif |
2326 2325 (rth->fl.fl4_tos ^ tos)) == 0 &&
net/ipv4/tcp.c
... ... @@ -2721,7 +2721,7 @@
2721 2721 struct tcphdr *th2;
2722 2722 unsigned int len;
2723 2723 unsigned int thlen;
2724   - unsigned int flags;
  2724 + __be32 flags;
2725 2725 unsigned int mss = 1;
2726 2726 unsigned int hlen;
2727 2727 unsigned int off;
... ... @@ -2771,10 +2771,10 @@
2771 2771  
2772 2772 found:
2773 2773 flush = NAPI_GRO_CB(p)->flush;
2774   - flush |= flags & TCP_FLAG_CWR;
2775   - flush |= (flags ^ tcp_flag_word(th2)) &
2776   - ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
2777   - flush |= th->ack_seq ^ th2->ack_seq;
  2774 + flush |= (__force int)(flags & TCP_FLAG_CWR);
  2775 + flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
  2776 + ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
  2777 + flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
2778 2778 for (i = sizeof(*th); i < thlen; i += 4)
2779 2779 flush |= *(u32 *)((u8 *)th + i) ^
2780 2780 *(u32 *)((u8 *)th2 + i);
... ... @@ -2795,8 +2795,9 @@
2795 2795  
2796 2796 out_check_final:
2797 2797 flush = len < mss;
2798   - flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
2799   - TCP_FLAG_SYN | TCP_FLAG_FIN);
  2798 + flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
  2799 + TCP_FLAG_RST | TCP_FLAG_SYN |
  2800 + TCP_FLAG_FIN));
2800 2801  
2801 2802 if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
2802 2803 pp = head;
net/ipv4/tcp_ipv4.c
... ... @@ -1286,8 +1286,8 @@
1286 1286 goto drop_and_release;
1287 1287  
1288 1288 /* Secret recipe starts with IP addresses */
1289   - *mess++ ^= daddr;
1290   - *mess++ ^= saddr;
  1289 + *mess++ ^= (__force u32)daddr;
  1290 + *mess++ ^= (__force u32)saddr;
1291 1291  
1292 1292 /* plus variable length Initiator Cookie */
1293 1293 c = (u8 *)mess;
net/ipv4/tcp_output.c
... ... @@ -861,7 +861,7 @@
861 861 th->urg_ptr = htons(tp->snd_up - tcb->seq);
862 862 th->urg = 1;
863 863 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
864   - th->urg_ptr = 0xFFFF;
  864 + th->urg_ptr = htons(0xFFFF);
865 865 th->urg = 1;
866 866 }
867 867 }
... ... @@ -2485,7 +2485,7 @@
2485 2485 *tail-- ^= TCP_SKB_CB(skb)->seq + 1;
2486 2486  
2487 2487 /* recommended */
2488   - *tail-- ^= ((th->dest << 16) | th->source);
  2488 + *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
2489 2489 *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
2490 2490  
2491 2491 sha_transform((__u32 *)&xvp->cookie_bakery[0],
net/ipv4/udp.c
... ... @@ -307,13 +307,13 @@
307 307 static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
308 308 unsigned int port)
309 309 {
310   - return jhash_1word(saddr, net_hash_mix(net)) ^ port;
  310 + return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
311 311 }
312 312  
313 313 int udp_v4_get_port(struct sock *sk, unsigned short snum)
314 314 {
315 315 unsigned int hash2_nulladdr =
316   - udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum);
  316 + udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
317 317 unsigned int hash2_partial =
318 318 udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
319 319  
320 320  
... ... @@ -466,14 +466,14 @@
466 466 daddr, hnum, dif,
467 467 hslot2, slot2);
468 468 if (!result) {
469   - hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum);
  469 + hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
470 470 slot2 = hash2 & udptable->mask;
471 471 hslot2 = &udptable->hash2[slot2];
472 472 if (hslot->count < hslot2->count)
473 473 goto begin;
474 474  
475 475 result = udp4_lib_lookup2(net, saddr, sport,
476   - INADDR_ANY, hnum, dif,
  476 + htonl(INADDR_ANY), hnum, dif,
477 477 hslot2, slot2);
478 478 }
479 479 rcu_read_unlock();
net/ipv6/addrconf.c
... ... @@ -588,7 +588,8 @@
588 588 * We perform the hash function over the last 64 bits of the address
589 589 * This will include the IEEE address token on links that support it.
590 590 */
591   - return jhash_2words(addr->s6_addr32[2], addr->s6_addr32[3], 0)
  591 + return jhash_2words((__force u32)addr->s6_addr32[2],
  592 + (__force u32)addr->s6_addr32[3], 0)
592 593 & (IN6_ADDR_HSIZE - 1);
593 594 }
594 595  
net/ipv6/ip6_fib.c
... ... @@ -144,7 +144,8 @@
144 144 * htonl(1 << ((~fn_bit)&0x1F))
145 145 * See include/asm-generic/bitops/le.h.
146 146 */
147   - return (1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) & addr[fn_bit >> 5];
  147 + return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) &
  148 + addr[fn_bit >> 5];
148 149 }
149 150  
150 151 static __inline__ struct fib6_node * node_alloc(void)
net/ipv6/tcp_ipv6.c
... ... @@ -1234,12 +1234,12 @@
1234 1234 goto drop_and_free;
1235 1235  
1236 1236 /* Secret recipe starts with IP addresses */
1237   - d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
  1237 + d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1238 1238 *mess++ ^= *d++;
1239 1239 *mess++ ^= *d++;
1240 1240 *mess++ ^= *d++;
1241 1241 *mess++ ^= *d++;
1242   - d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
  1242 + d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1243 1243 *mess++ ^= *d++;
1244 1244 *mess++ ^= *d++;
1245 1245 *mess++ ^= *d++;
net/ipv6/udp.c
... ... @@ -91,9 +91,9 @@
91 91 if (ipv6_addr_any(addr6))
92 92 hash = jhash_1word(0, mix);
93 93 else if (ipv6_addr_v4mapped(addr6))
94   - hash = jhash_1word(addr6->s6_addr32[3], mix);
  94 + hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
95 95 else
96   - hash = jhash2(addr6->s6_addr32, 4, mix);
  96 + hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
97 97  
98 98 return hash ^ port;
99 99 }
net/sched/sch_sfq.c
... ... @@ -123,8 +123,8 @@
123 123 case htons(ETH_P_IP):
124 124 {
125 125 const struct iphdr *iph = ip_hdr(skb);
126   - h = iph->daddr;
127   - h2 = iph->saddr ^ iph->protocol;
  126 + h = (__force u32)iph->daddr;
  127 + h2 = (__force u32)iph->saddr ^ iph->protocol;
128 128 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
129 129 (iph->protocol == IPPROTO_TCP ||
130 130 iph->protocol == IPPROTO_UDP ||
... ... @@ -138,8 +138,8 @@
138 138 case htons(ETH_P_IPV6):
139 139 {
140 140 struct ipv6hdr *iph = ipv6_hdr(skb);
141   - h = iph->daddr.s6_addr32[3];
142   - h2 = iph->saddr.s6_addr32[3] ^ iph->nexthdr;
  141 + h = (__force u32)iph->daddr.s6_addr32[3];
  142 + h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
143 143 if (iph->nexthdr == IPPROTO_TCP ||
144 144 iph->nexthdr == IPPROTO_UDP ||
145 145 iph->nexthdr == IPPROTO_UDPLITE ||
... ... @@ -150,7 +150,7 @@
150 150 break;
151 151 }
152 152 default:
153   - h = (unsigned long)skb_dst(skb) ^ skb->protocol;
  153 + h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
154 154 h2 = (unsigned long)skb->sk;
155 155 }
156 156  
net/sunrpc/xprt.c
... ... @@ -974,7 +974,7 @@
974 974  
975 975 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
976 976 {
977   - return xprt->xid++;
  977 + return (__force __be32)xprt->xid++;
978 978 }
979 979  
980 980 static inline void xprt_init_xid(struct rpc_xprt *xprt)
net/xfrm/xfrm_hash.h
... ... @@ -16,7 +16,8 @@
16 16  
17 17 static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
18 18 {
19   - return ntohl(daddr->a4 + saddr->a4);
  19 + u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4;
  20 + return ntohl((__force __be32)sum);
20 21 }
21 22  
22 23 static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)