Commit a4f89fb7c072b8592b296c2ba216269c0c96db43

Authored by Al Viro
Committed by David S. Miller
1 parent 9d3d419558

[NET]: X86_64 checksum annotations and cleanups.

* sanitize prototypes, annotate
* usual ntohs->shift

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 3 changed files with 53 additions and 45 deletions. Side-by-side Diff

arch/x86_64/lib/csum-partial.c
... ... @@ -132,9 +132,10 @@
132 132 *
133 133 * it's best to have buff aligned on a 64-bit boundary
134 134 */
135   -unsigned csum_partial(const unsigned char *buff, unsigned len, unsigned sum)
  135 +__wsum csum_partial(const void *buff, int len, __wsum sum)
136 136 {
137   - return add32_with_carry(do_csum(buff, len), sum);
  137 + return (__force __wsum)add32_with_carry(do_csum(buff, len),
  138 + (__force u32)sum);
138 139 }
139 140  
140 141 EXPORT_SYMBOL(csum_partial);
... ... @@ -143,7 +144,7 @@
143 144 * this routine is used for miscellaneous IP-like checksums, mainly
144 145 * in icmp.c
145 146 */
146   -unsigned short ip_compute_csum(unsigned char * buff, int len)
  147 +__sum16 ip_compute_csum(const void *buff, int len)
147 148 {
148 149 return csum_fold(csum_partial(buff,len,0));
149 150 }
arch/x86_64/lib/csum-wrappers.c
... ... @@ -18,9 +18,9 @@
18 18 * Returns an 32bit unfolded checksum of the buffer.
19 19 * src and dst are best aligned to 64bits.
20 20 */
21   -unsigned int
22   -csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
23   - int len, unsigned int isum, int *errp)
  21 +__wsum
  22 +csum_partial_copy_from_user(const void __user *src, void *dst,
  23 + int len, __wsum isum, int *errp)
24 24 {
25 25 might_sleep();
26 26 *errp = 0;
27 27  
28 28  
... ... @@ -34,17 +34,19 @@
34 34 if (unlikely((unsigned long)src & 6)) {
35 35 while (((unsigned long)src & 6) && len >= 2) {
36 36 __u16 val16;
37   - *errp = __get_user(val16, (__u16 __user *)src);
  37 + *errp = __get_user(val16, (const __u16 __user *)src);
38 38 if (*errp)
39 39 return isum;
40 40 *(__u16 *)dst = val16;
41   - isum = add32_with_carry(isum, val16);
  41 + isum = (__force __wsum)add32_with_carry(
  42 + (__force unsigned)isum, val16);
42 43 src += 2;
43 44 dst += 2;
44 45 len -= 2;
45 46 }
46 47 }
47   - isum = csum_partial_copy_generic((__force void *)src,dst,len,isum,errp,NULL);
  48 + isum = csum_partial_copy_generic((__force const void *)src,
  49 + dst, len, isum, errp, NULL);
48 50 if (likely(*errp == 0))
49 51 return isum;
50 52 }
... ... @@ -66,9 +68,9 @@
66 68 * Returns an 32bit unfolded checksum of the buffer.
67 69 * src and dst are best aligned to 64bits.
68 70 */
69   -unsigned int
70   -csum_partial_copy_to_user(unsigned const char *src, unsigned char __user *dst,
71   - int len, unsigned int isum, int *errp)
  71 +__wsum
  72 +csum_partial_copy_to_user(const void *src, void __user *dst,
  73 + int len, __wsum isum, int *errp)
72 74 {
73 75 might_sleep();
74 76 if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
... ... @@ -79,7 +81,8 @@
79 81 if (unlikely((unsigned long)dst & 6)) {
80 82 while (((unsigned long)dst & 6) && len >= 2) {
81 83 __u16 val16 = *(__u16 *)src;
82   - isum = add32_with_carry(isum, val16);
  84 + isum = (__force __wsum)add32_with_carry(
  85 + (__force unsigned)isum, val16);
83 86 *errp = __put_user(val16, (__u16 __user *)dst);
84 87 if (*errp)
85 88 return isum;
86 89  
87 90  
... ... @@ -104,19 +107,21 @@
104 107 *
105 108 * Returns an 32bit unfolded checksum of the buffer.
106 109 */
107   -unsigned int
108   -csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
  110 +__wsum
  111 +csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
109 112 {
110 113 return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
111 114 }
112 115 EXPORT_SYMBOL(csum_partial_copy_nocheck);
113 116  
114   -unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
115   - __u32 len, unsigned short proto, unsigned int sum)
  117 +__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
  118 + const struct in6_addr *daddr,
  119 + __u32 len, unsigned short proto, __wsum sum)
116 120 {
117 121 __u64 rest, sum64;
118 122  
119   - rest = (__u64)htonl(len) + (__u64)htons(proto) + (__u64)sum;
  123 + rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
  124 + (__force __u64)sum;
120 125 asm(" addq (%[saddr]),%[sum]\n"
121 126 " adcq 8(%[saddr]),%[sum]\n"
122 127 " adcq (%[daddr]),%[sum]\n"
... ... @@ -124,7 +129,7 @@
124 129 " adcq $0,%[sum]\n"
125 130 : [sum] "=r" (sum64)
126 131 : "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr));
127   - return csum_fold(add32_with_carry(sum64 & 0xffffffff, sum64>>32));
  132 + return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
128 133 }
129 134  
130 135 EXPORT_SYMBOL(csum_ipv6_magic);
include/asm-x86_64/checksum.h
... ... @@ -19,15 +19,16 @@
19 19 * the last step before putting a checksum into a packet.
20 20 * Make sure not to mix with 64bit checksums.
21 21 */
22   -static inline unsigned int csum_fold(unsigned int sum)
  22 +static inline __sum16 csum_fold(__wsum sum)
23 23 {
24 24 __asm__(
25 25 " addl %1,%0\n"
26 26 " adcl $0xffff,%0"
27 27 : "=r" (sum)
28   - : "r" (sum << 16), "0" (sum & 0xffff0000)
  28 + : "r" ((__force u32)sum << 16),
  29 + "0" ((__force u32)sum & 0xffff0000)
29 30 );
30   - return (~sum) >> 16;
  31 + return (__force __sum16)(~(__force u32)sum >> 16);
31 32 }
32 33  
33 34 /*
... ... @@ -43,7 +44,7 @@
43 44 * iph: ipv4 header
44 45 * ihl: length of header / 4
45 46 */
46   -static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
  47 +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
47 48 {
48 49 unsigned int sum;
49 50  
... ... @@ -70,7 +71,7 @@
70 71 : "=r" (sum), "=r" (iph), "=r" (ihl)
71 72 : "1" (iph), "2" (ihl)
72 73 : "memory");
73   - return(sum);
  74 + return (__force __sum16)sum;
74 75 }
75 76  
76 77 /**
77 78  
... ... @@ -84,16 +85,17 @@
84 85 * Returns the pseudo header checksum the input data. Result is
85 86 * 32bit unfolded.
86 87 */
87   -static inline unsigned long
88   -csum_tcpudp_nofold(unsigned saddr, unsigned daddr, unsigned short len,
89   - unsigned short proto, unsigned int sum)
  88 +static inline __wsum
  89 +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
  90 + unsigned short proto, __wsum sum)
90 91 {
91 92 asm(" addl %1, %0\n"
92 93 " adcl %2, %0\n"
93 94 " adcl %3, %0\n"
94 95 " adcl $0, %0\n"
95 96 : "=r" (sum)
96   - : "g" (daddr), "g" (saddr), "g" ((ntohs(len)<<16)+proto*256), "0" (sum));
  97 + : "g" (daddr), "g" (saddr),
  98 + "g" ((len + proto)<<8), "0" (sum));
97 99 return sum;
98 100 }
99 101  
... ... @@ -109,9 +111,9 @@
109 111 * Returns the 16bit pseudo header checksum the input data already
110 112 * complemented and ready to be filled in.
111 113 */
112   -static inline unsigned short int
113   -csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
114   - unsigned short len, unsigned short proto, unsigned int sum)
  114 +static inline __sum16
  115 +csum_tcpudp_magic(__be32 saddr, __be32 daddr,
  116 + unsigned short len, unsigned short proto, __wsum sum)
115 117 {
116 118 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
117 119 }
118 120  
119 121  
... ... @@ -126,25 +128,25 @@
126 128 * Before filling it in it needs to be csum_fold()'ed.
127 129 * buff should be aligned to a 64bit boundary if possible.
128 130 */
129   -extern unsigned int csum_partial(const unsigned char *buff, unsigned len, unsigned int sum);
  131 +extern __wsum csum_partial(const void *buff, int len, __wsum sum);
130 132  
131 133 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
132 134 #define HAVE_CSUM_COPY_USER 1
133 135  
134 136  
135 137 /* Do not call this directly. Use the wrappers below */
136   -extern unsigned long csum_partial_copy_generic(const unsigned char *src, const unsigned char *dst,
137   - unsigned len,
138   - unsigned sum,
  138 +extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
  139 + int len,
  140 + __wsum sum,
139 141 int *src_err_ptr, int *dst_err_ptr);
140 142  
141 143  
142   -extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
143   - int len, unsigned int isum, int *errp);
144   -extern unsigned int csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst,
145   - int len, unsigned int isum, int *errp);
146   -extern unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len,
147   - unsigned int sum);
  144 +extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
  145 + int len, __wsum isum, int *errp);
  146 +extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
  147 + int len, __wsum isum, int *errp);
  148 +extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
  149 + __wsum sum);
148 150  
149 151 /* Old names. To be removed. */
150 152 #define csum_and_copy_to_user csum_partial_copy_to_user
... ... @@ -158,7 +160,7 @@
158 160 * Returns the 16bit folded/inverted checksum of the passed buffer.
159 161 * Ready to fill in.
160 162 */
161   -extern unsigned short ip_compute_csum(unsigned char * buff, int len);
  163 +extern __sum16 ip_compute_csum(const void *buff, int len);
162 164  
163 165 /**
164 166 * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
... ... @@ -176,9 +178,9 @@
176 178 struct in6_addr;
177 179  
178 180 #define _HAVE_ARCH_IPV6_CSUM 1
179   -extern unsigned short
180   -csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
181   - __u32 len, unsigned short proto, unsigned int sum);
  181 +extern __sum16
  182 +csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
  183 + __u32 len, unsigned short proto, __wsum sum);
182 184  
183 185 static inline unsigned add32_with_carry(unsigned a, unsigned b)
184 186 {