Commit 31bf97f51154cc669513b329d395bcad37b9e213
Committed by
Greg Kroah-Hartman
1 parent
1fb326794a
lib/checksum.c: fix build for generic csum_tcpudp_nofold
commit 9ce357795ef208faa0d59894d9d119a7434e37f3 upstream. The fixed commit added from64to32 under `#ifndef do_csum` but used it under `#ifndef csum_tcpudp_nofold`, breaking some builds (Fengguang's robot reported TILEGX's). Move from64to32 under the latter. Fixes: 150ae0e94634 ("lib/checksum.c: fix carry in csum_tcpudp_nofold") Reported-by: kbuild test robot <fengguang.wu@intel.com> Signed-off-by: Karl Beldan <karl.beldan@rivierawaves.com> Cc: Eric Dumazet <edumazet@google.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: David S. Miller <davem@davemloft.net> Cc: Guenter Roeck <linux@roeck-us.net> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Showing 1 changed file with 9 additions and 9 deletions Inline Diff
lib/checksum.c
1 | /* | 1 | /* |
2 | * | 2 | * |
3 | * INET An implementation of the TCP/IP protocol suite for the LINUX | 3 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
4 | * operating system. INET is implemented using the BSD Socket | 4 | * operating system. INET is implemented using the BSD Socket |
5 | * interface as the means of communication with the user level. | 5 | * interface as the means of communication with the user level. |
6 | * | 6 | * |
7 | * IP/TCP/UDP checksumming routines | 7 | * IP/TCP/UDP checksumming routines |
8 | * | 8 | * |
9 | * Authors: Jorge Cwik, <jorge@laser.satlink.net> | 9 | * Authors: Jorge Cwik, <jorge@laser.satlink.net> |
10 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> | 10 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> |
11 | * Tom May, <ftom@netcom.com> | 11 | * Tom May, <ftom@netcom.com> |
12 | * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de> | 12 | * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de> |
13 | * Lots of code moved from tcp.c and ip.c; see those files | 13 | * Lots of code moved from tcp.c and ip.c; see those files |
14 | * for more names. | 14 | * for more names. |
15 | * | 15 | * |
16 | * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek: | 16 | * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek: |
17 | * Fixed some nasty bugs, causing some horrible crashes. | 17 | * Fixed some nasty bugs, causing some horrible crashes. |
18 | * A: At some points, the sum (%0) was used as | 18 | * A: At some points, the sum (%0) was used as |
19 | * length-counter instead of the length counter | 19 | * length-counter instead of the length counter |
20 | * (%1). Thanks to Roman Hodek for pointing this out. | 20 | * (%1). Thanks to Roman Hodek for pointing this out. |
21 | * B: GCC seems to mess up if one uses too many | 21 | * B: GCC seems to mess up if one uses too many |
22 | * data-registers to hold input values and one tries to | 22 | * data-registers to hold input values and one tries to |
23 | * specify d0 and d1 as scratch registers. Letting gcc | 23 | * specify d0 and d1 as scratch registers. Letting gcc |
24 | * choose these registers itself solves the problem. | 24 | * choose these registers itself solves the problem. |
25 | * | 25 | * |
26 | * This program is free software; you can redistribute it and/or | 26 | * This program is free software; you can redistribute it and/or |
27 | * modify it under the terms of the GNU General Public License | 27 | * modify it under the terms of the GNU General Public License |
28 | * as published by the Free Software Foundation; either version | 28 | * as published by the Free Software Foundation; either version |
29 | * 2 of the License, or (at your option) any later version. | 29 | * 2 of the License, or (at your option) any later version. |
30 | */ | 30 | */ |
31 | 31 | ||
32 | /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access | 32 | /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access |
33 | kills, so most of the assembly has to go. */ | 33 | kills, so most of the assembly has to go. */ |
34 | 34 | ||
35 | #include <linux/export.h> | 35 | #include <linux/export.h> |
36 | #include <net/checksum.h> | 36 | #include <net/checksum.h> |
37 | 37 | ||
38 | #include <asm/byteorder.h> | 38 | #include <asm/byteorder.h> |
39 | 39 | ||
40 | #ifndef do_csum | 40 | #ifndef do_csum |
static inline unsigned short from32to16(unsigned int x)
{
	/*
	 * Fold a 32-bit ones'-complement sum down to 16 bits.
	 * After the first fold the value fits in 17 bits, so a single
	 * further addition of the (0 or 1) carry cannot overflow 17 bits
	 * and the low 16 bits are the fully folded result.
	 */
	unsigned int folded = (x >> 16) + (x & 0xffff);

	folded += folded >> 16;		/* wrap the final carry around */
	return (unsigned short)folded;
}
49 | 49 | ||
50 | static inline u32 from64to32(u64 x) | ||
51 | { | ||
52 | /* add up 32-bit and 32-bit for 32+c bit */ | ||
53 | x = (x & 0xffffffff) + (x >> 32); | ||
54 | /* add up carry.. */ | ||
55 | x = (x & 0xffffffff) + (x >> 32); | ||
56 | return (u32)x; | ||
57 | } | ||
58 | |||
static unsigned int do_csum(const unsigned char *buff, int len)
{
	unsigned int sum = 0;
	int odd;

	if (len <= 0)
		return 0;

	/* Remember whether we started on an odd address: the final
	 * 16-bit result must be byte-swapped in that case so that the
	 * ones'-complement sum matches a byte-aligned computation. */
	odd = 1 & (unsigned long)buff;
	if (odd) {
		/* Consume the leading byte so the buffer becomes 2-byte
		 * aligned; its lane depends on host endianness. */
#ifdef __LITTLE_ENDIAN
		sum += (*buff << 8);
#else
		sum = *buff;
#endif
		buff++;
		len--;
	}
	if (len >= 2) {
		if (2 & (unsigned long)buff) {
			/* One 16-bit load brings us to 4-byte alignment. */
			sum += *(unsigned short *)buff;
			buff += 2;
			len -= 2;
		}
		if (len >= 4) {
			/* Main loop: sum whole 32-bit words, tracking the
			 * carry out of each addition manually. */
			const unsigned char *tail = buff + ((unsigned)len & ~3);
			unsigned int carry = 0;

			while (buff < tail) {
				unsigned int word = *(unsigned int *)buff;

				buff += 4;
				sum += carry;
				sum += word;
				carry = word > sum; /* unsigned overflow test */
			}
			sum += carry;
			sum = (sum & 0xffff) + (sum >> 16);
		}
		if (len & 2) {
			/* Trailing 16-bit chunk. */
			sum += *(unsigned short *)buff;
			buff += 2;
		}
	}
	if (len & 1)
		/* Trailing lone byte, opposite lane from the leading one. */
#ifdef __LITTLE_ENDIAN
		sum += *buff;
#else
		sum += (*buff << 8);
#endif
	/* Fold to 16 bits with end-around carry (inlined from32to16). */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	if (odd)
		sum = ((sum >> 8) & 0xff) | ((sum & 0xff) << 8);
	return sum;
}
112 | #endif | 103 | #endif |
113 | 104 | ||
114 | #ifndef ip_fast_csum | 105 | #ifndef ip_fast_csum |
115 | /* | 106 | /* |
116 | * This is a version of ip_compute_csum() optimized for IP headers, | 107 | * This is a version of ip_compute_csum() optimized for IP headers, |
117 | * which always checksum on 4 octet boundaries. | 108 | * which always checksum on 4 octet boundaries. |
118 | */ | 109 | */ |
119 | __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | 110 | __sum16 ip_fast_csum(const void *iph, unsigned int ihl) |
120 | { | 111 | { |
121 | return (__force __sum16)~do_csum(iph, ihl*4); | 112 | return (__force __sum16)~do_csum(iph, ihl*4); |
122 | } | 113 | } |
123 | EXPORT_SYMBOL(ip_fast_csum); | 114 | EXPORT_SYMBOL(ip_fast_csum); |
124 | #endif | 115 | #endif |
125 | 116 | ||
126 | /* | 117 | /* |
127 | * computes the checksum of a memory block at buff, length len, | 118 | * computes the checksum of a memory block at buff, length len, |
128 | * and adds in "sum" (32-bit) | 119 | * and adds in "sum" (32-bit) |
129 | * | 120 | * |
130 | * returns a 32-bit number suitable for feeding into itself | 121 | * returns a 32-bit number suitable for feeding into itself |
131 | * or csum_tcpudp_magic | 122 | * or csum_tcpudp_magic |
132 | * | 123 | * |
133 | * this function must be called with even lengths, except | 124 | * this function must be called with even lengths, except |
134 | * for the last fragment, which may be odd | 125 | * for the last fragment, which may be odd |
135 | * | 126 | * |
136 | * it's best to have buff aligned on a 32-bit boundary | 127 | * it's best to have buff aligned on a 32-bit boundary |
137 | */ | 128 | */ |
138 | __wsum csum_partial(const void *buff, int len, __wsum wsum) | 129 | __wsum csum_partial(const void *buff, int len, __wsum wsum) |
139 | { | 130 | { |
140 | unsigned int sum = (__force unsigned int)wsum; | 131 | unsigned int sum = (__force unsigned int)wsum; |
141 | unsigned int result = do_csum(buff, len); | 132 | unsigned int result = do_csum(buff, len); |
142 | 133 | ||
143 | /* add in old sum, and carry.. */ | 134 | /* add in old sum, and carry.. */ |
144 | result += sum; | 135 | result += sum; |
145 | if (sum > result) | 136 | if (sum > result) |
146 | result += 1; | 137 | result += 1; |
147 | return (__force __wsum)result; | 138 | return (__force __wsum)result; |
148 | } | 139 | } |
149 | EXPORT_SYMBOL(csum_partial); | 140 | EXPORT_SYMBOL(csum_partial); |
150 | 141 | ||
151 | /* | 142 | /* |
152 | * this routine is used for miscellaneous IP-like checksums, mainly | 143 | * this routine is used for miscellaneous IP-like checksums, mainly |
153 | * in icmp.c | 144 | * in icmp.c |
154 | */ | 145 | */ |
155 | __sum16 ip_compute_csum(const void *buff, int len) | 146 | __sum16 ip_compute_csum(const void *buff, int len) |
156 | { | 147 | { |
157 | return (__force __sum16)~do_csum(buff, len); | 148 | return (__force __sum16)~do_csum(buff, len); |
158 | } | 149 | } |
159 | EXPORT_SYMBOL(ip_compute_csum); | 150 | EXPORT_SYMBOL(ip_compute_csum); |
160 | 151 | ||
161 | /* | 152 | /* |
162 | * copy from fs while checksumming, otherwise like csum_partial | 153 | * copy from fs while checksumming, otherwise like csum_partial |
163 | */ | 154 | */ |
164 | __wsum | 155 | __wsum |
165 | csum_partial_copy_from_user(const void __user *src, void *dst, int len, | 156 | csum_partial_copy_from_user(const void __user *src, void *dst, int len, |
166 | __wsum sum, int *csum_err) | 157 | __wsum sum, int *csum_err) |
167 | { | 158 | { |
168 | int missing; | 159 | int missing; |
169 | 160 | ||
170 | missing = __copy_from_user(dst, src, len); | 161 | missing = __copy_from_user(dst, src, len); |
171 | if (missing) { | 162 | if (missing) { |
172 | memset(dst + len - missing, 0, missing); | 163 | memset(dst + len - missing, 0, missing); |
173 | *csum_err = -EFAULT; | 164 | *csum_err = -EFAULT; |
174 | } else | 165 | } else |
175 | *csum_err = 0; | 166 | *csum_err = 0; |
176 | 167 | ||
177 | return csum_partial(dst, len, sum); | 168 | return csum_partial(dst, len, sum); |
178 | } | 169 | } |
179 | EXPORT_SYMBOL(csum_partial_copy_from_user); | 170 | EXPORT_SYMBOL(csum_partial_copy_from_user); |
180 | 171 | ||
181 | /* | 172 | /* |
182 | * copy from ds while checksumming, otherwise like csum_partial | 173 | * copy from ds while checksumming, otherwise like csum_partial |
183 | */ | 174 | */ |
184 | __wsum | 175 | __wsum |
185 | csum_partial_copy(const void *src, void *dst, int len, __wsum sum) | 176 | csum_partial_copy(const void *src, void *dst, int len, __wsum sum) |
186 | { | 177 | { |
187 | memcpy(dst, src, len); | 178 | memcpy(dst, src, len); |
188 | return csum_partial(dst, len, sum); | 179 | return csum_partial(dst, len, sum); |
189 | } | 180 | } |
190 | EXPORT_SYMBOL(csum_partial_copy); | 181 | EXPORT_SYMBOL(csum_partial_copy); |
191 | 182 | ||
192 | #ifndef csum_tcpudp_nofold | 183 | #ifndef csum_tcpudp_nofold |
184 | static inline u32 from64to32(u64 x) | ||
185 | { | ||
186 | /* add up 32-bit and 32-bit for 32+c bit */ | ||
187 | x = (x & 0xffffffff) + (x >> 32); | ||
188 | /* add up carry.. */ | ||
189 | x = (x & 0xffffffff) + (x >> 32); | ||
190 | return (u32)x; | ||
191 | } | ||
192 | |||
193 | __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | 193 | __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, |
194 | unsigned short len, | 194 | unsigned short len, |
195 | unsigned short proto, | 195 | unsigned short proto, |
196 | __wsum sum) | 196 | __wsum sum) |
197 | { | 197 | { |
198 | unsigned long long s = (__force u32)sum; | 198 | unsigned long long s = (__force u32)sum; |
199 | 199 | ||
200 | s += (__force u32)saddr; | 200 | s += (__force u32)saddr; |
201 | s += (__force u32)daddr; | 201 | s += (__force u32)daddr; |
202 | #ifdef __BIG_ENDIAN | 202 | #ifdef __BIG_ENDIAN |