Commit 920be88e92001fee13d876a6c4aa245cc658800f

Authored by Ramon Fried
Committed by Tom Rini
1 parent c511147cf4

include/linux/byteorder: Sync to latest Linux definitions

generic.h has changed in Linux and additional functions were
added.

This commit takes the latest and greatest from Linux (v4.17-rc5)
to aid with porting drivers that utilize these functions.

Signed-off-by: Ramon Fried <ramon.fried@gmail.com>
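
For illustration only (not part of the commit): a minimal sketch of how a ported driver might use the newly added *_add_cpu helpers, assuming the usual <linux/types.h> and <asm/byteorder.h> include chain; the struct and field names below are hypothetical.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical little-endian descriptor shared with hardware. */
struct demo_desc {
	__le32 byte_count;
	__le16 frag_cnt;
};

static void demo_account(struct demo_desc *desc, u32 len)
{
	/* Read-modify-write the LE fields without open-coded conversions. */
	le32_add_cpu(&desc->byte_count, len);
	le16_add_cpu(&desc->frag_cnt, 1);
}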

Showing 1 changed file with 67 additions and 40 deletions

include/linux/byteorder/generic.h
  1 +/* SPDX-License-Identifier: GPL-2.0 */
1 2 #ifndef _LINUX_BYTEORDER_GENERIC_H
2 3 #define _LINUX_BYTEORDER_GENERIC_H
3 4  
4 5 /*
5   - * linux/byteorder_generic.h
  6 + * linux/byteorder/generic.h
6 7 * Generic Byte-reordering support
7 8 *
  9 + * The "... p" macros, like le64_to_cpup, can be used with pointers
  10 + * to unaligned data, but there will be a performance penalty on
  11 + * some architectures. Use get_unaligned for unaligned data.
  12 + *
8 13 * Francois-Rene Rideau <fare@tunes.org> 19970707
9 14 * gathered all the good ideas from all asm-foo/byteorder.h into one file,
10 15 * cleaned them up.
... ... @@ -78,12 +83,6 @@
78 83 *
79 84 */
80 85  
81   -
82   -#if defined(__KERNEL__)
83   -/*
84   - * inside the kernel, we can use nicknames;
85   - * outside of it, we must avoid POSIX namespace pollution...
86   - */
87 86 #define cpu_to_le64 __cpu_to_le64
88 87 #define le64_to_cpu __le64_to_cpu
89 88 #define cpu_to_le32 __cpu_to_le32
90 89  
91 90  
... ... @@ -120,18 +119,8 @@
120 119 #define be32_to_cpus __be32_to_cpus
121 120 #define cpu_to_be16s __cpu_to_be16s
122 121 #define be16_to_cpus __be16_to_cpus
123   -#endif
124 122  
125   -
126 123 /*
127   - * Handle ntohl and suches. These have various compatibility
128   - * issues - like we want to give the prototype even though we
129   - * also have a macro for them in case some strange program
130   - * wants to take the address of the thing or something..
131   - *
132   - * Note that these used to return a "long" in libc5, even though
133   - * long is often 64-bit these days.. Thus the casts.
134   - *
135 124 * They have to be macros in order to do the constant folding
136 125 * correctly - if the argument passed into a inline function
137 126 * it is no longer constant according to gcc..
138 127  
139 128  
140 129  
141 130  
... ... @@ -142,40 +131,78 @@
142 131 #undef htonl
143 132 #undef htons
144 133  
145   -/*
146   - * Do the prototypes. Somebody might want to take the
147   - * address or some such sick thing..
148   - */
149   -#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
150   -extern __u32 ntohl(__u32);
151   -extern __u32 htonl(__u32);
152   -#else
153   -extern unsigned long int ntohl(unsigned long int);
154   -extern unsigned long int htonl(unsigned long int);
155   -#endif
156   -extern unsigned short int ntohs(unsigned short int);
157   -extern unsigned short int htons(unsigned short int);
158   -
159   -
160   -#if defined(__GNUC__) && (__GNUC__ >= 2)
161   -
162 134 #define ___htonl(x) __cpu_to_be32(x)
163 135 #define ___htons(x) __cpu_to_be16(x)
164 136 #define ___ntohl(x) __be32_to_cpu(x)
165 137 #define ___ntohs(x) __be16_to_cpu(x)
166 138  
167   -#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
168 139 #define htonl(x) ___htonl(x)
169 140 #define ntohl(x) ___ntohl(x)
170   -#else
171   -#define htonl(x) ((unsigned long)___htonl(x))
172   -#define ntohl(x) ((unsigned long)___ntohl(x))
173   -#endif
174 141 #define htons(x) ___htons(x)
175 142 #define ntohs(x) ___ntohs(x)
176 143  
177   -#endif /* OPTIMIZE */
  144 +static inline void le16_add_cpu(__le16 *var, u16 val)
  145 +{
  146 + *var = cpu_to_le16(le16_to_cpu(*var) + val);
  147 +}
178 148  
  149 +static inline void le32_add_cpu(__le32 *var, u32 val)
  150 +{
  151 + *var = cpu_to_le32(le32_to_cpu(*var) + val);
  152 +}
  153 +
  154 +static inline void le64_add_cpu(__le64 *var, u64 val)
  155 +{
  156 + *var = cpu_to_le64(le64_to_cpu(*var) + val);
  157 +}
  158 +
  159 +/* XXX: this stuff can be optimized */
  160 +static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
  161 +{
  162 + while (words--) {
  163 + __le32_to_cpus(buf);
  164 + buf++;
  165 + }
  166 +}
  167 +
  168 +static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
  169 +{
  170 + while (words--) {
  171 + __cpu_to_le32s(buf);
  172 + buf++;
  173 + }
  174 +}
  175 +
  176 +static inline void be16_add_cpu(__be16 *var, u16 val)
  177 +{
  178 + *var = cpu_to_be16(be16_to_cpu(*var) + val);
  179 +}
  180 +
  181 +static inline void be32_add_cpu(__be32 *var, u32 val)
  182 +{
  183 + *var = cpu_to_be32(be32_to_cpu(*var) + val);
  184 +}
  185 +
  186 +static inline void be64_add_cpu(__be64 *var, u64 val)
  187 +{
  188 + *var = cpu_to_be64(be64_to_cpu(*var) + val);
  189 +}
  190 +
  191 +static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
  192 +{
  193 + int i;
  194 +
  195 + for (i = 0; i < len; i++)
  196 + dst[i] = cpu_to_be32(src[i]);
  197 +}
  198 +
  199 +static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
  200 +{
  201 + int i;
  202 +
  203 + for (i = 0; i < len; i++)
  204 + dst[i] = be32_to_cpu(src[i]);
  205 +}
179 206  
180 207 #endif /* _LINUX_BYTEORDER_GENERIC_H */
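
The header comment added above notes that the pointer-taking macros (e.g. le64_to_cpup) expect naturally aligned data and that get_unaligned should be used otherwise. A small sketch of that distinction, assuming <asm/unaligned.h> provides get_unaligned_le32 as in mainline Linux; the function names are illustrative only.

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* Aligned case: the field is a properly aligned __le32. */
static u32 demo_read_aligned(const __le32 *p)
{
	return le32_to_cpup(p);
}

/* Unaligned case: an arbitrary byte offset into a raw buffer. */
static u32 demo_read_unaligned(const u8 *buf, unsigned int offset)
{
	return get_unaligned_le32(buf + offset);
}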