Commit d1992b169d31f339dc5ea4e9f312567c8cf322a3
Committed by
Pablo Neira Ayuso
1 parent
da2e852612
netfilter: xt_HMARK: fix endianness and provide consistent hashing
This patch addresses two issues: a) Fix usage of u32 and __be32 that causes endianness warnings via sparse. b) Ensure consistent hashing in a cluster that is composed of big and little endian systems. Thus, we obtain the same hash mark in a heterogeneous cluster. Reported-by: Dan Carpenter <dan.carpenter@oracle.com> Signed-off-by: Hans Schillstrom <hans@schillstrom.com> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Showing 2 changed files with 46 additions and 31 deletions Inline Diff
include/linux/netfilter/xt_HMARK.h
1 | #ifndef XT_HMARK_H_ | 1 | #ifndef XT_HMARK_H_ |
2 | #define XT_HMARK_H_ | 2 | #define XT_HMARK_H_ |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | enum { | 6 | enum { |
7 | XT_HMARK_SADDR_MASK, | 7 | XT_HMARK_SADDR_MASK, |
8 | XT_HMARK_DADDR_MASK, | 8 | XT_HMARK_DADDR_MASK, |
9 | XT_HMARK_SPI, | 9 | XT_HMARK_SPI, |
10 | XT_HMARK_SPI_MASK, | 10 | XT_HMARK_SPI_MASK, |
11 | XT_HMARK_SPORT, | 11 | XT_HMARK_SPORT, |
12 | XT_HMARK_DPORT, | 12 | XT_HMARK_DPORT, |
13 | XT_HMARK_SPORT_MASK, | 13 | XT_HMARK_SPORT_MASK, |
14 | XT_HMARK_DPORT_MASK, | 14 | XT_HMARK_DPORT_MASK, |
15 | XT_HMARK_PROTO_MASK, | 15 | XT_HMARK_PROTO_MASK, |
16 | XT_HMARK_RND, | 16 | XT_HMARK_RND, |
17 | XT_HMARK_MODULUS, | 17 | XT_HMARK_MODULUS, |
18 | XT_HMARK_OFFSET, | 18 | XT_HMARK_OFFSET, |
19 | XT_HMARK_CT, | 19 | XT_HMARK_CT, |
20 | XT_HMARK_METHOD_L3, | 20 | XT_HMARK_METHOD_L3, |
21 | XT_HMARK_METHOD_L3_4, | 21 | XT_HMARK_METHOD_L3_4, |
22 | }; | 22 | }; |
23 | #define XT_HMARK_FLAG(flag) (1 << flag) | 23 | #define XT_HMARK_FLAG(flag) (1 << flag) |
24 | 24 | ||
25 | union hmark_ports { | 25 | union hmark_ports { |
26 | struct { | 26 | struct { |
27 | __u16 src; | 27 | __u16 src; |
28 | __u16 dst; | 28 | __u16 dst; |
29 | } p16; | 29 | } p16; |
30 | struct { | ||
31 | __be16 src; | ||
32 | __be16 dst; | ||
33 | } b16; | ||
30 | __u32 v32; | 34 | __u32 v32; |
35 | __be32 b32; | ||
31 | }; | 36 | }; |
32 | 37 | ||
33 | struct xt_hmark_info { | 38 | struct xt_hmark_info { |
34 | union nf_inet_addr src_mask; | 39 | union nf_inet_addr src_mask; |
35 | union nf_inet_addr dst_mask; | 40 | union nf_inet_addr dst_mask; |
36 | union hmark_ports port_mask; | 41 | union hmark_ports port_mask; |
37 | union hmark_ports port_set; | 42 | union hmark_ports port_set; |
38 | __u32 flags; | 43 | __u32 flags; |
39 | __u16 proto_mask; | 44 | __u16 proto_mask; |
40 | __u32 hashrnd; | 45 | __u32 hashrnd; |
41 | __u32 hmodulus; | 46 | __u32 hmodulus; |
42 | __u32 hoffset; /* Mark offset to start from */ | 47 | __u32 hoffset; /* Mark offset to start from */ |
43 | }; | 48 | }; |
44 | 49 | ||
45 | #endif /* XT_HMARK_H_ */ | 50 | #endif /* XT_HMARK_H_ */ |
46 | 51 |
net/netfilter/xt_HMARK.c
1 | /* | 1 | /* |
2 | * xt_HMARK - Netfilter module to set mark by means of hashing | 2 | * xt_HMARK - Netfilter module to set mark by means of hashing |
3 | * | 3 | * |
4 | * (C) 2012 by Hans Schillstrom <hans.schillstrom@ericsson.com> | 4 | * (C) 2012 by Hans Schillstrom <hans.schillstrom@ericsson.com> |
5 | * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org> | 5 | * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License version 2 as published by | 8 | * under the terms of the GNU General Public License version 2 as published by |
9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/icmp.h> | 14 | #include <linux/icmp.h> |
15 | 15 | ||
16 | #include <linux/netfilter/x_tables.h> | 16 | #include <linux/netfilter/x_tables.h> |
17 | #include <linux/netfilter/xt_HMARK.h> | 17 | #include <linux/netfilter/xt_HMARK.h> |
18 | 18 | ||
19 | #include <net/ip.h> | 19 | #include <net/ip.h> |
20 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) | 20 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
21 | #include <net/netfilter/nf_conntrack.h> | 21 | #include <net/netfilter/nf_conntrack.h> |
22 | #endif | 22 | #endif |
23 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) | 23 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) |
24 | #include <net/ipv6.h> | 24 | #include <net/ipv6.h> |
25 | #include <linux/netfilter_ipv6/ip6_tables.h> | 25 | #include <linux/netfilter_ipv6/ip6_tables.h> |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | MODULE_LICENSE("GPL"); | 28 | MODULE_LICENSE("GPL"); |
29 | MODULE_AUTHOR("Hans Schillstrom <hans.schillstrom@ericsson.com>"); | 29 | MODULE_AUTHOR("Hans Schillstrom <hans.schillstrom@ericsson.com>"); |
30 | MODULE_DESCRIPTION("Xtables: packet marking using hash calculation"); | 30 | MODULE_DESCRIPTION("Xtables: packet marking using hash calculation"); |
31 | MODULE_ALIAS("ipt_HMARK"); | 31 | MODULE_ALIAS("ipt_HMARK"); |
32 | MODULE_ALIAS("ip6t_HMARK"); | 32 | MODULE_ALIAS("ip6t_HMARK"); |
33 | 33 | ||
34 | struct hmark_tuple { | 34 | struct hmark_tuple { |
35 | u32 src; | 35 | __be32 src; |
36 | u32 dst; | 36 | __be32 dst; |
37 | union hmark_ports uports; | 37 | union hmark_ports uports; |
38 | uint8_t proto; | 38 | u8 proto; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) | 41 | static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask) |
42 | { | 42 | { |
43 | return (addr32[0] & mask[0]) ^ | 43 | return (addr32[0] & mask[0]) ^ |
44 | (addr32[1] & mask[1]) ^ | 44 | (addr32[1] & mask[1]) ^ |
45 | (addr32[2] & mask[2]) ^ | 45 | (addr32[2] & mask[2]) ^ |
46 | (addr32[3] & mask[3]); | 46 | (addr32[3] & mask[3]); |
47 | } | 47 | } |
48 | 48 | ||
49 | static inline u32 | 49 | static inline __be32 |
50 | hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) | 50 | hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask) |
51 | { | 51 | { |
52 | switch (l3num) { | 52 | switch (l3num) { |
53 | case AF_INET: | 53 | case AF_INET: |
54 | return *addr32 & *mask; | 54 | return *addr32 & *mask; |
55 | case AF_INET6: | 55 | case AF_INET6: |
56 | return hmark_addr6_mask(addr32, mask); | 56 | return hmark_addr6_mask(addr32, mask); |
57 | } | 57 | } |
58 | return 0; | 58 | return 0; |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline void hmark_swap_ports(union hmark_ports *uports, | ||
62 | const struct xt_hmark_info *info) | ||
63 | { | ||
64 | union hmark_ports hp; | ||
65 | u16 src, dst; | ||
66 | |||
67 | hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32; | ||
68 | src = ntohs(hp.b16.src); | ||
69 | dst = ntohs(hp.b16.dst); | ||
70 | |||
71 | if (dst > src) | ||
72 | uports->v32 = (dst << 16) | src; | ||
73 | else | ||
74 | uports->v32 = (src << 16) | dst; | ||
75 | } | ||
76 | |||
61 | static int | 77 | static int |
62 | hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, | 78 | hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, |
63 | const struct xt_hmark_info *info) | 79 | const struct xt_hmark_info *info) |
64 | { | 80 | { |
65 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) | 81 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
66 | enum ip_conntrack_info ctinfo; | 82 | enum ip_conntrack_info ctinfo; |
67 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | 83 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
68 | struct nf_conntrack_tuple *otuple; | 84 | struct nf_conntrack_tuple *otuple; |
69 | struct nf_conntrack_tuple *rtuple; | 85 | struct nf_conntrack_tuple *rtuple; |
70 | 86 | ||
71 | if (ct == NULL || nf_ct_is_untracked(ct)) | 87 | if (ct == NULL || nf_ct_is_untracked(ct)) |
72 | return -1; | 88 | return -1; |
73 | 89 | ||
74 | otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; | 90 | otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; |
75 | rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; | 91 | rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
76 | 92 | ||
77 | t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all, | 93 | t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6, |
78 | info->src_mask.all); | 94 | info->src_mask.ip6); |
79 | t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all, | 95 | t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6, |
80 | info->dst_mask.all); | 96 | info->dst_mask.ip6); |
81 | 97 | ||
82 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 98 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
83 | return 0; | 99 | return 0; |
84 | 100 | ||
85 | t->proto = nf_ct_protonum(ct); | 101 | t->proto = nf_ct_protonum(ct); |
86 | if (t->proto != IPPROTO_ICMP) { | 102 | if (t->proto != IPPROTO_ICMP) { |
87 | t->uports.p16.src = otuple->src.u.all; | 103 | t->uports.b16.src = otuple->src.u.all; |
88 | t->uports.p16.dst = rtuple->src.u.all; | 104 | t->uports.b16.dst = rtuple->src.u.all; |
89 | t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | | 105 | hmark_swap_ports(&t->uports, info); |
90 | info->port_set.v32; | ||
91 | if (t->uports.p16.dst < t->uports.p16.src) | ||
92 | swap(t->uports.p16.dst, t->uports.p16.src); | ||
93 | } | 106 | } |
94 | 107 | ||
95 | return 0; | 108 | return 0; |
96 | #else | 109 | #else |
97 | return -1; | 110 | return -1; |
98 | #endif | 111 | #endif |
99 | } | 112 | } |
100 | 113 | ||
114 | /* This hash function is endian independent, to ensure consistent hashing if | ||
115 | * the cluster is composed of big and little endian systems. */ | ||
101 | static inline u32 | 116 | static inline u32 |
102 | hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) | 117 | hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) |
103 | { | 118 | { |
104 | u32 hash; | 119 | u32 hash; |
120 | u32 src = ntohl(t->src); | ||
121 | u32 dst = ntohl(t->dst); | ||
105 | 122 | ||
106 | if (t->dst < t->src) | 123 | if (dst < src) |
107 | swap(t->src, t->dst); | 124 | swap(src, dst); |
108 | 125 | ||
109 | hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd); | 126 | hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd); |
110 | hash = hash ^ (t->proto & info->proto_mask); | 127 | hash = hash ^ (t->proto & info->proto_mask); |
111 | 128 | ||
112 | return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; | 129 | return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; |
113 | } | 130 | } |
114 | 131 | ||
115 | static void | 132 | static void |
116 | hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff, | 133 | hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff, |
117 | struct hmark_tuple *t, const struct xt_hmark_info *info) | 134 | struct hmark_tuple *t, const struct xt_hmark_info *info) |
118 | { | 135 | { |
119 | int protoff; | 136 | int protoff; |
120 | 137 | ||
121 | protoff = proto_ports_offset(t->proto); | 138 | protoff = proto_ports_offset(t->proto); |
122 | if (protoff < 0) | 139 | if (protoff < 0) |
123 | return; | 140 | return; |
124 | 141 | ||
125 | nhoff += protoff; | 142 | nhoff += protoff; |
126 | if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) | 143 | if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) |
127 | return; | 144 | return; |
128 | 145 | ||
129 | t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | | 146 | hmark_swap_ports(&t->uports, info); |
130 | info->port_set.v32; | ||
131 | |||
132 | if (t->uports.p16.dst < t->uports.p16.src) | ||
133 | swap(t->uports.p16.dst, t->uports.p16.src); | ||
134 | } | 147 | } |
135 | 148 | ||
136 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) | 149 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) |
137 | static int get_inner6_hdr(const struct sk_buff *skb, int *offset) | 150 | static int get_inner6_hdr(const struct sk_buff *skb, int *offset) |
138 | { | 151 | { |
139 | struct icmp6hdr *icmp6h, _ih6; | 152 | struct icmp6hdr *icmp6h, _ih6; |
140 | 153 | ||
141 | icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6); | 154 | icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6); |
142 | if (icmp6h == NULL) | 155 | if (icmp6h == NULL) |
143 | return 0; | 156 | return 0; |
144 | 157 | ||
145 | if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) { | 158 | if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) { |
146 | *offset += sizeof(struct icmp6hdr); | 159 | *offset += sizeof(struct icmp6hdr); |
147 | return 1; | 160 | return 1; |
148 | } | 161 | } |
149 | return 0; | 162 | return 0; |
150 | } | 163 | } |
151 | 164 | ||
152 | static int | 165 | static int |
153 | hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, | 166 | hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, |
154 | const struct xt_hmark_info *info) | 167 | const struct xt_hmark_info *info) |
155 | { | 168 | { |
156 | struct ipv6hdr *ip6, _ip6; | 169 | struct ipv6hdr *ip6, _ip6; |
157 | int flag = IP6T_FH_F_AUTH; | 170 | int flag = IP6T_FH_F_AUTH; |
158 | unsigned int nhoff = 0; | 171 | unsigned int nhoff = 0; |
159 | u16 fragoff = 0; | 172 | u16 fragoff = 0; |
160 | int nexthdr; | 173 | int nexthdr; |
161 | 174 | ||
162 | ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb)); | 175 | ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb)); |
163 | nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); | 176 | nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); |
164 | if (nexthdr < 0) | 177 | if (nexthdr < 0) |
165 | return 0; | 178 | return 0; |
166 | /* No need to check for icmp errors on fragments */ | 179 | /* No need to check for icmp errors on fragments */ |
167 | if ((flag & IP6T_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6)) | 180 | if ((flag & IP6T_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6)) |
168 | goto noicmp; | 181 | goto noicmp; |
169 | /* Use inner header in case of ICMP errors */ | 182 | /* Use inner header in case of ICMP errors */ |
170 | if (get_inner6_hdr(skb, &nhoff)) { | 183 | if (get_inner6_hdr(skb, &nhoff)) { |
171 | ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6); | 184 | ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6); |
172 | if (ip6 == NULL) | 185 | if (ip6 == NULL) |
173 | return -1; | 186 | return -1; |
174 | /* If AH present, use SPI like in ESP. */ | 187 | /* If AH present, use SPI like in ESP. */ |
175 | flag = IP6T_FH_F_AUTH; | 188 | flag = IP6T_FH_F_AUTH; |
176 | nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); | 189 | nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); |
177 | if (nexthdr < 0) | 190 | if (nexthdr < 0) |
178 | return -1; | 191 | return -1; |
179 | } | 192 | } |
180 | noicmp: | 193 | noicmp: |
181 | t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all); | 194 | t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6); |
182 | t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all); | 195 | t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6); |
183 | 196 | ||
184 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 197 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
185 | return 0; | 198 | return 0; |
186 | 199 | ||
187 | t->proto = nexthdr; | 200 | t->proto = nexthdr; |
188 | if (t->proto == IPPROTO_ICMPV6) | 201 | if (t->proto == IPPROTO_ICMPV6) |
189 | return 0; | 202 | return 0; |
190 | 203 | ||
191 | if (flag & IP6T_FH_F_FRAG) | 204 | if (flag & IP6T_FH_F_FRAG) |
192 | return 0; | 205 | return 0; |
193 | 206 | ||
194 | hmark_set_tuple_ports(skb, nhoff, t, info); | 207 | hmark_set_tuple_ports(skb, nhoff, t, info); |
195 | return 0; | 208 | return 0; |
196 | } | 209 | } |
197 | 210 | ||
198 | static unsigned int | 211 | static unsigned int |
199 | hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par) | 212 | hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par) |
200 | { | 213 | { |
201 | const struct xt_hmark_info *info = par->targinfo; | 214 | const struct xt_hmark_info *info = par->targinfo; |
202 | struct hmark_tuple t; | 215 | struct hmark_tuple t; |
203 | 216 | ||
204 | memset(&t, 0, sizeof(struct hmark_tuple)); | 217 | memset(&t, 0, sizeof(struct hmark_tuple)); |
205 | 218 | ||
206 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) { | 219 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) { |
207 | if (hmark_ct_set_htuple(skb, &t, info) < 0) | 220 | if (hmark_ct_set_htuple(skb, &t, info) < 0) |
208 | return XT_CONTINUE; | 221 | return XT_CONTINUE; |
209 | } else { | 222 | } else { |
210 | if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0) | 223 | if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0) |
211 | return XT_CONTINUE; | 224 | return XT_CONTINUE; |
212 | } | 225 | } |
213 | 226 | ||
214 | skb->mark = hmark_hash(&t, info); | 227 | skb->mark = hmark_hash(&t, info); |
215 | return XT_CONTINUE; | 228 | return XT_CONTINUE; |
216 | } | 229 | } |
217 | #endif | 230 | #endif |
218 | 231 | ||
219 | static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff) | 232 | static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff) |
220 | { | 233 | { |
221 | const struct icmphdr *icmph; | 234 | const struct icmphdr *icmph; |
222 | struct icmphdr _ih; | 235 | struct icmphdr _ih; |
223 | 236 | ||
224 | /* Not enough header? */ | 237 | /* Not enough header? */ |
225 | icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih); | 238 | icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih); |
226 | if (icmph == NULL || icmph->type > NR_ICMP_TYPES) | 239 | if (icmph == NULL || icmph->type > NR_ICMP_TYPES) |
227 | return 0; | 240 | return 0; |
228 | 241 | ||
229 | /* Error message? */ | 242 | /* Error message? */ |
230 | if (icmph->type != ICMP_DEST_UNREACH && | 243 | if (icmph->type != ICMP_DEST_UNREACH && |
231 | icmph->type != ICMP_SOURCE_QUENCH && | 244 | icmph->type != ICMP_SOURCE_QUENCH && |
232 | icmph->type != ICMP_TIME_EXCEEDED && | 245 | icmph->type != ICMP_TIME_EXCEEDED && |
233 | icmph->type != ICMP_PARAMETERPROB && | 246 | icmph->type != ICMP_PARAMETERPROB && |
234 | icmph->type != ICMP_REDIRECT) | 247 | icmph->type != ICMP_REDIRECT) |
235 | return 0; | 248 | return 0; |
236 | 249 | ||
237 | *nhoff += iphsz + sizeof(_ih); | 250 | *nhoff += iphsz + sizeof(_ih); |
238 | return 1; | 251 | return 1; |
239 | } | 252 | } |
240 | 253 | ||
241 | static int | 254 | static int |
242 | hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t, | 255 | hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t, |
243 | const struct xt_hmark_info *info) | 256 | const struct xt_hmark_info *info) |
244 | { | 257 | { |
245 | struct iphdr *ip, _ip; | 258 | struct iphdr *ip, _ip; |
246 | int nhoff = skb_network_offset(skb); | 259 | int nhoff = skb_network_offset(skb); |
247 | 260 | ||
248 | ip = (struct iphdr *) (skb->data + nhoff); | 261 | ip = (struct iphdr *) (skb->data + nhoff); |
249 | if (ip->protocol == IPPROTO_ICMP) { | 262 | if (ip->protocol == IPPROTO_ICMP) { |
250 | /* Use inner header in case of ICMP errors */ | 263 | /* Use inner header in case of ICMP errors */ |
251 | if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) { | 264 | if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) { |
252 | ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip); | 265 | ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip); |
253 | if (ip == NULL) | 266 | if (ip == NULL) |
254 | return -1; | 267 | return -1; |
255 | } | 268 | } |
256 | } | 269 | } |
257 | 270 | ||
258 | t->src = (__force u32) ip->saddr; | 271 | t->src = ip->saddr & info->src_mask.ip; |
259 | t->dst = (__force u32) ip->daddr; | 272 | t->dst = ip->daddr & info->dst_mask.ip; |
260 | |||
261 | t->src &= info->src_mask.ip; | ||
262 | t->dst &= info->dst_mask.ip; | ||
263 | 273 | ||
264 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 274 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
265 | return 0; | 275 | return 0; |
266 | 276 | ||
267 | t->proto = ip->protocol; | 277 | t->proto = ip->protocol; |
268 | 278 | ||
269 | /* ICMP has no ports, skip */ | 279 | /* ICMP has no ports, skip */ |
270 | if (t->proto == IPPROTO_ICMP) | 280 | if (t->proto == IPPROTO_ICMP) |
271 | return 0; | 281 | return 0; |
272 | 282 | ||
273 | /* follow-up fragments don't contain ports, skip all fragments */ | 283 | /* follow-up fragments don't contain ports, skip all fragments */ |
274 | if (ip->frag_off & htons(IP_MF | IP_OFFSET)) | 284 | if (ip->frag_off & htons(IP_MF | IP_OFFSET)) |
275 | return 0; | 285 | return 0; |
276 | 286 | ||
277 | hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info); | 287 | hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info); |
278 | 288 | ||
279 | return 0; | 289 | return 0; |
280 | } | 290 | } |
281 | 291 | ||
282 | static unsigned int | 292 | static unsigned int |
283 | hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par) | 293 | hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par) |
284 | { | 294 | { |
285 | const struct xt_hmark_info *info = par->targinfo; | 295 | const struct xt_hmark_info *info = par->targinfo; |
286 | struct hmark_tuple t; | 296 | struct hmark_tuple t; |
287 | 297 | ||
288 | memset(&t, 0, sizeof(struct hmark_tuple)); | 298 | memset(&t, 0, sizeof(struct hmark_tuple)); |
289 | 299 | ||
290 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) { | 300 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) { |
291 | if (hmark_ct_set_htuple(skb, &t, info) < 0) | 301 | if (hmark_ct_set_htuple(skb, &t, info) < 0) |
292 | return XT_CONTINUE; | 302 | return XT_CONTINUE; |
293 | } else { | 303 | } else { |
294 | if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0) | 304 | if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0) |
295 | return XT_CONTINUE; | 305 | return XT_CONTINUE; |
296 | } | 306 | } |
297 | 307 | ||
298 | skb->mark = hmark_hash(&t, info); | 308 | skb->mark = hmark_hash(&t, info); |
299 | return XT_CONTINUE; | 309 | return XT_CONTINUE; |
300 | } | 310 | } |
301 | 311 | ||
302 | static int hmark_tg_check(const struct xt_tgchk_param *par) | 312 | static int hmark_tg_check(const struct xt_tgchk_param *par) |
303 | { | 313 | { |
304 | const struct xt_hmark_info *info = par->targinfo; | 314 | const struct xt_hmark_info *info = par->targinfo; |
305 | 315 | ||
306 | if (!info->hmodulus) { | 316 | if (!info->hmodulus) { |
307 | pr_info("xt_HMARK: hash modulus can't be zero\n"); | 317 | pr_info("xt_HMARK: hash modulus can't be zero\n"); |
308 | return -EINVAL; | 318 | return -EINVAL; |
309 | } | 319 | } |
310 | if (info->proto_mask && | 320 | if (info->proto_mask && |
311 | (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) { | 321 | (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) { |
312 | pr_info("xt_HMARK: proto mask must be zero with L3 mode\n"); | 322 | pr_info("xt_HMARK: proto mask must be zero with L3 mode\n"); |
313 | return -EINVAL; | 323 | return -EINVAL; |
314 | } | 324 | } |
315 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) && | 325 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) && |
316 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) | | 326 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) | |
317 | XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) { | 327 | XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) { |
318 | pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n"); | 328 | pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n"); |
319 | return -EINVAL; | 329 | return -EINVAL; |
320 | } | 330 | } |
321 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) && | 331 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) && |
322 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) | | 332 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) | |
323 | XT_HMARK_FLAG(XT_HMARK_DPORT)))) { | 333 | XT_HMARK_FLAG(XT_HMARK_DPORT)))) { |
324 | pr_info("xt_HMARK: spi-set and port-set can't be combined\n"); | 334 | pr_info("xt_HMARK: spi-set and port-set can't be combined\n"); |
325 | return -EINVAL; | 335 | return -EINVAL; |
326 | } | 336 | } |
327 | return 0; | 337 | return 0; |
328 | } | 338 | } |
329 | 339 | ||
330 | static struct xt_target hmark_tg_reg[] __read_mostly = { | 340 | static struct xt_target hmark_tg_reg[] __read_mostly = { |
331 | { | 341 | { |
332 | .name = "HMARK", | 342 | .name = "HMARK", |
333 | .family = NFPROTO_IPV4, | 343 | .family = NFPROTO_IPV4, |
334 | .target = hmark_tg_v4, | 344 | .target = hmark_tg_v4, |
335 | .targetsize = sizeof(struct xt_hmark_info), | 345 | .targetsize = sizeof(struct xt_hmark_info), |
336 | .checkentry = hmark_tg_check, | 346 | .checkentry = hmark_tg_check, |
337 | .me = THIS_MODULE, | 347 | .me = THIS_MODULE, |
338 | }, | 348 | }, |
339 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) | 349 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) |
340 | { | 350 | { |
341 | .name = "HMARK", | 351 | .name = "HMARK", |
342 | .family = NFPROTO_IPV6, | 352 | .family = NFPROTO_IPV6, |
343 | .target = hmark_tg_v6, | 353 | .target = hmark_tg_v6, |
344 | .targetsize = sizeof(struct xt_hmark_info), | 354 | .targetsize = sizeof(struct xt_hmark_info), |
345 | .checkentry = hmark_tg_check, | 355 | .checkentry = hmark_tg_check, |
346 | .me = THIS_MODULE, | 356 | .me = THIS_MODULE, |
347 | }, | 357 | }, |
348 | #endif | 358 | #endif |
349 | }; | 359 | }; |
350 | 360 | ||
351 | static int __init hmark_tg_init(void) | 361 | static int __init hmark_tg_init(void) |
352 | { | 362 | { |
353 | return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg)); | 363 | return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg)); |