Commit 7e09dccd07518729fe3cf586beb83acffa2e64ca

Authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains two bugfixes for your net tree, they are:

1) Validate netlink group from nfnetlink to avoid an out of bound array
   access. This should only happen with superuser privileges though.
   Discovered by Andrey Ryabinin using trinity.

2) Don't push ethernet header before calling the netfilter output hook
   for multicast traffic, this breaks ebtables since it expects to see
   skb->data pointing to the network header, patch from Linus Luessing.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 2 changed files Inline Diff

net/bridge/br_multicast.c
1 /* 1 /*
2 * Bridge multicast support. 2 * Bridge multicast support.
3 * 3 *
4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> 4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free 7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option) 8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version. 9 * any later version.
10 * 10 *
11 */ 11 */
12 12
13 #include <linux/err.h> 13 #include <linux/err.h>
14 #include <linux/export.h> 14 #include <linux/export.h>
15 #include <linux/if_ether.h> 15 #include <linux/if_ether.h>
16 #include <linux/igmp.h> 16 #include <linux/igmp.h>
17 #include <linux/jhash.h> 17 #include <linux/jhash.h>
18 #include <linux/kernel.h> 18 #include <linux/kernel.h>
19 #include <linux/log2.h> 19 #include <linux/log2.h>
20 #include <linux/netdevice.h> 20 #include <linux/netdevice.h>
21 #include <linux/netfilter_bridge.h> 21 #include <linux/netfilter_bridge.h>
22 #include <linux/random.h> 22 #include <linux/random.h>
23 #include <linux/rculist.h> 23 #include <linux/rculist.h>
24 #include <linux/skbuff.h> 24 #include <linux/skbuff.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/timer.h> 26 #include <linux/timer.h>
27 #include <linux/inetdevice.h> 27 #include <linux/inetdevice.h>
28 #include <net/ip.h> 28 #include <net/ip.h>
29 #if IS_ENABLED(CONFIG_IPV6) 29 #if IS_ENABLED(CONFIG_IPV6)
30 #include <net/ipv6.h> 30 #include <net/ipv6.h>
31 #include <net/mld.h> 31 #include <net/mld.h>
32 #include <net/ip6_checksum.h> 32 #include <net/ip6_checksum.h>
33 #include <net/addrconf.h> 33 #include <net/addrconf.h>
34 #endif 34 #endif
35 35
36 #include "br_private.h" 36 #include "br_private.h"
37 37
38 static void br_multicast_start_querier(struct net_bridge *br, 38 static void br_multicast_start_querier(struct net_bridge *br,
39 struct bridge_mcast_own_query *query); 39 struct bridge_mcast_own_query *query);
40 unsigned int br_mdb_rehash_seq; 40 unsigned int br_mdb_rehash_seq;
41 41
42 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 42 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
43 { 43 {
44 if (a->proto != b->proto) 44 if (a->proto != b->proto)
45 return 0; 45 return 0;
46 if (a->vid != b->vid) 46 if (a->vid != b->vid)
47 return 0; 47 return 0;
48 switch (a->proto) { 48 switch (a->proto) {
49 case htons(ETH_P_IP): 49 case htons(ETH_P_IP):
50 return a->u.ip4 == b->u.ip4; 50 return a->u.ip4 == b->u.ip4;
51 #if IS_ENABLED(CONFIG_IPV6) 51 #if IS_ENABLED(CONFIG_IPV6)
52 case htons(ETH_P_IPV6): 52 case htons(ETH_P_IPV6):
53 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); 53 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
54 #endif 54 #endif
55 } 55 }
56 return 0; 56 return 0;
57 } 57 }
58 58
59 static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip, 59 static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
60 __u16 vid) 60 __u16 vid)
61 { 61 {
62 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1); 62 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
63 } 63 }
64 64
65 #if IS_ENABLED(CONFIG_IPV6) 65 #if IS_ENABLED(CONFIG_IPV6)
66 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, 66 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
67 const struct in6_addr *ip, 67 const struct in6_addr *ip,
68 __u16 vid) 68 __u16 vid)
69 { 69 {
70 return jhash_2words(ipv6_addr_hash(ip), vid, 70 return jhash_2words(ipv6_addr_hash(ip), vid,
71 mdb->secret) & (mdb->max - 1); 71 mdb->secret) & (mdb->max - 1);
72 } 72 }
73 #endif 73 #endif
74 74
75 static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, 75 static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
76 struct br_ip *ip) 76 struct br_ip *ip)
77 { 77 {
78 switch (ip->proto) { 78 switch (ip->proto) {
79 case htons(ETH_P_IP): 79 case htons(ETH_P_IP):
80 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid); 80 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
81 #if IS_ENABLED(CONFIG_IPV6) 81 #if IS_ENABLED(CONFIG_IPV6)
82 case htons(ETH_P_IPV6): 82 case htons(ETH_P_IPV6):
83 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid); 83 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
84 #endif 84 #endif
85 } 85 }
86 return 0; 86 return 0;
87 } 87 }
88 88
89 static struct net_bridge_mdb_entry *__br_mdb_ip_get( 89 static struct net_bridge_mdb_entry *__br_mdb_ip_get(
90 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) 90 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
91 { 91 {
92 struct net_bridge_mdb_entry *mp; 92 struct net_bridge_mdb_entry *mp;
93 93
94 hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) { 94 hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
95 if (br_ip_equal(&mp->addr, dst)) 95 if (br_ip_equal(&mp->addr, dst))
96 return mp; 96 return mp;
97 } 97 }
98 98
99 return NULL; 99 return NULL;
100 } 100 }
101 101
102 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, 102 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
103 struct br_ip *dst) 103 struct br_ip *dst)
104 { 104 {
105 if (!mdb) 105 if (!mdb)
106 return NULL; 106 return NULL;
107 107
108 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); 108 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
109 } 109 }
110 110
111 static struct net_bridge_mdb_entry *br_mdb_ip4_get( 111 static struct net_bridge_mdb_entry *br_mdb_ip4_get(
112 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid) 112 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
113 { 113 {
114 struct br_ip br_dst; 114 struct br_ip br_dst;
115 115
116 br_dst.u.ip4 = dst; 116 br_dst.u.ip4 = dst;
117 br_dst.proto = htons(ETH_P_IP); 117 br_dst.proto = htons(ETH_P_IP);
118 br_dst.vid = vid; 118 br_dst.vid = vid;
119 119
120 return br_mdb_ip_get(mdb, &br_dst); 120 return br_mdb_ip_get(mdb, &br_dst);
121 } 121 }
122 122
123 #if IS_ENABLED(CONFIG_IPV6) 123 #if IS_ENABLED(CONFIG_IPV6)
124 static struct net_bridge_mdb_entry *br_mdb_ip6_get( 124 static struct net_bridge_mdb_entry *br_mdb_ip6_get(
125 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst, 125 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
126 __u16 vid) 126 __u16 vid)
127 { 127 {
128 struct br_ip br_dst; 128 struct br_ip br_dst;
129 129
130 br_dst.u.ip6 = *dst; 130 br_dst.u.ip6 = *dst;
131 br_dst.proto = htons(ETH_P_IPV6); 131 br_dst.proto = htons(ETH_P_IPV6);
132 br_dst.vid = vid; 132 br_dst.vid = vid;
133 133
134 return br_mdb_ip_get(mdb, &br_dst); 134 return br_mdb_ip_get(mdb, &br_dst);
135 } 135 }
136 #endif 136 #endif
137 137
138 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 138 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
139 struct sk_buff *skb, u16 vid) 139 struct sk_buff *skb, u16 vid)
140 { 140 {
141 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); 141 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
142 struct br_ip ip; 142 struct br_ip ip;
143 143
144 if (br->multicast_disabled) 144 if (br->multicast_disabled)
145 return NULL; 145 return NULL;
146 146
147 if (BR_INPUT_SKB_CB(skb)->igmp) 147 if (BR_INPUT_SKB_CB(skb)->igmp)
148 return NULL; 148 return NULL;
149 149
150 ip.proto = skb->protocol; 150 ip.proto = skb->protocol;
151 ip.vid = vid; 151 ip.vid = vid;
152 152
153 switch (skb->protocol) { 153 switch (skb->protocol) {
154 case htons(ETH_P_IP): 154 case htons(ETH_P_IP):
155 ip.u.ip4 = ip_hdr(skb)->daddr; 155 ip.u.ip4 = ip_hdr(skb)->daddr;
156 break; 156 break;
157 #if IS_ENABLED(CONFIG_IPV6) 157 #if IS_ENABLED(CONFIG_IPV6)
158 case htons(ETH_P_IPV6): 158 case htons(ETH_P_IPV6):
159 ip.u.ip6 = ipv6_hdr(skb)->daddr; 159 ip.u.ip6 = ipv6_hdr(skb)->daddr;
160 break; 160 break;
161 #endif 161 #endif
162 default: 162 default:
163 return NULL; 163 return NULL;
164 } 164 }
165 165
166 return br_mdb_ip_get(mdb, &ip); 166 return br_mdb_ip_get(mdb, &ip);
167 } 167 }
168 168
169 static void br_mdb_free(struct rcu_head *head) 169 static void br_mdb_free(struct rcu_head *head)
170 { 170 {
171 struct net_bridge_mdb_htable *mdb = 171 struct net_bridge_mdb_htable *mdb =
172 container_of(head, struct net_bridge_mdb_htable, rcu); 172 container_of(head, struct net_bridge_mdb_htable, rcu);
173 struct net_bridge_mdb_htable *old = mdb->old; 173 struct net_bridge_mdb_htable *old = mdb->old;
174 174
175 mdb->old = NULL; 175 mdb->old = NULL;
176 kfree(old->mhash); 176 kfree(old->mhash);
177 kfree(old); 177 kfree(old);
178 } 178 }
179 179
180 static int br_mdb_copy(struct net_bridge_mdb_htable *new, 180 static int br_mdb_copy(struct net_bridge_mdb_htable *new,
181 struct net_bridge_mdb_htable *old, 181 struct net_bridge_mdb_htable *old,
182 int elasticity) 182 int elasticity)
183 { 183 {
184 struct net_bridge_mdb_entry *mp; 184 struct net_bridge_mdb_entry *mp;
185 int maxlen; 185 int maxlen;
186 int len; 186 int len;
187 int i; 187 int i;
188 188
189 for (i = 0; i < old->max; i++) 189 for (i = 0; i < old->max; i++)
190 hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver]) 190 hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
191 hlist_add_head(&mp->hlist[new->ver], 191 hlist_add_head(&mp->hlist[new->ver],
192 &new->mhash[br_ip_hash(new, &mp->addr)]); 192 &new->mhash[br_ip_hash(new, &mp->addr)]);
193 193
194 if (!elasticity) 194 if (!elasticity)
195 return 0; 195 return 0;
196 196
197 maxlen = 0; 197 maxlen = 0;
198 for (i = 0; i < new->max; i++) { 198 for (i = 0; i < new->max; i++) {
199 len = 0; 199 len = 0;
200 hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver]) 200 hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
201 len++; 201 len++;
202 if (len > maxlen) 202 if (len > maxlen)
203 maxlen = len; 203 maxlen = len;
204 } 204 }
205 205
206 return maxlen > elasticity ? -EINVAL : 0; 206 return maxlen > elasticity ? -EINVAL : 0;
207 } 207 }
208 208
209 void br_multicast_free_pg(struct rcu_head *head) 209 void br_multicast_free_pg(struct rcu_head *head)
210 { 210 {
211 struct net_bridge_port_group *p = 211 struct net_bridge_port_group *p =
212 container_of(head, struct net_bridge_port_group, rcu); 212 container_of(head, struct net_bridge_port_group, rcu);
213 213
214 kfree(p); 214 kfree(p);
215 } 215 }
216 216
217 static void br_multicast_free_group(struct rcu_head *head) 217 static void br_multicast_free_group(struct rcu_head *head)
218 { 218 {
219 struct net_bridge_mdb_entry *mp = 219 struct net_bridge_mdb_entry *mp =
220 container_of(head, struct net_bridge_mdb_entry, rcu); 220 container_of(head, struct net_bridge_mdb_entry, rcu);
221 221
222 kfree(mp); 222 kfree(mp);
223 } 223 }
224 224
225 static void br_multicast_group_expired(unsigned long data) 225 static void br_multicast_group_expired(unsigned long data)
226 { 226 {
227 struct net_bridge_mdb_entry *mp = (void *)data; 227 struct net_bridge_mdb_entry *mp = (void *)data;
228 struct net_bridge *br = mp->br; 228 struct net_bridge *br = mp->br;
229 struct net_bridge_mdb_htable *mdb; 229 struct net_bridge_mdb_htable *mdb;
230 230
231 spin_lock(&br->multicast_lock); 231 spin_lock(&br->multicast_lock);
232 if (!netif_running(br->dev) || timer_pending(&mp->timer)) 232 if (!netif_running(br->dev) || timer_pending(&mp->timer))
233 goto out; 233 goto out;
234 234
235 mp->mglist = false; 235 mp->mglist = false;
236 236
237 if (mp->ports) 237 if (mp->ports)
238 goto out; 238 goto out;
239 239
240 mdb = mlock_dereference(br->mdb, br); 240 mdb = mlock_dereference(br->mdb, br);
241 241
242 hlist_del_rcu(&mp->hlist[mdb->ver]); 242 hlist_del_rcu(&mp->hlist[mdb->ver]);
243 mdb->size--; 243 mdb->size--;
244 244
245 call_rcu_bh(&mp->rcu, br_multicast_free_group); 245 call_rcu_bh(&mp->rcu, br_multicast_free_group);
246 246
247 out: 247 out:
248 spin_unlock(&br->multicast_lock); 248 spin_unlock(&br->multicast_lock);
249 } 249 }
250 250
251 static void br_multicast_del_pg(struct net_bridge *br, 251 static void br_multicast_del_pg(struct net_bridge *br,
252 struct net_bridge_port_group *pg) 252 struct net_bridge_port_group *pg)
253 { 253 {
254 struct net_bridge_mdb_htable *mdb; 254 struct net_bridge_mdb_htable *mdb;
255 struct net_bridge_mdb_entry *mp; 255 struct net_bridge_mdb_entry *mp;
256 struct net_bridge_port_group *p; 256 struct net_bridge_port_group *p;
257 struct net_bridge_port_group __rcu **pp; 257 struct net_bridge_port_group __rcu **pp;
258 258
259 mdb = mlock_dereference(br->mdb, br); 259 mdb = mlock_dereference(br->mdb, br);
260 260
261 mp = br_mdb_ip_get(mdb, &pg->addr); 261 mp = br_mdb_ip_get(mdb, &pg->addr);
262 if (WARN_ON(!mp)) 262 if (WARN_ON(!mp))
263 return; 263 return;
264 264
265 for (pp = &mp->ports; 265 for (pp = &mp->ports;
266 (p = mlock_dereference(*pp, br)) != NULL; 266 (p = mlock_dereference(*pp, br)) != NULL;
267 pp = &p->next) { 267 pp = &p->next) {
268 if (p != pg) 268 if (p != pg)
269 continue; 269 continue;
270 270
271 rcu_assign_pointer(*pp, p->next); 271 rcu_assign_pointer(*pp, p->next);
272 hlist_del_init(&p->mglist); 272 hlist_del_init(&p->mglist);
273 del_timer(&p->timer); 273 del_timer(&p->timer);
274 call_rcu_bh(&p->rcu, br_multicast_free_pg); 274 call_rcu_bh(&p->rcu, br_multicast_free_pg);
275 275
276 if (!mp->ports && !mp->mglist && 276 if (!mp->ports && !mp->mglist &&
277 netif_running(br->dev)) 277 netif_running(br->dev))
278 mod_timer(&mp->timer, jiffies); 278 mod_timer(&mp->timer, jiffies);
279 279
280 return; 280 return;
281 } 281 }
282 282
283 WARN_ON(1); 283 WARN_ON(1);
284 } 284 }
285 285
286 static void br_multicast_port_group_expired(unsigned long data) 286 static void br_multicast_port_group_expired(unsigned long data)
287 { 287 {
288 struct net_bridge_port_group *pg = (void *)data; 288 struct net_bridge_port_group *pg = (void *)data;
289 struct net_bridge *br = pg->port->br; 289 struct net_bridge *br = pg->port->br;
290 290
291 spin_lock(&br->multicast_lock); 291 spin_lock(&br->multicast_lock);
292 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 292 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
293 hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT) 293 hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
294 goto out; 294 goto out;
295 295
296 br_multicast_del_pg(br, pg); 296 br_multicast_del_pg(br, pg);
297 297
298 out: 298 out:
299 spin_unlock(&br->multicast_lock); 299 spin_unlock(&br->multicast_lock);
300 } 300 }
301 301
302 static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max, 302 static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
303 int elasticity) 303 int elasticity)
304 { 304 {
305 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1); 305 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
306 struct net_bridge_mdb_htable *mdb; 306 struct net_bridge_mdb_htable *mdb;
307 int err; 307 int err;
308 308
309 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC); 309 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
310 if (!mdb) 310 if (!mdb)
311 return -ENOMEM; 311 return -ENOMEM;
312 312
313 mdb->max = max; 313 mdb->max = max;
314 mdb->old = old; 314 mdb->old = old;
315 315
316 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC); 316 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
317 if (!mdb->mhash) { 317 if (!mdb->mhash) {
318 kfree(mdb); 318 kfree(mdb);
319 return -ENOMEM; 319 return -ENOMEM;
320 } 320 }
321 321
322 mdb->size = old ? old->size : 0; 322 mdb->size = old ? old->size : 0;
323 mdb->ver = old ? old->ver ^ 1 : 0; 323 mdb->ver = old ? old->ver ^ 1 : 0;
324 324
325 if (!old || elasticity) 325 if (!old || elasticity)
326 get_random_bytes(&mdb->secret, sizeof(mdb->secret)); 326 get_random_bytes(&mdb->secret, sizeof(mdb->secret));
327 else 327 else
328 mdb->secret = old->secret; 328 mdb->secret = old->secret;
329 329
330 if (!old) 330 if (!old)
331 goto out; 331 goto out;
332 332
333 err = br_mdb_copy(mdb, old, elasticity); 333 err = br_mdb_copy(mdb, old, elasticity);
334 if (err) { 334 if (err) {
335 kfree(mdb->mhash); 335 kfree(mdb->mhash);
336 kfree(mdb); 336 kfree(mdb);
337 return err; 337 return err;
338 } 338 }
339 339
340 br_mdb_rehash_seq++; 340 br_mdb_rehash_seq++;
341 call_rcu_bh(&mdb->rcu, br_mdb_free); 341 call_rcu_bh(&mdb->rcu, br_mdb_free);
342 342
343 out: 343 out:
344 rcu_assign_pointer(*mdbp, mdb); 344 rcu_assign_pointer(*mdbp, mdb);
345 345
346 return 0; 346 return 0;
347 } 347 }
348 348
349 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, 349 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
350 __be32 group) 350 __be32 group)
351 { 351 {
352 struct sk_buff *skb; 352 struct sk_buff *skb;
353 struct igmphdr *ih; 353 struct igmphdr *ih;
354 struct ethhdr *eth; 354 struct ethhdr *eth;
355 struct iphdr *iph; 355 struct iphdr *iph;
356 356
357 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) + 357 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
358 sizeof(*ih) + 4); 358 sizeof(*ih) + 4);
359 if (!skb) 359 if (!skb)
360 goto out; 360 goto out;
361 361
362 skb->protocol = htons(ETH_P_IP); 362 skb->protocol = htons(ETH_P_IP);
363 363
364 skb_reset_mac_header(skb); 364 skb_reset_mac_header(skb);
365 eth = eth_hdr(skb); 365 eth = eth_hdr(skb);
366 366
367 ether_addr_copy(eth->h_source, br->dev->dev_addr); 367 ether_addr_copy(eth->h_source, br->dev->dev_addr);
368 eth->h_dest[0] = 1; 368 eth->h_dest[0] = 1;
369 eth->h_dest[1] = 0; 369 eth->h_dest[1] = 0;
370 eth->h_dest[2] = 0x5e; 370 eth->h_dest[2] = 0x5e;
371 eth->h_dest[3] = 0; 371 eth->h_dest[3] = 0;
372 eth->h_dest[4] = 0; 372 eth->h_dest[4] = 0;
373 eth->h_dest[5] = 1; 373 eth->h_dest[5] = 1;
374 eth->h_proto = htons(ETH_P_IP); 374 eth->h_proto = htons(ETH_P_IP);
375 skb_put(skb, sizeof(*eth)); 375 skb_put(skb, sizeof(*eth));
376 376
377 skb_set_network_header(skb, skb->len); 377 skb_set_network_header(skb, skb->len);
378 iph = ip_hdr(skb); 378 iph = ip_hdr(skb);
379 379
380 iph->version = 4; 380 iph->version = 4;
381 iph->ihl = 6; 381 iph->ihl = 6;
382 iph->tos = 0xc0; 382 iph->tos = 0xc0;
383 iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4); 383 iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
384 iph->id = 0; 384 iph->id = 0;
385 iph->frag_off = htons(IP_DF); 385 iph->frag_off = htons(IP_DF);
386 iph->ttl = 1; 386 iph->ttl = 1;
387 iph->protocol = IPPROTO_IGMP; 387 iph->protocol = IPPROTO_IGMP;
388 iph->saddr = br->multicast_query_use_ifaddr ? 388 iph->saddr = br->multicast_query_use_ifaddr ?
389 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0; 389 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
390 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); 390 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
391 ((u8 *)&iph[1])[0] = IPOPT_RA; 391 ((u8 *)&iph[1])[0] = IPOPT_RA;
392 ((u8 *)&iph[1])[1] = 4; 392 ((u8 *)&iph[1])[1] = 4;
393 ((u8 *)&iph[1])[2] = 0; 393 ((u8 *)&iph[1])[2] = 0;
394 ((u8 *)&iph[1])[3] = 0; 394 ((u8 *)&iph[1])[3] = 0;
395 ip_send_check(iph); 395 ip_send_check(iph);
396 skb_put(skb, 24); 396 skb_put(skb, 24);
397 397
398 skb_set_transport_header(skb, skb->len); 398 skb_set_transport_header(skb, skb->len);
399 ih = igmp_hdr(skb); 399 ih = igmp_hdr(skb);
400 ih->type = IGMP_HOST_MEMBERSHIP_QUERY; 400 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
401 ih->code = (group ? br->multicast_last_member_interval : 401 ih->code = (group ? br->multicast_last_member_interval :
402 br->multicast_query_response_interval) / 402 br->multicast_query_response_interval) /
403 (HZ / IGMP_TIMER_SCALE); 403 (HZ / IGMP_TIMER_SCALE);
404 ih->group = group; 404 ih->group = group;
405 ih->csum = 0; 405 ih->csum = 0;
406 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); 406 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
407 skb_put(skb, sizeof(*ih)); 407 skb_put(skb, sizeof(*ih));
408 408
409 __skb_pull(skb, sizeof(*eth)); 409 __skb_pull(skb, sizeof(*eth));
410 410
411 out: 411 out:
412 return skb; 412 return skb;
413 } 413 }
414 414
415 #if IS_ENABLED(CONFIG_IPV6) 415 #if IS_ENABLED(CONFIG_IPV6)
416 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 416 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
417 const struct in6_addr *group) 417 const struct in6_addr *group)
418 { 418 {
419 struct sk_buff *skb; 419 struct sk_buff *skb;
420 struct ipv6hdr *ip6h; 420 struct ipv6hdr *ip6h;
421 struct mld_msg *mldq; 421 struct mld_msg *mldq;
422 struct ethhdr *eth; 422 struct ethhdr *eth;
423 u8 *hopopt; 423 u8 *hopopt;
424 unsigned long interval; 424 unsigned long interval;
425 425
426 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + 426 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
427 8 + sizeof(*mldq)); 427 8 + sizeof(*mldq));
428 if (!skb) 428 if (!skb)
429 goto out; 429 goto out;
430 430
431 skb->protocol = htons(ETH_P_IPV6); 431 skb->protocol = htons(ETH_P_IPV6);
432 432
433 /* Ethernet header */ 433 /* Ethernet header */
434 skb_reset_mac_header(skb); 434 skb_reset_mac_header(skb);
435 eth = eth_hdr(skb); 435 eth = eth_hdr(skb);
436 436
437 ether_addr_copy(eth->h_source, br->dev->dev_addr); 437 ether_addr_copy(eth->h_source, br->dev->dev_addr);
438 eth->h_proto = htons(ETH_P_IPV6); 438 eth->h_proto = htons(ETH_P_IPV6);
439 skb_put(skb, sizeof(*eth)); 439 skb_put(skb, sizeof(*eth));
440 440
441 /* IPv6 header + HbH option */ 441 /* IPv6 header + HbH option */
442 skb_set_network_header(skb, skb->len); 442 skb_set_network_header(skb, skb->len);
443 ip6h = ipv6_hdr(skb); 443 ip6h = ipv6_hdr(skb);
444 444
445 *(__force __be32 *)ip6h = htonl(0x60000000); 445 *(__force __be32 *)ip6h = htonl(0x60000000);
446 ip6h->payload_len = htons(8 + sizeof(*mldq)); 446 ip6h->payload_len = htons(8 + sizeof(*mldq));
447 ip6h->nexthdr = IPPROTO_HOPOPTS; 447 ip6h->nexthdr = IPPROTO_HOPOPTS;
448 ip6h->hop_limit = 1; 448 ip6h->hop_limit = 1;
449 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 449 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
450 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 450 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
451 &ip6h->saddr)) { 451 &ip6h->saddr)) {
452 kfree_skb(skb); 452 kfree_skb(skb);
453 return NULL; 453 return NULL;
454 } 454 }
455 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 455 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
456 456
457 hopopt = (u8 *)(ip6h + 1); 457 hopopt = (u8 *)(ip6h + 1);
458 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 458 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
459 hopopt[1] = 0; /* length of HbH */ 459 hopopt[1] = 0; /* length of HbH */
460 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ 460 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
461 hopopt[3] = 2; /* Length of RA Option */ 461 hopopt[3] = 2; /* Length of RA Option */
462 hopopt[4] = 0; /* Type = 0x0000 (MLD) */ 462 hopopt[4] = 0; /* Type = 0x0000 (MLD) */
463 hopopt[5] = 0; 463 hopopt[5] = 0;
464 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ 464 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
465 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ 465 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
466 466
467 skb_put(skb, sizeof(*ip6h) + 8); 467 skb_put(skb, sizeof(*ip6h) + 8);
468 468
469 /* ICMPv6 */ 469 /* ICMPv6 */
470 skb_set_transport_header(skb, skb->len); 470 skb_set_transport_header(skb, skb->len);
471 mldq = (struct mld_msg *) icmp6_hdr(skb); 471 mldq = (struct mld_msg *) icmp6_hdr(skb);
472 472
473 interval = ipv6_addr_any(group) ? 473 interval = ipv6_addr_any(group) ?
474 br->multicast_query_response_interval : 474 br->multicast_query_response_interval :
475 br->multicast_last_member_interval; 475 br->multicast_last_member_interval;
476 476
477 mldq->mld_type = ICMPV6_MGM_QUERY; 477 mldq->mld_type = ICMPV6_MGM_QUERY;
478 mldq->mld_code = 0; 478 mldq->mld_code = 0;
479 mldq->mld_cksum = 0; 479 mldq->mld_cksum = 0;
480 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 480 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
481 mldq->mld_reserved = 0; 481 mldq->mld_reserved = 0;
482 mldq->mld_mca = *group; 482 mldq->mld_mca = *group;
483 483
484 /* checksum */ 484 /* checksum */
485 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 485 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
486 sizeof(*mldq), IPPROTO_ICMPV6, 486 sizeof(*mldq), IPPROTO_ICMPV6,
487 csum_partial(mldq, 487 csum_partial(mldq,
488 sizeof(*mldq), 0)); 488 sizeof(*mldq), 0));
489 skb_put(skb, sizeof(*mldq)); 489 skb_put(skb, sizeof(*mldq));
490 490
491 __skb_pull(skb, sizeof(*eth)); 491 __skb_pull(skb, sizeof(*eth));
492 492
493 out: 493 out:
494 return skb; 494 return skb;
495 } 495 }
496 #endif 496 #endif
497 497
498 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, 498 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
499 struct br_ip *addr) 499 struct br_ip *addr)
500 { 500 {
501 switch (addr->proto) { 501 switch (addr->proto) {
502 case htons(ETH_P_IP): 502 case htons(ETH_P_IP):
503 return br_ip4_multicast_alloc_query(br, addr->u.ip4); 503 return br_ip4_multicast_alloc_query(br, addr->u.ip4);
504 #if IS_ENABLED(CONFIG_IPV6) 504 #if IS_ENABLED(CONFIG_IPV6)
505 case htons(ETH_P_IPV6): 505 case htons(ETH_P_IPV6):
506 return br_ip6_multicast_alloc_query(br, &addr->u.ip6); 506 return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
507 #endif 507 #endif
508 } 508 }
509 return NULL; 509 return NULL;
510 } 510 }
511 511
512 static struct net_bridge_mdb_entry *br_multicast_get_group( 512 static struct net_bridge_mdb_entry *br_multicast_get_group(
513 struct net_bridge *br, struct net_bridge_port *port, 513 struct net_bridge *br, struct net_bridge_port *port,
514 struct br_ip *group, int hash) 514 struct br_ip *group, int hash)
515 { 515 {
516 struct net_bridge_mdb_htable *mdb; 516 struct net_bridge_mdb_htable *mdb;
517 struct net_bridge_mdb_entry *mp; 517 struct net_bridge_mdb_entry *mp;
518 unsigned int count = 0; 518 unsigned int count = 0;
519 unsigned int max; 519 unsigned int max;
520 int elasticity; 520 int elasticity;
521 int err; 521 int err;
522 522
523 mdb = rcu_dereference_protected(br->mdb, 1); 523 mdb = rcu_dereference_protected(br->mdb, 1);
524 hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) { 524 hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
525 count++; 525 count++;
526 if (unlikely(br_ip_equal(group, &mp->addr))) 526 if (unlikely(br_ip_equal(group, &mp->addr)))
527 return mp; 527 return mp;
528 } 528 }
529 529
530 elasticity = 0; 530 elasticity = 0;
531 max = mdb->max; 531 max = mdb->max;
532 532
533 if (unlikely(count > br->hash_elasticity && count)) { 533 if (unlikely(count > br->hash_elasticity && count)) {
534 if (net_ratelimit()) 534 if (net_ratelimit())
535 br_info(br, "Multicast hash table " 535 br_info(br, "Multicast hash table "
536 "chain limit reached: %s\n", 536 "chain limit reached: %s\n",
537 port ? port->dev->name : br->dev->name); 537 port ? port->dev->name : br->dev->name);
538 538
539 elasticity = br->hash_elasticity; 539 elasticity = br->hash_elasticity;
540 } 540 }
541 541
542 if (mdb->size >= max) { 542 if (mdb->size >= max) {
543 max *= 2; 543 max *= 2;
544 if (unlikely(max > br->hash_max)) { 544 if (unlikely(max > br->hash_max)) {
545 br_warn(br, "Multicast hash table maximum of %d " 545 br_warn(br, "Multicast hash table maximum of %d "
546 "reached, disabling snooping: %s\n", 546 "reached, disabling snooping: %s\n",
547 br->hash_max, 547 br->hash_max,
548 port ? port->dev->name : br->dev->name); 548 port ? port->dev->name : br->dev->name);
549 err = -E2BIG; 549 err = -E2BIG;
550 disable: 550 disable:
551 br->multicast_disabled = 1; 551 br->multicast_disabled = 1;
552 goto err; 552 goto err;
553 } 553 }
554 } 554 }
555 555
556 if (max > mdb->max || elasticity) { 556 if (max > mdb->max || elasticity) {
557 if (mdb->old) { 557 if (mdb->old) {
558 if (net_ratelimit()) 558 if (net_ratelimit())
559 br_info(br, "Multicast hash table " 559 br_info(br, "Multicast hash table "
560 "on fire: %s\n", 560 "on fire: %s\n",
561 port ? port->dev->name : br->dev->name); 561 port ? port->dev->name : br->dev->name);
562 err = -EEXIST; 562 err = -EEXIST;
563 goto err; 563 goto err;
564 } 564 }
565 565
566 err = br_mdb_rehash(&br->mdb, max, elasticity); 566 err = br_mdb_rehash(&br->mdb, max, elasticity);
567 if (err) { 567 if (err) {
568 br_warn(br, "Cannot rehash multicast " 568 br_warn(br, "Cannot rehash multicast "
569 "hash table, disabling snooping: %s, %d, %d\n", 569 "hash table, disabling snooping: %s, %d, %d\n",
570 port ? port->dev->name : br->dev->name, 570 port ? port->dev->name : br->dev->name,
571 mdb->size, err); 571 mdb->size, err);
572 goto disable; 572 goto disable;
573 } 573 }
574 574
575 err = -EAGAIN; 575 err = -EAGAIN;
576 goto err; 576 goto err;
577 } 577 }
578 578
579 return NULL; 579 return NULL;
580 580
581 err: 581 err:
582 mp = ERR_PTR(err); 582 mp = ERR_PTR(err);
583 return mp; 583 return mp;
584 } 584 }
585 585
/* Find the mdb entry for @group, allocating it (and, on first use,
 * the mdb hash table itself) when it does not exist yet.  Returns the
 * entry or an ERR_PTR().  Caller holds br->multicast_lock, which is
 * why rcu_dereference_protected(..., 1) is safe here.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		/* First group ever: create the table, then jump into the
		 * rehash path below to re-read the fresh mdb pointer.
		 */
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		/* mp == NULL: not found, allocate a new entry below */
		break;

	case -EAGAIN:
		/* get_group grew the table; recompute the bucket */
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		/* existing entry or hard error: hand it back as-is */
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}
633 633
/* Allocate and initialise a per-port group membership entry and link
 * it at the head of @port's mglist.  @next becomes the entry's RCU
 * successor in the mdb entry's port list.  Returns NULL on allocation
 * failure.  The entry's expiry timer is set up but not armed here.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char state)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->state = state;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}
655 655
656 static int br_multicast_add_group(struct net_bridge *br, 656 static int br_multicast_add_group(struct net_bridge *br,
657 struct net_bridge_port *port, 657 struct net_bridge_port *port,
658 struct br_ip *group) 658 struct br_ip *group)
659 { 659 {
660 struct net_bridge_mdb_entry *mp; 660 struct net_bridge_mdb_entry *mp;
661 struct net_bridge_port_group *p; 661 struct net_bridge_port_group *p;
662 struct net_bridge_port_group __rcu **pp; 662 struct net_bridge_port_group __rcu **pp;
663 unsigned long now = jiffies; 663 unsigned long now = jiffies;
664 int err; 664 int err;
665 665
666 spin_lock(&br->multicast_lock); 666 spin_lock(&br->multicast_lock);
667 if (!netif_running(br->dev) || 667 if (!netif_running(br->dev) ||
668 (port && port->state == BR_STATE_DISABLED)) 668 (port && port->state == BR_STATE_DISABLED))
669 goto out; 669 goto out;
670 670
671 mp = br_multicast_new_group(br, port, group); 671 mp = br_multicast_new_group(br, port, group);
672 err = PTR_ERR(mp); 672 err = PTR_ERR(mp);
673 if (IS_ERR(mp)) 673 if (IS_ERR(mp))
674 goto err; 674 goto err;
675 675
676 if (!port) { 676 if (!port) {
677 mp->mglist = true; 677 mp->mglist = true;
678 mod_timer(&mp->timer, now + br->multicast_membership_interval); 678 mod_timer(&mp->timer, now + br->multicast_membership_interval);
679 goto out; 679 goto out;
680 } 680 }
681 681
682 for (pp = &mp->ports; 682 for (pp = &mp->ports;
683 (p = mlock_dereference(*pp, br)) != NULL; 683 (p = mlock_dereference(*pp, br)) != NULL;
684 pp = &p->next) { 684 pp = &p->next) {
685 if (p->port == port) 685 if (p->port == port)
686 goto found; 686 goto found;
687 if ((unsigned long)p->port < (unsigned long)port) 687 if ((unsigned long)p->port < (unsigned long)port)
688 break; 688 break;
689 } 689 }
690 690
691 p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY); 691 p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
692 if (unlikely(!p)) 692 if (unlikely(!p))
693 goto err; 693 goto err;
694 rcu_assign_pointer(*pp, p); 694 rcu_assign_pointer(*pp, p);
695 br_mdb_notify(br->dev, port, group, RTM_NEWMDB); 695 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
696 696
697 found: 697 found:
698 mod_timer(&p->timer, now + br->multicast_membership_interval); 698 mod_timer(&p->timer, now + br->multicast_membership_interval);
699 out: 699 out:
700 err = 0; 700 err = 0;
701 701
702 err: 702 err:
703 spin_unlock(&br->multicast_lock); 703 spin_unlock(&br->multicast_lock);
704 return err; 704 return err;
705 } 705 }
706 706
/* IPv4 wrapper: build a br_ip key for @group/@vid and add it.
 * Link-local multicast (224.0.0.x) is never snooped, so it is
 * silently accepted without creating mdb state.
 */
static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
723 723
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 wrapper: build a br_ip key for @group/@vid and add it.
 * The link-local all-nodes group (ff02::1) is never snooped.
 */
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif
742 742
/* Timer callback: a port's router-presence timer ran out, so drop the
 * port from the bridge's router_list unless the timer was re-armed in
 * the meantime or the port is configured as a permanent router
 * (multicast_router != 1).
 */
static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router != 1 ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);

out:
	spin_unlock(&br->multicast_lock);
}
759 759
/* Timer callback for the bridge's own router timer.  Intentionally a
 * no-op: presumably router presence is derived elsewhere via
 * timer_pending() on this timer, so expiry itself needs no action —
 * NOTE(review): confirm against br_multicast_is_router().
 */
static void br_multicast_local_router_expired(unsigned long data)
{
}
763 763
/* The "other querier present" timer expired: no foreign querier was
 * heard within the interval, so take over and start sending our own
 * queries (unless the bridge is down or snooping is disabled).
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}
776 776
/* IPv4 timer trampoline for br_multicast_querier_expired(). */
static void br_ip4_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip4_own_query);
}
783 783
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 timer trampoline for br_multicast_querier_expired(). */
static void br_ip6_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif
792 792
/* Record the source address of our own outgoing query @skb as the
 * current querier address for the matching protocol family.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
804 804
805 static void __br_multicast_send_query(struct net_bridge *br, 805 static void __br_multicast_send_query(struct net_bridge *br,
806 struct net_bridge_port *port, 806 struct net_bridge_port *port,
807 struct br_ip *ip) 807 struct br_ip *ip)
808 { 808 {
809 struct sk_buff *skb; 809 struct sk_buff *skb;
810 810
811 skb = br_multicast_alloc_query(br, ip); 811 skb = br_multicast_alloc_query(br, ip);
812 if (!skb) 812 if (!skb)
813 return; 813 return;
814 814
815 if (port) { 815 if (port) {
816 __skb_push(skb, sizeof(struct ethhdr));
817 skb->dev = port->dev; 816 skb->dev = port->dev;
818 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 817 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
819 dev_queue_xmit); 818 br_dev_queue_push_xmit);
820 } else { 819 } else {
821 br_multicast_select_own_querier(br, ip, skb); 820 br_multicast_select_own_querier(br, ip, skb);
822 netif_rx(skb); 821 netif_rx(skb);
823 } 822 }
824 } 823 }
825 824
/* Send an own query for the family selected by @own_query, unless a
 * foreign querier is still considered present (its timer pending), and
 * re-arm @own_query's timer with either the startup or the steady-state
 * interval.  Note the #if: without IPv6 support the else-branch is
 * compiled out and other_query stays NULL for non-IPv4 callers.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	unsigned long time;
	struct br_ip br_group;
	struct bridge_mcast_other_query *other_query = NULL;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	/* zeroed group address == general query */
	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* defer to a foreign querier while its presence timer runs */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
862 861
/* Per-port own-query timer expired: send the next query on this port
 * (tracking how many startup queries have gone out) unless the port is
 * disabled or blocking.
 */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}
882 881
/* IPv4 timer trampoline for br_multicast_port_query_expired(). */
static void br_ip4_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}
889 888
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 timer trampoline for br_multicast_port_query_expired(). */
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif
898 897
/* Initialise multicast state for a newly added bridge port: default
 * router mode (1 = learn from queries) and the router/own-query timers.
 * Timers are set up here but armed later.
 */
void br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = 1;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
}
912 911
/* Tear down per-port multicast state on port removal.  Only the router
 * timer is stopped synchronously here; the own-query timers are stopped
 * in br_multicast_disable_port().
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}
917 916
/* Restart an own-query cycle: reset the startup-query counter and, if
 * the timer can be (re)claimed, fire the first query immediately.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
926 925
/* Kick off querying on @port when it comes up, unless snooping is
 * disabled or the bridge device is down.
 */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif

out:
	spin_unlock(&br->multicast_lock);
}
943 942
/* Quiesce multicast state on @port: drop all of its group memberships,
 * remove it from the router list and stop its timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist))
		hlist_del_init_rcu(&port->rlist);
	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
963 962
/* Walk the group records of an IGMPv3 membership report in @skb and
 * add each group of a recognised record type, treating every record as
 * a plain IGMPv2 join (source lists are validated for length but
 * otherwise ignored).  Returns 0 or -EINVAL on a truncated packet.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	if (!pskb_may_pull(skb, sizeof(*ih)))
		return -EINVAL;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* recompute after the pull: skb->data may have moved */
		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		/* skip over the record's source address list */
		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip4_multicast_add_group(br, port, group, vid);
		if (err)
			break;
	}

	return err;
}
1019 1018
1020 #if IS_ENABLED(CONFIG_IPV6) 1019 #if IS_ENABLED(CONFIG_IPV6)
1021 static int br_ip6_multicast_mld2_report(struct net_bridge *br, 1020 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1022 struct net_bridge_port *port, 1021 struct net_bridge_port *port,
1023 struct sk_buff *skb, 1022 struct sk_buff *skb,
1024 u16 vid) 1023 u16 vid)
1025 { 1024 {
1026 struct icmp6hdr *icmp6h; 1025 struct icmp6hdr *icmp6h;
1027 struct mld2_grec *grec; 1026 struct mld2_grec *grec;
1028 int i; 1027 int i;
1029 int len; 1028 int len;
1030 int num; 1029 int num;
1031 int err = 0; 1030 int err = 0;
1032 1031
1033 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 1032 if (!pskb_may_pull(skb, sizeof(*icmp6h)))
1034 return -EINVAL; 1033 return -EINVAL;
1035 1034
1036 icmp6h = icmp6_hdr(skb); 1035 icmp6h = icmp6_hdr(skb);
1037 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1036 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1038 len = sizeof(*icmp6h); 1037 len = sizeof(*icmp6h);
1039 1038
1040 for (i = 0; i < num; i++) { 1039 for (i = 0; i < num; i++) {
1041 __be16 *nsrcs, _nsrcs; 1040 __be16 *nsrcs, _nsrcs;
1042 1041
1043 nsrcs = skb_header_pointer(skb, 1042 nsrcs = skb_header_pointer(skb,
1044 len + offsetof(struct mld2_grec, 1043 len + offsetof(struct mld2_grec,
1045 grec_nsrcs), 1044 grec_nsrcs),
1046 sizeof(_nsrcs), &_nsrcs); 1045 sizeof(_nsrcs), &_nsrcs);
1047 if (!nsrcs) 1046 if (!nsrcs)
1048 return -EINVAL; 1047 return -EINVAL;
1049 1048
1050 if (!pskb_may_pull(skb, 1049 if (!pskb_may_pull(skb,
1051 len + sizeof(*grec) + 1050 len + sizeof(*grec) +
1052 sizeof(struct in6_addr) * ntohs(*nsrcs))) 1051 sizeof(struct in6_addr) * ntohs(*nsrcs)))
1053 return -EINVAL; 1052 return -EINVAL;
1054 1053
1055 grec = (struct mld2_grec *)(skb->data + len); 1054 grec = (struct mld2_grec *)(skb->data + len);
1056 len += sizeof(*grec) + 1055 len += sizeof(*grec) +
1057 sizeof(struct in6_addr) * ntohs(*nsrcs); 1056 sizeof(struct in6_addr) * ntohs(*nsrcs);
1058 1057
1059 /* We treat these as MLDv1 reports for now. */ 1058 /* We treat these as MLDv1 reports for now. */
1060 switch (grec->grec_type) { 1059 switch (grec->grec_type) {
1061 case MLD2_MODE_IS_INCLUDE: 1060 case MLD2_MODE_IS_INCLUDE:
1062 case MLD2_MODE_IS_EXCLUDE: 1061 case MLD2_MODE_IS_EXCLUDE:
1063 case MLD2_CHANGE_TO_INCLUDE: 1062 case MLD2_CHANGE_TO_INCLUDE:
1064 case MLD2_CHANGE_TO_EXCLUDE: 1063 case MLD2_CHANGE_TO_EXCLUDE:
1065 case MLD2_ALLOW_NEW_SOURCES: 1064 case MLD2_ALLOW_NEW_SOURCES:
1066 case MLD2_BLOCK_OLD_SOURCES: 1065 case MLD2_BLOCK_OLD_SOURCES:
1067 break; 1066 break;
1068 1067
1069 default: 1068 default:
1070 continue; 1069 continue;
1071 } 1070 }
1072 1071
1073 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, 1072 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
1074 vid); 1073 vid);
1075 if (!err) 1074 if (!err)
1076 break; 1075 break;
1077 } 1076 }
1078 1077
1079 return err; 1078 return err;
1080 } 1079 }
1081 #endif 1080 #endif
1082 1081
/* IGMP querier election: adopt @saddr as the elected querier when no
 * querier is currently active (neither timer pending), none is known
 * yet, or @saddr is numerically lower-or-equal (lowest address wins).
 * Returns true when @saddr won.  Caller holds br->multicast_lock.
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}
1107 1106
#if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election, IPv6 counterpart of the IPv4 variant above:
 * lowest source address wins.  Returns true when @saddr is adopted.
 * Caller holds br->multicast_lock.
 */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif
1131 1130
/* Dispatch querier election by address family.  Unknown protocols
 * (including ETH_P_IPV6 when IPv6 support is compiled out) lose the
 * election by default.
 */
static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}
1147 1146
/* A foreign query was heard: (re)arm the other-querier-present timer
 * for a full querier interval.  delay_time is only recorded when the
 * timer was not already running (first query of a round).
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
1158 1157
/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* find the last entry with a larger pointer than @port */
	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
}
1181 1180
/* A query was received via @port (or locally when @port is NULL):
 * note the sender as a multicast router for one querier interval.
 * Nothing is done for ports pinned to a fixed router mode
 * (multicast_router != 1).
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == 1)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router != 1)
		return;

	/* already on the router list: just refresh the timer */
	if (!hlist_unhashed(&port->rlist))
		goto timer;

	br_multicast_add_router(br, port);

timer:
	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
1206 1205
/* Handle a received general query: if its source wins the querier
 * election, refresh the other-querier timer and mark the ingress port
 * as leading to a multicast router.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}
1219 1218
1220 static int br_ip4_multicast_query(struct net_bridge *br, 1219 static int br_ip4_multicast_query(struct net_bridge *br,
1221 struct net_bridge_port *port, 1220 struct net_bridge_port *port,
1222 struct sk_buff *skb, 1221 struct sk_buff *skb,
1223 u16 vid) 1222 u16 vid)
1224 { 1223 {
1225 const struct iphdr *iph = ip_hdr(skb); 1224 const struct iphdr *iph = ip_hdr(skb);
1226 struct igmphdr *ih = igmp_hdr(skb); 1225 struct igmphdr *ih = igmp_hdr(skb);
1227 struct net_bridge_mdb_entry *mp; 1226 struct net_bridge_mdb_entry *mp;
1228 struct igmpv3_query *ih3; 1227 struct igmpv3_query *ih3;
1229 struct net_bridge_port_group *p; 1228 struct net_bridge_port_group *p;
1230 struct net_bridge_port_group __rcu **pp; 1229 struct net_bridge_port_group __rcu **pp;
1231 struct br_ip saddr; 1230 struct br_ip saddr;
1232 unsigned long max_delay; 1231 unsigned long max_delay;
1233 unsigned long now = jiffies; 1232 unsigned long now = jiffies;
1234 __be32 group; 1233 __be32 group;
1235 int err = 0; 1234 int err = 0;
1236 1235
1237 spin_lock(&br->multicast_lock); 1236 spin_lock(&br->multicast_lock);
1238 if (!netif_running(br->dev) || 1237 if (!netif_running(br->dev) ||
1239 (port && port->state == BR_STATE_DISABLED)) 1238 (port && port->state == BR_STATE_DISABLED))
1240 goto out; 1239 goto out;
1241 1240
1242 group = ih->group; 1241 group = ih->group;
1243 1242
1244 if (skb->len == sizeof(*ih)) { 1243 if (skb->len == sizeof(*ih)) {
1245 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE); 1244 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
1246 1245
1247 if (!max_delay) { 1246 if (!max_delay) {
1248 max_delay = 10 * HZ; 1247 max_delay = 10 * HZ;
1249 group = 0; 1248 group = 0;
1250 } 1249 }
1251 } else { 1250 } else {
1252 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) { 1251 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
1253 err = -EINVAL; 1252 err = -EINVAL;
1254 goto out; 1253 goto out;
1255 } 1254 }
1256 1255
1257 ih3 = igmpv3_query_hdr(skb); 1256 ih3 = igmpv3_query_hdr(skb);
1258 if (ih3->nsrcs) 1257 if (ih3->nsrcs)
1259 goto out; 1258 goto out;
1260 1259
1261 max_delay = ih3->code ? 1260 max_delay = ih3->code ?
1262 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 1261 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
1263 } 1262 }
1264 1263
1265 /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer 1264 /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
1266 * all-systems destination addresses (224.0.0.1) for general queries 1265 * all-systems destination addresses (224.0.0.1) for general queries
1267 */ 1266 */
1268 if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) { 1267 if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
1269 err = -EINVAL; 1268 err = -EINVAL;
1270 goto out; 1269 goto out;
1271 } 1270 }
1272 1271
1273 if (!group) { 1272 if (!group) {
1274 saddr.proto = htons(ETH_P_IP); 1273 saddr.proto = htons(ETH_P_IP);
1275 saddr.u.ip4 = iph->saddr; 1274 saddr.u.ip4 = iph->saddr;
1276 1275
1277 br_multicast_query_received(br, port, &br->ip4_other_query, 1276 br_multicast_query_received(br, port, &br->ip4_other_query,
1278 &saddr, max_delay); 1277 &saddr, max_delay);
1279 goto out; 1278 goto out;
1280 } 1279 }
1281 1280
1282 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid); 1281 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
1283 if (!mp) 1282 if (!mp)
1284 goto out; 1283 goto out;
1285 1284
1286 max_delay *= br->multicast_last_member_count; 1285 max_delay *= br->multicast_last_member_count;
1287 1286
1288 if (mp->mglist && 1287 if (mp->mglist &&
1289 (timer_pending(&mp->timer) ? 1288 (timer_pending(&mp->timer) ?
1290 time_after(mp->timer.expires, now + max_delay) : 1289 time_after(mp->timer.expires, now + max_delay) :
1291 try_to_del_timer_sync(&mp->timer) >= 0)) 1290 try_to_del_timer_sync(&mp->timer) >= 0))
1292 mod_timer(&mp->timer, now + max_delay); 1291 mod_timer(&mp->timer, now + max_delay);
1293 1292
1294 for (pp = &mp->ports; 1293 for (pp = &mp->ports;
1295 (p = mlock_dereference(*pp, br)) != NULL; 1294 (p = mlock_dereference(*pp, br)) != NULL;
1296 pp = &p->next) { 1295 pp = &p->next) {
1297 if (timer_pending(&p->timer) ? 1296 if (timer_pending(&p->timer) ?
1298 time_after(p->timer.expires, now + max_delay) : 1297 time_after(p->timer.expires, now + max_delay) :
1299 try_to_del_timer_sync(&p->timer) >= 0) 1298 try_to_del_timer_sync(&p->timer) >= 0)
1300 mod_timer(&p->timer, now + max_delay); 1299 mod_timer(&p->timer, now + max_delay);
1301 } 1300 }
1302 1301
1303 out: 1302 out:
1304 spin_unlock(&br->multicast_lock); 1303 spin_unlock(&br->multicast_lock);
1305 return err; 1304 return err;
1306 } 1305 }
1307 1306
1308 #if IS_ENABLED(CONFIG_IPV6) 1307 #if IS_ENABLED(CONFIG_IPV6)
1309 static int br_ip6_multicast_query(struct net_bridge *br, 1308 static int br_ip6_multicast_query(struct net_bridge *br,
1310 struct net_bridge_port *port, 1309 struct net_bridge_port *port,
1311 struct sk_buff *skb, 1310 struct sk_buff *skb,
1312 u16 vid) 1311 u16 vid)
1313 { 1312 {
1314 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 1313 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1315 struct mld_msg *mld; 1314 struct mld_msg *mld;
1316 struct net_bridge_mdb_entry *mp; 1315 struct net_bridge_mdb_entry *mp;
1317 struct mld2_query *mld2q; 1316 struct mld2_query *mld2q;
1318 struct net_bridge_port_group *p; 1317 struct net_bridge_port_group *p;
1319 struct net_bridge_port_group __rcu **pp; 1318 struct net_bridge_port_group __rcu **pp;
1320 struct br_ip saddr; 1319 struct br_ip saddr;
1321 unsigned long max_delay; 1320 unsigned long max_delay;
1322 unsigned long now = jiffies; 1321 unsigned long now = jiffies;
1323 const struct in6_addr *group = NULL; 1322 const struct in6_addr *group = NULL;
1324 bool is_general_query; 1323 bool is_general_query;
1325 int err = 0; 1324 int err = 0;
1326 1325
1327 spin_lock(&br->multicast_lock); 1326 spin_lock(&br->multicast_lock);
1328 if (!netif_running(br->dev) || 1327 if (!netif_running(br->dev) ||
1329 (port && port->state == BR_STATE_DISABLED)) 1328 (port && port->state == BR_STATE_DISABLED))
1330 goto out; 1329 goto out;
1331 1330
1332 /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */ 1331 /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
1333 if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) { 1332 if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
1334 err = -EINVAL; 1333 err = -EINVAL;
1335 goto out; 1334 goto out;
1336 } 1335 }
1337 1336
1338 if (skb->len == sizeof(*mld)) { 1337 if (skb->len == sizeof(*mld)) {
1339 if (!pskb_may_pull(skb, sizeof(*mld))) { 1338 if (!pskb_may_pull(skb, sizeof(*mld))) {
1340 err = -EINVAL; 1339 err = -EINVAL;
1341 goto out; 1340 goto out;
1342 } 1341 }
1343 mld = (struct mld_msg *) icmp6_hdr(skb); 1342 mld = (struct mld_msg *) icmp6_hdr(skb);
1344 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); 1343 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
1345 if (max_delay) 1344 if (max_delay)
1346 group = &mld->mld_mca; 1345 group = &mld->mld_mca;
1347 } else { 1346 } else {
1348 if (!pskb_may_pull(skb, sizeof(*mld2q))) { 1347 if (!pskb_may_pull(skb, sizeof(*mld2q))) {
1349 err = -EINVAL; 1348 err = -EINVAL;
1350 goto out; 1349 goto out;
1351 } 1350 }
1352 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1351 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1353 if (!mld2q->mld2q_nsrcs) 1352 if (!mld2q->mld2q_nsrcs)
1354 group = &mld2q->mld2q_mca; 1353 group = &mld2q->mld2q_mca;
1355 1354
1356 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); 1355 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
1357 } 1356 }
1358 1357
1359 is_general_query = group && ipv6_addr_any(group); 1358 is_general_query = group && ipv6_addr_any(group);
1360 1359
1361 /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer 1360 /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
1362 * all-nodes destination address (ff02::1) for general queries 1361 * all-nodes destination address (ff02::1) for general queries
1363 */ 1362 */
1364 if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) { 1363 if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
1365 err = -EINVAL; 1364 err = -EINVAL;
1366 goto out; 1365 goto out;
1367 } 1366 }
1368 1367
1369 if (is_general_query) { 1368 if (is_general_query) {
1370 saddr.proto = htons(ETH_P_IPV6); 1369 saddr.proto = htons(ETH_P_IPV6);
1371 saddr.u.ip6 = ip6h->saddr; 1370 saddr.u.ip6 = ip6h->saddr;
1372 1371
1373 br_multicast_query_received(br, port, &br->ip6_other_query, 1372 br_multicast_query_received(br, port, &br->ip6_other_query,
1374 &saddr, max_delay); 1373 &saddr, max_delay);
1375 goto out; 1374 goto out;
1376 } else if (!group) { 1375 } else if (!group) {
1377 goto out; 1376 goto out;
1378 } 1377 }
1379 1378
1380 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid); 1379 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
1381 if (!mp) 1380 if (!mp)
1382 goto out; 1381 goto out;
1383 1382
1384 max_delay *= br->multicast_last_member_count; 1383 max_delay *= br->multicast_last_member_count;
1385 if (mp->mglist && 1384 if (mp->mglist &&
1386 (timer_pending(&mp->timer) ? 1385 (timer_pending(&mp->timer) ?
1387 time_after(mp->timer.expires, now + max_delay) : 1386 time_after(mp->timer.expires, now + max_delay) :
1388 try_to_del_timer_sync(&mp->timer) >= 0)) 1387 try_to_del_timer_sync(&mp->timer) >= 0))
1389 mod_timer(&mp->timer, now + max_delay); 1388 mod_timer(&mp->timer, now + max_delay);
1390 1389
1391 for (pp = &mp->ports; 1390 for (pp = &mp->ports;
1392 (p = mlock_dereference(*pp, br)) != NULL; 1391 (p = mlock_dereference(*pp, br)) != NULL;
1393 pp = &p->next) { 1392 pp = &p->next) {
1394 if (timer_pending(&p->timer) ? 1393 if (timer_pending(&p->timer) ?
1395 time_after(p->timer.expires, now + max_delay) : 1394 time_after(p->timer.expires, now + max_delay) :
1396 try_to_del_timer_sync(&p->timer) >= 0) 1395 try_to_del_timer_sync(&p->timer) >= 0)
1397 mod_timer(&p->timer, now + max_delay); 1396 mod_timer(&p->timer, now + max_delay);
1398 } 1397 }
1399 1398
1400 out: 1399 out:
1401 spin_unlock(&br->multicast_lock); 1400 spin_unlock(&br->multicast_lock);
1402 return err; 1401 return err;
1403 } 1402 }
1404 #endif 1403 #endif
1405 1404
1406 static void 1405 static void
1407 br_multicast_leave_group(struct net_bridge *br, 1406 br_multicast_leave_group(struct net_bridge *br,
1408 struct net_bridge_port *port, 1407 struct net_bridge_port *port,
1409 struct br_ip *group, 1408 struct br_ip *group,
1410 struct bridge_mcast_other_query *other_query, 1409 struct bridge_mcast_other_query *other_query,
1411 struct bridge_mcast_own_query *own_query) 1410 struct bridge_mcast_own_query *own_query)
1412 { 1411 {
1413 struct net_bridge_mdb_htable *mdb; 1412 struct net_bridge_mdb_htable *mdb;
1414 struct net_bridge_mdb_entry *mp; 1413 struct net_bridge_mdb_entry *mp;
1415 struct net_bridge_port_group *p; 1414 struct net_bridge_port_group *p;
1416 unsigned long now; 1415 unsigned long now;
1417 unsigned long time; 1416 unsigned long time;
1418 1417
1419 spin_lock(&br->multicast_lock); 1418 spin_lock(&br->multicast_lock);
1420 if (!netif_running(br->dev) || 1419 if (!netif_running(br->dev) ||
1421 (port && port->state == BR_STATE_DISABLED) || 1420 (port && port->state == BR_STATE_DISABLED) ||
1422 timer_pending(&other_query->timer)) 1421 timer_pending(&other_query->timer))
1423 goto out; 1422 goto out;
1424 1423
1425 mdb = mlock_dereference(br->mdb, br); 1424 mdb = mlock_dereference(br->mdb, br);
1426 mp = br_mdb_ip_get(mdb, group); 1425 mp = br_mdb_ip_get(mdb, group);
1427 if (!mp) 1426 if (!mp)
1428 goto out; 1427 goto out;
1429 1428
1430 if (br->multicast_querier) { 1429 if (br->multicast_querier) {
1431 __br_multicast_send_query(br, port, &mp->addr); 1430 __br_multicast_send_query(br, port, &mp->addr);
1432 1431
1433 time = jiffies + br->multicast_last_member_count * 1432 time = jiffies + br->multicast_last_member_count *
1434 br->multicast_last_member_interval; 1433 br->multicast_last_member_interval;
1435 1434
1436 mod_timer(&own_query->timer, time); 1435 mod_timer(&own_query->timer, time);
1437 1436
1438 for (p = mlock_dereference(mp->ports, br); 1437 for (p = mlock_dereference(mp->ports, br);
1439 p != NULL; 1438 p != NULL;
1440 p = mlock_dereference(p->next, br)) { 1439 p = mlock_dereference(p->next, br)) {
1441 if (p->port != port) 1440 if (p->port != port)
1442 continue; 1441 continue;
1443 1442
1444 if (!hlist_unhashed(&p->mglist) && 1443 if (!hlist_unhashed(&p->mglist) &&
1445 (timer_pending(&p->timer) ? 1444 (timer_pending(&p->timer) ?
1446 time_after(p->timer.expires, time) : 1445 time_after(p->timer.expires, time) :
1447 try_to_del_timer_sync(&p->timer) >= 0)) { 1446 try_to_del_timer_sync(&p->timer) >= 0)) {
1448 mod_timer(&p->timer, time); 1447 mod_timer(&p->timer, time);
1449 } 1448 }
1450 1449
1451 break; 1450 break;
1452 } 1451 }
1453 } 1452 }
1454 1453
1455 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) { 1454 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1456 struct net_bridge_port_group __rcu **pp; 1455 struct net_bridge_port_group __rcu **pp;
1457 1456
1458 for (pp = &mp->ports; 1457 for (pp = &mp->ports;
1459 (p = mlock_dereference(*pp, br)) != NULL; 1458 (p = mlock_dereference(*pp, br)) != NULL;
1460 pp = &p->next) { 1459 pp = &p->next) {
1461 if (p->port != port) 1460 if (p->port != port)
1462 continue; 1461 continue;
1463 1462
1464 rcu_assign_pointer(*pp, p->next); 1463 rcu_assign_pointer(*pp, p->next);
1465 hlist_del_init(&p->mglist); 1464 hlist_del_init(&p->mglist);
1466 del_timer(&p->timer); 1465 del_timer(&p->timer);
1467 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1466 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1468 br_mdb_notify(br->dev, port, group, RTM_DELMDB); 1467 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1469 1468
1470 if (!mp->ports && !mp->mglist && 1469 if (!mp->ports && !mp->mglist &&
1471 netif_running(br->dev)) 1470 netif_running(br->dev))
1472 mod_timer(&mp->timer, jiffies); 1471 mod_timer(&mp->timer, jiffies);
1473 } 1472 }
1474 goto out; 1473 goto out;
1475 } 1474 }
1476 1475
1477 now = jiffies; 1476 now = jiffies;
1478 time = now + br->multicast_last_member_count * 1477 time = now + br->multicast_last_member_count *
1479 br->multicast_last_member_interval; 1478 br->multicast_last_member_interval;
1480 1479
1481 if (!port) { 1480 if (!port) {
1482 if (mp->mglist && 1481 if (mp->mglist &&
1483 (timer_pending(&mp->timer) ? 1482 (timer_pending(&mp->timer) ?
1484 time_after(mp->timer.expires, time) : 1483 time_after(mp->timer.expires, time) :
1485 try_to_del_timer_sync(&mp->timer) >= 0)) { 1484 try_to_del_timer_sync(&mp->timer) >= 0)) {
1486 mod_timer(&mp->timer, time); 1485 mod_timer(&mp->timer, time);
1487 } 1486 }
1488 1487
1489 goto out; 1488 goto out;
1490 } 1489 }
1491 1490
1492 for (p = mlock_dereference(mp->ports, br); 1491 for (p = mlock_dereference(mp->ports, br);
1493 p != NULL; 1492 p != NULL;
1494 p = mlock_dereference(p->next, br)) { 1493 p = mlock_dereference(p->next, br)) {
1495 if (p->port != port) 1494 if (p->port != port)
1496 continue; 1495 continue;
1497 1496
1498 if (!hlist_unhashed(&p->mglist) && 1497 if (!hlist_unhashed(&p->mglist) &&
1499 (timer_pending(&p->timer) ? 1498 (timer_pending(&p->timer) ?
1500 time_after(p->timer.expires, time) : 1499 time_after(p->timer.expires, time) :
1501 try_to_del_timer_sync(&p->timer) >= 0)) { 1500 try_to_del_timer_sync(&p->timer) >= 0)) {
1502 mod_timer(&p->timer, time); 1501 mod_timer(&p->timer, time);
1503 } 1502 }
1504 1503
1505 break; 1504 break;
1506 } 1505 }
1507 out: 1506 out:
1508 spin_unlock(&br->multicast_lock); 1507 spin_unlock(&br->multicast_lock);
1509 } 1508 }
1510 1509
1511 static void br_ip4_multicast_leave_group(struct net_bridge *br, 1510 static void br_ip4_multicast_leave_group(struct net_bridge *br,
1512 struct net_bridge_port *port, 1511 struct net_bridge_port *port,
1513 __be32 group, 1512 __be32 group,
1514 __u16 vid) 1513 __u16 vid)
1515 { 1514 {
1516 struct br_ip br_group; 1515 struct br_ip br_group;
1517 struct bridge_mcast_own_query *own_query; 1516 struct bridge_mcast_own_query *own_query;
1518 1517
1519 if (ipv4_is_local_multicast(group)) 1518 if (ipv4_is_local_multicast(group))
1520 return; 1519 return;
1521 1520
1522 own_query = port ? &port->ip4_own_query : &br->ip4_own_query; 1521 own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1523 1522
1524 br_group.u.ip4 = group; 1523 br_group.u.ip4 = group;
1525 br_group.proto = htons(ETH_P_IP); 1524 br_group.proto = htons(ETH_P_IP);
1526 br_group.vid = vid; 1525 br_group.vid = vid;
1527 1526
1528 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query, 1527 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
1529 own_query); 1528 own_query);
1530 } 1529 }
1531 1530
1532 #if IS_ENABLED(CONFIG_IPV6) 1531 #if IS_ENABLED(CONFIG_IPV6)
1533 static void br_ip6_multicast_leave_group(struct net_bridge *br, 1532 static void br_ip6_multicast_leave_group(struct net_bridge *br,
1534 struct net_bridge_port *port, 1533 struct net_bridge_port *port,
1535 const struct in6_addr *group, 1534 const struct in6_addr *group,
1536 __u16 vid) 1535 __u16 vid)
1537 { 1536 {
1538 struct br_ip br_group; 1537 struct br_ip br_group;
1539 struct bridge_mcast_own_query *own_query; 1538 struct bridge_mcast_own_query *own_query;
1540 1539
1541 if (ipv6_addr_is_ll_all_nodes(group)) 1540 if (ipv6_addr_is_ll_all_nodes(group))
1542 return; 1541 return;
1543 1542
1544 own_query = port ? &port->ip6_own_query : &br->ip6_own_query; 1543 own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1545 1544
1546 br_group.u.ip6 = *group; 1545 br_group.u.ip6 = *group;
1547 br_group.proto = htons(ETH_P_IPV6); 1546 br_group.proto = htons(ETH_P_IPV6);
1548 br_group.vid = vid; 1547 br_group.vid = vid;
1549 1548
1550 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query, 1549 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
1551 own_query); 1550 own_query);
1552 } 1551 }
1553 #endif 1552 #endif
1554 1553
1555 static int br_multicast_ipv4_rcv(struct net_bridge *br, 1554 static int br_multicast_ipv4_rcv(struct net_bridge *br,
1556 struct net_bridge_port *port, 1555 struct net_bridge_port *port,
1557 struct sk_buff *skb, 1556 struct sk_buff *skb,
1558 u16 vid) 1557 u16 vid)
1559 { 1558 {
1560 struct sk_buff *skb2 = skb; 1559 struct sk_buff *skb2 = skb;
1561 const struct iphdr *iph; 1560 const struct iphdr *iph;
1562 struct igmphdr *ih; 1561 struct igmphdr *ih;
1563 unsigned int len; 1562 unsigned int len;
1564 unsigned int offset; 1563 unsigned int offset;
1565 int err; 1564 int err;
1566 1565
1567 /* We treat OOM as packet loss for now. */ 1566 /* We treat OOM as packet loss for now. */
1568 if (!pskb_may_pull(skb, sizeof(*iph))) 1567 if (!pskb_may_pull(skb, sizeof(*iph)))
1569 return -EINVAL; 1568 return -EINVAL;
1570 1569
1571 iph = ip_hdr(skb); 1570 iph = ip_hdr(skb);
1572 1571
1573 if (iph->ihl < 5 || iph->version != 4) 1572 if (iph->ihl < 5 || iph->version != 4)
1574 return -EINVAL; 1573 return -EINVAL;
1575 1574
1576 if (!pskb_may_pull(skb, ip_hdrlen(skb))) 1575 if (!pskb_may_pull(skb, ip_hdrlen(skb)))
1577 return -EINVAL; 1576 return -EINVAL;
1578 1577
1579 iph = ip_hdr(skb); 1578 iph = ip_hdr(skb);
1580 1579
1581 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1580 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1582 return -EINVAL; 1581 return -EINVAL;
1583 1582
1584 if (iph->protocol != IPPROTO_IGMP) { 1583 if (iph->protocol != IPPROTO_IGMP) {
1585 if (!ipv4_is_local_multicast(iph->daddr)) 1584 if (!ipv4_is_local_multicast(iph->daddr))
1586 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1585 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1587 return 0; 1586 return 0;
1588 } 1587 }
1589 1588
1590 len = ntohs(iph->tot_len); 1589 len = ntohs(iph->tot_len);
1591 if (skb->len < len || len < ip_hdrlen(skb)) 1590 if (skb->len < len || len < ip_hdrlen(skb))
1592 return -EINVAL; 1591 return -EINVAL;
1593 1592
1594 if (skb->len > len) { 1593 if (skb->len > len) {
1595 skb2 = skb_clone(skb, GFP_ATOMIC); 1594 skb2 = skb_clone(skb, GFP_ATOMIC);
1596 if (!skb2) 1595 if (!skb2)
1597 return -ENOMEM; 1596 return -ENOMEM;
1598 1597
1599 err = pskb_trim_rcsum(skb2, len); 1598 err = pskb_trim_rcsum(skb2, len);
1600 if (err) 1599 if (err)
1601 goto err_out; 1600 goto err_out;
1602 } 1601 }
1603 1602
1604 len -= ip_hdrlen(skb2); 1603 len -= ip_hdrlen(skb2);
1605 offset = skb_network_offset(skb2) + ip_hdrlen(skb2); 1604 offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
1606 __skb_pull(skb2, offset); 1605 __skb_pull(skb2, offset);
1607 skb_reset_transport_header(skb2); 1606 skb_reset_transport_header(skb2);
1608 1607
1609 err = -EINVAL; 1608 err = -EINVAL;
1610 if (!pskb_may_pull(skb2, sizeof(*ih))) 1609 if (!pskb_may_pull(skb2, sizeof(*ih)))
1611 goto out; 1610 goto out;
1612 1611
1613 switch (skb2->ip_summed) { 1612 switch (skb2->ip_summed) {
1614 case CHECKSUM_COMPLETE: 1613 case CHECKSUM_COMPLETE:
1615 if (!csum_fold(skb2->csum)) 1614 if (!csum_fold(skb2->csum))
1616 break; 1615 break;
1617 /* fall through */ 1616 /* fall through */
1618 case CHECKSUM_NONE: 1617 case CHECKSUM_NONE:
1619 skb2->csum = 0; 1618 skb2->csum = 0;
1620 if (skb_checksum_complete(skb2)) 1619 if (skb_checksum_complete(skb2))
1621 goto out; 1620 goto out;
1622 } 1621 }
1623 1622
1624 err = 0; 1623 err = 0;
1625 1624
1626 BR_INPUT_SKB_CB(skb)->igmp = 1; 1625 BR_INPUT_SKB_CB(skb)->igmp = 1;
1627 ih = igmp_hdr(skb2); 1626 ih = igmp_hdr(skb2);
1628 1627
1629 switch (ih->type) { 1628 switch (ih->type) {
1630 case IGMP_HOST_MEMBERSHIP_REPORT: 1629 case IGMP_HOST_MEMBERSHIP_REPORT:
1631 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1630 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1632 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1631 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1633 err = br_ip4_multicast_add_group(br, port, ih->group, vid); 1632 err = br_ip4_multicast_add_group(br, port, ih->group, vid);
1634 break; 1633 break;
1635 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1634 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1636 err = br_ip4_multicast_igmp3_report(br, port, skb2, vid); 1635 err = br_ip4_multicast_igmp3_report(br, port, skb2, vid);
1637 break; 1636 break;
1638 case IGMP_HOST_MEMBERSHIP_QUERY: 1637 case IGMP_HOST_MEMBERSHIP_QUERY:
1639 err = br_ip4_multicast_query(br, port, skb2, vid); 1638 err = br_ip4_multicast_query(br, port, skb2, vid);
1640 break; 1639 break;
1641 case IGMP_HOST_LEAVE_MESSAGE: 1640 case IGMP_HOST_LEAVE_MESSAGE:
1642 br_ip4_multicast_leave_group(br, port, ih->group, vid); 1641 br_ip4_multicast_leave_group(br, port, ih->group, vid);
1643 break; 1642 break;
1644 } 1643 }
1645 1644
1646 out: 1645 out:
1647 __skb_push(skb2, offset); 1646 __skb_push(skb2, offset);
1648 err_out: 1647 err_out:
1649 if (skb2 != skb) 1648 if (skb2 != skb)
1650 kfree_skb(skb2); 1649 kfree_skb(skb2);
1651 return err; 1650 return err;
1652 } 1651 }
1653 1652
1654 #if IS_ENABLED(CONFIG_IPV6) 1653 #if IS_ENABLED(CONFIG_IPV6)
1655 static int br_multicast_ipv6_rcv(struct net_bridge *br, 1654 static int br_multicast_ipv6_rcv(struct net_bridge *br,
1656 struct net_bridge_port *port, 1655 struct net_bridge_port *port,
1657 struct sk_buff *skb, 1656 struct sk_buff *skb,
1658 u16 vid) 1657 u16 vid)
1659 { 1658 {
1660 struct sk_buff *skb2; 1659 struct sk_buff *skb2;
1661 const struct ipv6hdr *ip6h; 1660 const struct ipv6hdr *ip6h;
1662 u8 icmp6_type; 1661 u8 icmp6_type;
1663 u8 nexthdr; 1662 u8 nexthdr;
1664 __be16 frag_off; 1663 __be16 frag_off;
1665 unsigned int len; 1664 unsigned int len;
1666 int offset; 1665 int offset;
1667 int err; 1666 int err;
1668 1667
1669 if (!pskb_may_pull(skb, sizeof(*ip6h))) 1668 if (!pskb_may_pull(skb, sizeof(*ip6h)))
1670 return -EINVAL; 1669 return -EINVAL;
1671 1670
1672 ip6h = ipv6_hdr(skb); 1671 ip6h = ipv6_hdr(skb);
1673 1672
1674 /* 1673 /*
1675 * We're interested in MLD messages only. 1674 * We're interested in MLD messages only.
1676 * - Version is 6 1675 * - Version is 6
1677 * - MLD has always Router Alert hop-by-hop option 1676 * - MLD has always Router Alert hop-by-hop option
1678 * - But we do not support jumbrograms. 1677 * - But we do not support jumbrograms.
1679 */ 1678 */
1680 if (ip6h->version != 6) 1679 if (ip6h->version != 6)
1681 return 0; 1680 return 0;
1682 1681
1683 /* Prevent flooding this packet if there is no listener present */ 1682 /* Prevent flooding this packet if there is no listener present */
1684 if (!ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) 1683 if (!ipv6_addr_is_ll_all_nodes(&ip6h->daddr))
1685 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1684 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1686 1685
1687 if (ip6h->nexthdr != IPPROTO_HOPOPTS || 1686 if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
1688 ip6h->payload_len == 0) 1687 ip6h->payload_len == 0)
1689 return 0; 1688 return 0;
1690 1689
1691 len = ntohs(ip6h->payload_len) + sizeof(*ip6h); 1690 len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
1692 if (skb->len < len) 1691 if (skb->len < len)
1693 return -EINVAL; 1692 return -EINVAL;
1694 1693
1695 nexthdr = ip6h->nexthdr; 1694 nexthdr = ip6h->nexthdr;
1696 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off); 1695 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
1697 1696
1698 if (offset < 0 || nexthdr != IPPROTO_ICMPV6) 1697 if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
1699 return 0; 1698 return 0;
1700 1699
1701 /* Okay, we found ICMPv6 header */ 1700 /* Okay, we found ICMPv6 header */
1702 skb2 = skb_clone(skb, GFP_ATOMIC); 1701 skb2 = skb_clone(skb, GFP_ATOMIC);
1703 if (!skb2) 1702 if (!skb2)
1704 return -ENOMEM; 1703 return -ENOMEM;
1705 1704
1706 err = -EINVAL; 1705 err = -EINVAL;
1707 if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr))) 1706 if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
1708 goto out; 1707 goto out;
1709 1708
1710 len -= offset - skb_network_offset(skb2); 1709 len -= offset - skb_network_offset(skb2);
1711 1710
1712 __skb_pull(skb2, offset); 1711 __skb_pull(skb2, offset);
1713 skb_reset_transport_header(skb2); 1712 skb_reset_transport_header(skb2);
1714 skb_postpull_rcsum(skb2, skb_network_header(skb2), 1713 skb_postpull_rcsum(skb2, skb_network_header(skb2),
1715 skb_network_header_len(skb2)); 1714 skb_network_header_len(skb2));
1716 1715
1717 icmp6_type = icmp6_hdr(skb2)->icmp6_type; 1716 icmp6_type = icmp6_hdr(skb2)->icmp6_type;
1718 1717
1719 switch (icmp6_type) { 1718 switch (icmp6_type) {
1720 case ICMPV6_MGM_QUERY: 1719 case ICMPV6_MGM_QUERY:
1721 case ICMPV6_MGM_REPORT: 1720 case ICMPV6_MGM_REPORT:
1722 case ICMPV6_MGM_REDUCTION: 1721 case ICMPV6_MGM_REDUCTION:
1723 case ICMPV6_MLD2_REPORT: 1722 case ICMPV6_MLD2_REPORT:
1724 break; 1723 break;
1725 default: 1724 default:
1726 err = 0; 1725 err = 0;
1727 goto out; 1726 goto out;
1728 } 1727 }
1729 1728
1730 /* Okay, we found MLD message. Check further. */ 1729 /* Okay, we found MLD message. Check further. */
1731 if (skb2->len > len) { 1730 if (skb2->len > len) {
1732 err = pskb_trim_rcsum(skb2, len); 1731 err = pskb_trim_rcsum(skb2, len);
1733 if (err) 1732 if (err)
1734 goto out; 1733 goto out;
1735 err = -EINVAL; 1734 err = -EINVAL;
1736 } 1735 }
1737 1736
1738 ip6h = ipv6_hdr(skb2); 1737 ip6h = ipv6_hdr(skb2);
1739 1738
1740 switch (skb2->ip_summed) { 1739 switch (skb2->ip_summed) {
1741 case CHECKSUM_COMPLETE: 1740 case CHECKSUM_COMPLETE:
1742 if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len, 1741 if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
1743 IPPROTO_ICMPV6, skb2->csum)) 1742 IPPROTO_ICMPV6, skb2->csum))
1744 break; 1743 break;
1745 /*FALLTHROUGH*/ 1744 /*FALLTHROUGH*/
1746 case CHECKSUM_NONE: 1745 case CHECKSUM_NONE:
1747 skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr, 1746 skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
1748 &ip6h->daddr, 1747 &ip6h->daddr,
1749 skb2->len, 1748 skb2->len,
1750 IPPROTO_ICMPV6, 0)); 1749 IPPROTO_ICMPV6, 0));
1751 if (__skb_checksum_complete(skb2)) 1750 if (__skb_checksum_complete(skb2))
1752 goto out; 1751 goto out;
1753 } 1752 }
1754 1753
1755 err = 0; 1754 err = 0;
1756 1755
1757 BR_INPUT_SKB_CB(skb)->igmp = 1; 1756 BR_INPUT_SKB_CB(skb)->igmp = 1;
1758 1757
1759 switch (icmp6_type) { 1758 switch (icmp6_type) {
1760 case ICMPV6_MGM_REPORT: 1759 case ICMPV6_MGM_REPORT:
1761 { 1760 {
1762 struct mld_msg *mld; 1761 struct mld_msg *mld;
1763 if (!pskb_may_pull(skb2, sizeof(*mld))) { 1762 if (!pskb_may_pull(skb2, sizeof(*mld))) {
1764 err = -EINVAL; 1763 err = -EINVAL;
1765 goto out; 1764 goto out;
1766 } 1765 }
1767 mld = (struct mld_msg *)skb_transport_header(skb2); 1766 mld = (struct mld_msg *)skb_transport_header(skb2);
1768 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1767 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1769 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid); 1768 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
1770 break; 1769 break;
1771 } 1770 }
1772 case ICMPV6_MLD2_REPORT: 1771 case ICMPV6_MLD2_REPORT:
1773 err = br_ip6_multicast_mld2_report(br, port, skb2, vid); 1772 err = br_ip6_multicast_mld2_report(br, port, skb2, vid);
1774 break; 1773 break;
1775 case ICMPV6_MGM_QUERY: 1774 case ICMPV6_MGM_QUERY:
1776 err = br_ip6_multicast_query(br, port, skb2, vid); 1775 err = br_ip6_multicast_query(br, port, skb2, vid);
1777 break; 1776 break;
1778 case ICMPV6_MGM_REDUCTION: 1777 case ICMPV6_MGM_REDUCTION:
1779 { 1778 {
1780 struct mld_msg *mld; 1779 struct mld_msg *mld;
1781 if (!pskb_may_pull(skb2, sizeof(*mld))) { 1780 if (!pskb_may_pull(skb2, sizeof(*mld))) {
1782 err = -EINVAL; 1781 err = -EINVAL;
1783 goto out; 1782 goto out;
1784 } 1783 }
1785 mld = (struct mld_msg *)skb_transport_header(skb2); 1784 mld = (struct mld_msg *)skb_transport_header(skb2);
1786 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid); 1785 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
1787 } 1786 }
1788 } 1787 }
1789 1788
1790 out: 1789 out:
1791 kfree_skb(skb2); 1790 kfree_skb(skb2);
1792 return err; 1791 return err;
1793 } 1792 }
1794 #endif 1793 #endif
1795 1794
1796 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, 1795 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1797 struct sk_buff *skb, u16 vid) 1796 struct sk_buff *skb, u16 vid)
1798 { 1797 {
1799 BR_INPUT_SKB_CB(skb)->igmp = 0; 1798 BR_INPUT_SKB_CB(skb)->igmp = 0;
1800 BR_INPUT_SKB_CB(skb)->mrouters_only = 0; 1799 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1801 1800
1802 if (br->multicast_disabled) 1801 if (br->multicast_disabled)
1803 return 0; 1802 return 0;
1804 1803
1805 switch (skb->protocol) { 1804 switch (skb->protocol) {
1806 case htons(ETH_P_IP): 1805 case htons(ETH_P_IP):
1807 return br_multicast_ipv4_rcv(br, port, skb, vid); 1806 return br_multicast_ipv4_rcv(br, port, skb, vid);
1808 #if IS_ENABLED(CONFIG_IPV6) 1807 #if IS_ENABLED(CONFIG_IPV6)
1809 case htons(ETH_P_IPV6): 1808 case htons(ETH_P_IPV6):
1810 return br_multicast_ipv6_rcv(br, port, skb, vid); 1809 return br_multicast_ipv6_rcv(br, port, skb, vid);
1811 #endif 1810 #endif
1812 } 1811 }
1813 1812
1814 return 0; 1813 return 0;
1815 } 1814 }
1816 1815
1817 static void br_multicast_query_expired(struct net_bridge *br, 1816 static void br_multicast_query_expired(struct net_bridge *br,
1818 struct bridge_mcast_own_query *query, 1817 struct bridge_mcast_own_query *query,
1819 struct bridge_mcast_querier *querier) 1818 struct bridge_mcast_querier *querier)
1820 { 1819 {
1821 spin_lock(&br->multicast_lock); 1820 spin_lock(&br->multicast_lock);
1822 if (query->startup_sent < br->multicast_startup_query_count) 1821 if (query->startup_sent < br->multicast_startup_query_count)
1823 query->startup_sent++; 1822 query->startup_sent++;
1824 1823
1825 RCU_INIT_POINTER(querier, NULL); 1824 RCU_INIT_POINTER(querier, NULL);
1826 br_multicast_send_query(br, NULL, query); 1825 br_multicast_send_query(br, NULL, query);
1827 spin_unlock(&br->multicast_lock); 1826 spin_unlock(&br->multicast_lock);
1828 } 1827 }
1829 1828
1830 static void br_ip4_multicast_query_expired(unsigned long data) 1829 static void br_ip4_multicast_query_expired(unsigned long data)
1831 { 1830 {
1832 struct net_bridge *br = (void *)data; 1831 struct net_bridge *br = (void *)data;
1833 1832
1834 br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier); 1833 br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
1835 } 1834 }
1836 1835
1837 #if IS_ENABLED(CONFIG_IPV6) 1836 #if IS_ENABLED(CONFIG_IPV6)
1838 static void br_ip6_multicast_query_expired(unsigned long data) 1837 static void br_ip6_multicast_query_expired(unsigned long data)
1839 { 1838 {
1840 struct net_bridge *br = (void *)data; 1839 struct net_bridge *br = (void *)data;
1841 1840
1842 br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier); 1841 br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
1843 } 1842 }
1844 #endif 1843 #endif
1845 1844
1846 void br_multicast_init(struct net_bridge *br) 1845 void br_multicast_init(struct net_bridge *br)
1847 { 1846 {
1848 br->hash_elasticity = 4; 1847 br->hash_elasticity = 4;
1849 br->hash_max = 512; 1848 br->hash_max = 512;
1850 1849
1851 br->multicast_router = 1; 1850 br->multicast_router = 1;
1852 br->multicast_querier = 0; 1851 br->multicast_querier = 0;
1853 br->multicast_query_use_ifaddr = 0; 1852 br->multicast_query_use_ifaddr = 0;
1854 br->multicast_last_member_count = 2; 1853 br->multicast_last_member_count = 2;
1855 br->multicast_startup_query_count = 2; 1854 br->multicast_startup_query_count = 2;
1856 1855
1857 br->multicast_last_member_interval = HZ; 1856 br->multicast_last_member_interval = HZ;
1858 br->multicast_query_response_interval = 10 * HZ; 1857 br->multicast_query_response_interval = 10 * HZ;
1859 br->multicast_startup_query_interval = 125 * HZ / 4; 1858 br->multicast_startup_query_interval = 125 * HZ / 4;
1860 br->multicast_query_interval = 125 * HZ; 1859 br->multicast_query_interval = 125 * HZ;
1861 br->multicast_querier_interval = 255 * HZ; 1860 br->multicast_querier_interval = 255 * HZ;
1862 br->multicast_membership_interval = 260 * HZ; 1861 br->multicast_membership_interval = 260 * HZ;
1863 1862
1864 br->ip4_other_query.delay_time = 0; 1863 br->ip4_other_query.delay_time = 0;
1865 br->ip4_querier.port = NULL; 1864 br->ip4_querier.port = NULL;
1866 #if IS_ENABLED(CONFIG_IPV6) 1865 #if IS_ENABLED(CONFIG_IPV6)
1867 br->ip6_other_query.delay_time = 0; 1866 br->ip6_other_query.delay_time = 0;
1868 br->ip6_querier.port = NULL; 1867 br->ip6_querier.port = NULL;
1869 #endif 1868 #endif
1870 1869
1871 spin_lock_init(&br->multicast_lock); 1870 spin_lock_init(&br->multicast_lock);
1872 setup_timer(&br->multicast_router_timer, 1871 setup_timer(&br->multicast_router_timer,
1873 br_multicast_local_router_expired, 0); 1872 br_multicast_local_router_expired, 0);
1874 setup_timer(&br->ip4_other_query.timer, 1873 setup_timer(&br->ip4_other_query.timer,
1875 br_ip4_multicast_querier_expired, (unsigned long)br); 1874 br_ip4_multicast_querier_expired, (unsigned long)br);
1876 setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired, 1875 setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
1877 (unsigned long)br); 1876 (unsigned long)br);
1878 #if IS_ENABLED(CONFIG_IPV6) 1877 #if IS_ENABLED(CONFIG_IPV6)
1879 setup_timer(&br->ip6_other_query.timer, 1878 setup_timer(&br->ip6_other_query.timer,
1880 br_ip6_multicast_querier_expired, (unsigned long)br); 1879 br_ip6_multicast_querier_expired, (unsigned long)br);
1881 setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired, 1880 setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
1882 (unsigned long)br); 1881 (unsigned long)br);
1883 #endif 1882 #endif
1884 } 1883 }
1885 1884
1886 static void __br_multicast_open(struct net_bridge *br, 1885 static void __br_multicast_open(struct net_bridge *br,
1887 struct bridge_mcast_own_query *query) 1886 struct bridge_mcast_own_query *query)
1888 { 1887 {
1889 query->startup_sent = 0; 1888 query->startup_sent = 0;
1890 1889
1891 if (br->multicast_disabled) 1890 if (br->multicast_disabled)
1892 return; 1891 return;
1893 1892
1894 mod_timer(&query->timer, jiffies); 1893 mod_timer(&query->timer, jiffies);
1895 } 1894 }
1896 1895
1897 void br_multicast_open(struct net_bridge *br) 1896 void br_multicast_open(struct net_bridge *br)
1898 { 1897 {
1899 __br_multicast_open(br, &br->ip4_own_query); 1898 __br_multicast_open(br, &br->ip4_own_query);
1900 #if IS_ENABLED(CONFIG_IPV6) 1899 #if IS_ENABLED(CONFIG_IPV6)
1901 __br_multicast_open(br, &br->ip6_own_query); 1900 __br_multicast_open(br, &br->ip6_own_query);
1902 #endif 1901 #endif
1903 } 1902 }
1904 1903
1905 void br_multicast_stop(struct net_bridge *br) 1904 void br_multicast_stop(struct net_bridge *br)
1906 { 1905 {
1907 struct net_bridge_mdb_htable *mdb; 1906 struct net_bridge_mdb_htable *mdb;
1908 struct net_bridge_mdb_entry *mp; 1907 struct net_bridge_mdb_entry *mp;
1909 struct hlist_node *n; 1908 struct hlist_node *n;
1910 u32 ver; 1909 u32 ver;
1911 int i; 1910 int i;
1912 1911
1913 del_timer_sync(&br->multicast_router_timer); 1912 del_timer_sync(&br->multicast_router_timer);
1914 del_timer_sync(&br->ip4_other_query.timer); 1913 del_timer_sync(&br->ip4_other_query.timer);
1915 del_timer_sync(&br->ip4_own_query.timer); 1914 del_timer_sync(&br->ip4_own_query.timer);
1916 #if IS_ENABLED(CONFIG_IPV6) 1915 #if IS_ENABLED(CONFIG_IPV6)
1917 del_timer_sync(&br->ip6_other_query.timer); 1916 del_timer_sync(&br->ip6_other_query.timer);
1918 del_timer_sync(&br->ip6_own_query.timer); 1917 del_timer_sync(&br->ip6_own_query.timer);
1919 #endif 1918 #endif
1920 1919
1921 spin_lock_bh(&br->multicast_lock); 1920 spin_lock_bh(&br->multicast_lock);
1922 mdb = mlock_dereference(br->mdb, br); 1921 mdb = mlock_dereference(br->mdb, br);
1923 if (!mdb) 1922 if (!mdb)
1924 goto out; 1923 goto out;
1925 1924
1926 br->mdb = NULL; 1925 br->mdb = NULL;
1927 1926
1928 ver = mdb->ver; 1927 ver = mdb->ver;
1929 for (i = 0; i < mdb->max; i++) { 1928 for (i = 0; i < mdb->max; i++) {
1930 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], 1929 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
1931 hlist[ver]) { 1930 hlist[ver]) {
1932 del_timer(&mp->timer); 1931 del_timer(&mp->timer);
1933 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1932 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1934 } 1933 }
1935 } 1934 }
1936 1935
1937 if (mdb->old) { 1936 if (mdb->old) {
1938 spin_unlock_bh(&br->multicast_lock); 1937 spin_unlock_bh(&br->multicast_lock);
1939 rcu_barrier_bh(); 1938 rcu_barrier_bh();
1940 spin_lock_bh(&br->multicast_lock); 1939 spin_lock_bh(&br->multicast_lock);
1941 WARN_ON(mdb->old); 1940 WARN_ON(mdb->old);
1942 } 1941 }
1943 1942
1944 mdb->old = mdb; 1943 mdb->old = mdb;
1945 call_rcu_bh(&mdb->rcu, br_mdb_free); 1944 call_rcu_bh(&mdb->rcu, br_mdb_free);
1946 1945
1947 out: 1946 out:
1948 spin_unlock_bh(&br->multicast_lock); 1947 spin_unlock_bh(&br->multicast_lock);
1949 } 1948 }
1950 1949
1951 int br_multicast_set_router(struct net_bridge *br, unsigned long val) 1950 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1952 { 1951 {
1953 int err = -ENOENT; 1952 int err = -ENOENT;
1954 1953
1955 spin_lock_bh(&br->multicast_lock); 1954 spin_lock_bh(&br->multicast_lock);
1956 if (!netif_running(br->dev)) 1955 if (!netif_running(br->dev))
1957 goto unlock; 1956 goto unlock;
1958 1957
1959 switch (val) { 1958 switch (val) {
1960 case 0: 1959 case 0:
1961 case 2: 1960 case 2:
1962 del_timer(&br->multicast_router_timer); 1961 del_timer(&br->multicast_router_timer);
1963 /* fall through */ 1962 /* fall through */
1964 case 1: 1963 case 1:
1965 br->multicast_router = val; 1964 br->multicast_router = val;
1966 err = 0; 1965 err = 0;
1967 break; 1966 break;
1968 1967
1969 default: 1968 default:
1970 err = -EINVAL; 1969 err = -EINVAL;
1971 break; 1970 break;
1972 } 1971 }
1973 1972
1974 unlock: 1973 unlock:
1975 spin_unlock_bh(&br->multicast_lock); 1974 spin_unlock_bh(&br->multicast_lock);
1976 1975
1977 return err; 1976 return err;
1978 } 1977 }
1979 1978
1980 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val) 1979 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
1981 { 1980 {
1982 struct net_bridge *br = p->br; 1981 struct net_bridge *br = p->br;
1983 int err = -ENOENT; 1982 int err = -ENOENT;
1984 1983
1985 spin_lock(&br->multicast_lock); 1984 spin_lock(&br->multicast_lock);
1986 if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED) 1985 if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
1987 goto unlock; 1986 goto unlock;
1988 1987
1989 switch (val) { 1988 switch (val) {
1990 case 0: 1989 case 0:
1991 case 1: 1990 case 1:
1992 case 2: 1991 case 2:
1993 p->multicast_router = val; 1992 p->multicast_router = val;
1994 err = 0; 1993 err = 0;
1995 1994
1996 if (val < 2 && !hlist_unhashed(&p->rlist)) 1995 if (val < 2 && !hlist_unhashed(&p->rlist))
1997 hlist_del_init_rcu(&p->rlist); 1996 hlist_del_init_rcu(&p->rlist);
1998 1997
1999 if (val == 1) 1998 if (val == 1)
2000 break; 1999 break;
2001 2000
2002 del_timer(&p->multicast_router_timer); 2001 del_timer(&p->multicast_router_timer);
2003 2002
2004 if (val == 0) 2003 if (val == 0)
2005 break; 2004 break;
2006 2005
2007 br_multicast_add_router(br, p); 2006 br_multicast_add_router(br, p);
2008 break; 2007 break;
2009 2008
2010 default: 2009 default:
2011 err = -EINVAL; 2010 err = -EINVAL;
2012 break; 2011 break;
2013 } 2012 }
2014 2013
2015 unlock: 2014 unlock:
2016 spin_unlock(&br->multicast_lock); 2015 spin_unlock(&br->multicast_lock);
2017 2016
2018 return err; 2017 return err;
2019 } 2018 }
2020 2019
2021 static void br_multicast_start_querier(struct net_bridge *br, 2020 static void br_multicast_start_querier(struct net_bridge *br,
2022 struct bridge_mcast_own_query *query) 2021 struct bridge_mcast_own_query *query)
2023 { 2022 {
2024 struct net_bridge_port *port; 2023 struct net_bridge_port *port;
2025 2024
2026 __br_multicast_open(br, query); 2025 __br_multicast_open(br, query);
2027 2026
2028 list_for_each_entry(port, &br->port_list, list) { 2027 list_for_each_entry(port, &br->port_list, list) {
2029 if (port->state == BR_STATE_DISABLED || 2028 if (port->state == BR_STATE_DISABLED ||
2030 port->state == BR_STATE_BLOCKING) 2029 port->state == BR_STATE_BLOCKING)
2031 continue; 2030 continue;
2032 2031
2033 if (query == &br->ip4_own_query) 2032 if (query == &br->ip4_own_query)
2034 br_multicast_enable(&port->ip4_own_query); 2033 br_multicast_enable(&port->ip4_own_query);
2035 #if IS_ENABLED(CONFIG_IPV6) 2034 #if IS_ENABLED(CONFIG_IPV6)
2036 else 2035 else
2037 br_multicast_enable(&port->ip6_own_query); 2036 br_multicast_enable(&port->ip6_own_query);
2038 #endif 2037 #endif
2039 } 2038 }
2040 } 2039 }
2041 2040
2042 int br_multicast_toggle(struct net_bridge *br, unsigned long val) 2041 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
2043 { 2042 {
2044 int err = 0; 2043 int err = 0;
2045 struct net_bridge_mdb_htable *mdb; 2044 struct net_bridge_mdb_htable *mdb;
2046 2045
2047 spin_lock_bh(&br->multicast_lock); 2046 spin_lock_bh(&br->multicast_lock);
2048 if (br->multicast_disabled == !val) 2047 if (br->multicast_disabled == !val)
2049 goto unlock; 2048 goto unlock;
2050 2049
2051 br->multicast_disabled = !val; 2050 br->multicast_disabled = !val;
2052 if (br->multicast_disabled) 2051 if (br->multicast_disabled)
2053 goto unlock; 2052 goto unlock;
2054 2053
2055 if (!netif_running(br->dev)) 2054 if (!netif_running(br->dev))
2056 goto unlock; 2055 goto unlock;
2057 2056
2058 mdb = mlock_dereference(br->mdb, br); 2057 mdb = mlock_dereference(br->mdb, br);
2059 if (mdb) { 2058 if (mdb) {
2060 if (mdb->old) { 2059 if (mdb->old) {
2061 err = -EEXIST; 2060 err = -EEXIST;
2062 rollback: 2061 rollback:
2063 br->multicast_disabled = !!val; 2062 br->multicast_disabled = !!val;
2064 goto unlock; 2063 goto unlock;
2065 } 2064 }
2066 2065
2067 err = br_mdb_rehash(&br->mdb, mdb->max, 2066 err = br_mdb_rehash(&br->mdb, mdb->max,
2068 br->hash_elasticity); 2067 br->hash_elasticity);
2069 if (err) 2068 if (err)
2070 goto rollback; 2069 goto rollback;
2071 } 2070 }
2072 2071
2073 br_multicast_start_querier(br, &br->ip4_own_query); 2072 br_multicast_start_querier(br, &br->ip4_own_query);
2074 #if IS_ENABLED(CONFIG_IPV6) 2073 #if IS_ENABLED(CONFIG_IPV6)
2075 br_multicast_start_querier(br, &br->ip6_own_query); 2074 br_multicast_start_querier(br, &br->ip6_own_query);
2076 #endif 2075 #endif
2077 2076
2078 unlock: 2077 unlock:
2079 spin_unlock_bh(&br->multicast_lock); 2078 spin_unlock_bh(&br->multicast_lock);
2080 2079
2081 return err; 2080 return err;
2082 } 2081 }
2083 2082
2084 int br_multicast_set_querier(struct net_bridge *br, unsigned long val) 2083 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
2085 { 2084 {
2086 unsigned long max_delay; 2085 unsigned long max_delay;
2087 2086
2088 val = !!val; 2087 val = !!val;
2089 2088
2090 spin_lock_bh(&br->multicast_lock); 2089 spin_lock_bh(&br->multicast_lock);
2091 if (br->multicast_querier == val) 2090 if (br->multicast_querier == val)
2092 goto unlock; 2091 goto unlock;
2093 2092
2094 br->multicast_querier = val; 2093 br->multicast_querier = val;
2095 if (!val) 2094 if (!val)
2096 goto unlock; 2095 goto unlock;
2097 2096
2098 max_delay = br->multicast_query_response_interval; 2097 max_delay = br->multicast_query_response_interval;
2099 2098
2100 if (!timer_pending(&br->ip4_other_query.timer)) 2099 if (!timer_pending(&br->ip4_other_query.timer))
2101 br->ip4_other_query.delay_time = jiffies + max_delay; 2100 br->ip4_other_query.delay_time = jiffies + max_delay;
2102 2101
2103 br_multicast_start_querier(br, &br->ip4_own_query); 2102 br_multicast_start_querier(br, &br->ip4_own_query);
2104 2103
2105 #if IS_ENABLED(CONFIG_IPV6) 2104 #if IS_ENABLED(CONFIG_IPV6)
2106 if (!timer_pending(&br->ip6_other_query.timer)) 2105 if (!timer_pending(&br->ip6_other_query.timer))
2107 br->ip6_other_query.delay_time = jiffies + max_delay; 2106 br->ip6_other_query.delay_time = jiffies + max_delay;
2108 2107
2109 br_multicast_start_querier(br, &br->ip6_own_query); 2108 br_multicast_start_querier(br, &br->ip6_own_query);
2110 #endif 2109 #endif
2111 2110
2112 unlock: 2111 unlock:
2113 spin_unlock_bh(&br->multicast_lock); 2112 spin_unlock_bh(&br->multicast_lock);
2114 2113
2115 return 0; 2114 return 0;
2116 } 2115 }
2117 2116
2118 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) 2117 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
2119 { 2118 {
2120 int err = -ENOENT; 2119 int err = -ENOENT;
2121 u32 old; 2120 u32 old;
2122 struct net_bridge_mdb_htable *mdb; 2121 struct net_bridge_mdb_htable *mdb;
2123 2122
2124 spin_lock_bh(&br->multicast_lock); 2123 spin_lock_bh(&br->multicast_lock);
2125 if (!netif_running(br->dev)) 2124 if (!netif_running(br->dev))
2126 goto unlock; 2125 goto unlock;
2127 2126
2128 err = -EINVAL; 2127 err = -EINVAL;
2129 if (!is_power_of_2(val)) 2128 if (!is_power_of_2(val))
2130 goto unlock; 2129 goto unlock;
2131 2130
2132 mdb = mlock_dereference(br->mdb, br); 2131 mdb = mlock_dereference(br->mdb, br);
2133 if (mdb && val < mdb->size) 2132 if (mdb && val < mdb->size)
2134 goto unlock; 2133 goto unlock;
2135 2134
2136 err = 0; 2135 err = 0;
2137 2136
2138 old = br->hash_max; 2137 old = br->hash_max;
2139 br->hash_max = val; 2138 br->hash_max = val;
2140 2139
2141 if (mdb) { 2140 if (mdb) {
2142 if (mdb->old) { 2141 if (mdb->old) {
2143 err = -EEXIST; 2142 err = -EEXIST;
2144 rollback: 2143 rollback:
2145 br->hash_max = old; 2144 br->hash_max = old;
2146 goto unlock; 2145 goto unlock;
2147 } 2146 }
2148 2147
2149 err = br_mdb_rehash(&br->mdb, br->hash_max, 2148 err = br_mdb_rehash(&br->mdb, br->hash_max,
2150 br->hash_elasticity); 2149 br->hash_elasticity);
2151 if (err) 2150 if (err)
2152 goto rollback; 2151 goto rollback;
2153 } 2152 }
2154 2153
2155 unlock: 2154 unlock:
2156 spin_unlock_bh(&br->multicast_lock); 2155 spin_unlock_bh(&br->multicast_lock);
2157 2156
2158 return err; 2157 return err;
2159 } 2158 }
2160 2159
2161 /** 2160 /**
2162 * br_multicast_list_adjacent - Returns snooped multicast addresses 2161 * br_multicast_list_adjacent - Returns snooped multicast addresses
2163 * @dev: The bridge port adjacent to which to retrieve addresses 2162 * @dev: The bridge port adjacent to which to retrieve addresses
2164 * @br_ip_list: The list to store found, snooped multicast IP addresses in 2163 * @br_ip_list: The list to store found, snooped multicast IP addresses in
2165 * 2164 *
2166 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast 2165 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
2167 * snooping feature on all bridge ports of dev's bridge device, excluding 2166 * snooping feature on all bridge ports of dev's bridge device, excluding
2168 * the addresses from dev itself. 2167 * the addresses from dev itself.
2169 * 2168 *
2170 * Returns the number of items added to br_ip_list. 2169 * Returns the number of items added to br_ip_list.
2171 * 2170 *
2172 * Notes: 2171 * Notes:
2173 * - br_ip_list needs to be initialized by caller 2172 * - br_ip_list needs to be initialized by caller
2174 * - br_ip_list might contain duplicates in the end 2173 * - br_ip_list might contain duplicates in the end
2175 * (needs to be taken care of by caller) 2174 * (needs to be taken care of by caller)
2176 * - br_ip_list needs to be freed by caller 2175 * - br_ip_list needs to be freed by caller
2177 */ 2176 */
2178 int br_multicast_list_adjacent(struct net_device *dev, 2177 int br_multicast_list_adjacent(struct net_device *dev,
2179 struct list_head *br_ip_list) 2178 struct list_head *br_ip_list)
2180 { 2179 {
2181 struct net_bridge *br; 2180 struct net_bridge *br;
2182 struct net_bridge_port *port; 2181 struct net_bridge_port *port;
2183 struct net_bridge_port_group *group; 2182 struct net_bridge_port_group *group;
2184 struct br_ip_list *entry; 2183 struct br_ip_list *entry;
2185 int count = 0; 2184 int count = 0;
2186 2185
2187 rcu_read_lock(); 2186 rcu_read_lock();
2188 if (!br_ip_list || !br_port_exists(dev)) 2187 if (!br_ip_list || !br_port_exists(dev))
2189 goto unlock; 2188 goto unlock;
2190 2189
2191 port = br_port_get_rcu(dev); 2190 port = br_port_get_rcu(dev);
2192 if (!port || !port->br) 2191 if (!port || !port->br)
2193 goto unlock; 2192 goto unlock;
2194 2193
2195 br = port->br; 2194 br = port->br;
2196 2195
2197 list_for_each_entry_rcu(port, &br->port_list, list) { 2196 list_for_each_entry_rcu(port, &br->port_list, list) {
2198 if (!port->dev || port->dev == dev) 2197 if (!port->dev || port->dev == dev)
2199 continue; 2198 continue;
2200 2199
2201 hlist_for_each_entry_rcu(group, &port->mglist, mglist) { 2200 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
2202 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 2201 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2203 if (!entry) 2202 if (!entry)
2204 goto unlock; 2203 goto unlock;
2205 2204
2206 entry->addr = group->addr; 2205 entry->addr = group->addr;
2207 list_add(&entry->list, br_ip_list); 2206 list_add(&entry->list, br_ip_list);
2208 count++; 2207 count++;
2209 } 2208 }
2210 } 2209 }
2211 2210
2212 unlock: 2211 unlock:
2213 rcu_read_unlock(); 2212 rcu_read_unlock();
2214 return count; 2213 return count;
2215 } 2214 }
2216 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent); 2215 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2217 2216
2218 /** 2217 /**
2219 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge 2218 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2220 * @dev: The bridge port providing the bridge on which to check for a querier 2219 * @dev: The bridge port providing the bridge on which to check for a querier
2221 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 2220 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2222 * 2221 *
2223 * Checks whether the given interface has a bridge on top and if so returns 2222 * Checks whether the given interface has a bridge on top and if so returns
2224 * true if a valid querier exists anywhere on the bridged link layer. 2223 * true if a valid querier exists anywhere on the bridged link layer.
2225 * Otherwise returns false. 2224 * Otherwise returns false.
2226 */ 2225 */
2227 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) 2226 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2228 { 2227 {
2229 struct net_bridge *br; 2228 struct net_bridge *br;
2230 struct net_bridge_port *port; 2229 struct net_bridge_port *port;
2231 struct ethhdr eth; 2230 struct ethhdr eth;
2232 bool ret = false; 2231 bool ret = false;
2233 2232
2234 rcu_read_lock(); 2233 rcu_read_lock();
2235 if (!br_port_exists(dev)) 2234 if (!br_port_exists(dev))
2236 goto unlock; 2235 goto unlock;
2237 2236
2238 port = br_port_get_rcu(dev); 2237 port = br_port_get_rcu(dev);
2239 if (!port || !port->br) 2238 if (!port || !port->br)
2240 goto unlock; 2239 goto unlock;
2241 2240
2242 br = port->br; 2241 br = port->br;
2243 2242
2244 memset(&eth, 0, sizeof(eth)); 2243 memset(&eth, 0, sizeof(eth));
2245 eth.h_proto = htons(proto); 2244 eth.h_proto = htons(proto);
2246 2245
2247 ret = br_multicast_querier_exists(br, &eth); 2246 ret = br_multicast_querier_exists(br, &eth);
2248 2247
2249 unlock: 2248 unlock:
2250 rcu_read_unlock(); 2249 rcu_read_unlock();
2251 return ret; 2250 return ret;
2252 } 2251 }
2253 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere); 2252 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
2254 2253
2255 /** 2254 /**
2256 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port 2255 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2257 * @dev: The bridge port adjacent to which to check for a querier 2256 * @dev: The bridge port adjacent to which to check for a querier
2258 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6 2257 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2259 * 2258 *
2260 * Checks whether the given interface has a bridge on top and if so returns 2259 * Checks whether the given interface has a bridge on top and if so returns
2261 * true if a selected querier is behind one of the other ports of this 2260 * true if a selected querier is behind one of the other ports of this
2262 * bridge. Otherwise returns false. 2261 * bridge. Otherwise returns false.
2263 */ 2262 */
2264 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) 2263 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2265 { 2264 {
2266 struct net_bridge *br; 2265 struct net_bridge *br;
2267 struct net_bridge_port *port; 2266 struct net_bridge_port *port;
2268 bool ret = false; 2267 bool ret = false;
2269 2268
2270 rcu_read_lock(); 2269 rcu_read_lock();
2271 if (!br_port_exists(dev)) 2270 if (!br_port_exists(dev))
2272 goto unlock; 2271 goto unlock;
2273 2272
2274 port = br_port_get_rcu(dev); 2273 port = br_port_get_rcu(dev);
2275 if (!port || !port->br) 2274 if (!port || !port->br)
2276 goto unlock; 2275 goto unlock;
2277 2276
2278 br = port->br; 2277 br = port->br;
2279 2278
2280 switch (proto) { 2279 switch (proto) {
2281 case ETH_P_IP: 2280 case ETH_P_IP:
2282 if (!timer_pending(&br->ip4_other_query.timer) || 2281 if (!timer_pending(&br->ip4_other_query.timer) ||
2283 rcu_dereference(br->ip4_querier.port) == port) 2282 rcu_dereference(br->ip4_querier.port) == port)
2284 goto unlock; 2283 goto unlock;
2285 break; 2284 break;
2286 #if IS_ENABLED(CONFIG_IPV6) 2285 #if IS_ENABLED(CONFIG_IPV6)
2287 case ETH_P_IPV6: 2286 case ETH_P_IPV6:
2288 if (!timer_pending(&br->ip6_other_query.timer) || 2287 if (!timer_pending(&br->ip6_other_query.timer) ||
2289 rcu_dereference(br->ip6_querier.port) == port) 2288 rcu_dereference(br->ip6_querier.port) == port)
2290 goto unlock; 2289 goto unlock;
2291 break; 2290 break;
2292 #endif 2291 #endif
2293 default: 2292 default:
2294 goto unlock; 2293 goto unlock;
2295 } 2294 }
2296 2295
2297 ret = true; 2296 ret = true;
2298 unlock: 2297 unlock:
2299 rcu_read_unlock(); 2298 rcu_read_unlock();
2300 return ret; 2299 return ret;
2301 } 2300 }
2302 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); 2301 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
2303 2302
net/netfilter/nfnetlink.c
1 /* Netfilter messages via netlink socket. Allows for user space 1 /* Netfilter messages via netlink socket. Allows for user space
2 * protocol helpers and general trouble making from userspace. 2 * protocol helpers and general trouble making from userspace.
3 * 3 *
4 * (C) 2001 by Jay Schulist <jschlst@samba.org>, 4 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
5 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org> 5 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2005,2007 by Pablo Neira Ayuso <pablo@netfilter.org> 6 * (C) 2005,2007 by Pablo Neira Ayuso <pablo@netfilter.org>
7 * 7 *
8 * Initial netfilter messages via netlink development funded and 8 * Initial netfilter messages via netlink development funded and
9 * generally made possible by Network Robots, Inc. (www.networkrobots.com) 9 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
10 * 10 *
11 * Further development of this code funded by Astaro AG (http://www.astaro.com) 11 * Further development of this code funded by Astaro AG (http://www.astaro.com)
12 * 12 *
13 * This software may be used and distributed according to the terms 13 * This software may be used and distributed according to the terms
14 * of the GNU General Public License, incorporated herein by reference. 14 * of the GNU General Public License, incorporated herein by reference.
15 */ 15 */
16 16
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/types.h> 18 #include <linux/types.h>
19 #include <linux/socket.h> 19 #include <linux/socket.h>
20 #include <linux/kernel.h> 20 #include <linux/kernel.h>
21 #include <linux/string.h> 21 #include <linux/string.h>
22 #include <linux/sockios.h> 22 #include <linux/sockios.h>
23 #include <linux/net.h> 23 #include <linux/net.h>
24 #include <linux/skbuff.h> 24 #include <linux/skbuff.h>
25 #include <asm/uaccess.h> 25 #include <asm/uaccess.h>
26 #include <net/sock.h> 26 #include <net/sock.h>
27 #include <linux/init.h> 27 #include <linux/init.h>
28 28
29 #include <net/netlink.h> 29 #include <net/netlink.h>
30 #include <linux/netfilter/nfnetlink.h> 30 #include <linux/netfilter/nfnetlink.h>
31 31
32 MODULE_LICENSE("GPL"); 32 MODULE_LICENSE("GPL");
33 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 33 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
34 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER); 34 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
35 35
36 static char __initdata nfversion[] = "0.30"; 36 static char __initdata nfversion[] = "0.30";
37 37
38 static struct { 38 static struct {
39 struct mutex mutex; 39 struct mutex mutex;
40 const struct nfnetlink_subsystem __rcu *subsys; 40 const struct nfnetlink_subsystem __rcu *subsys;
41 } table[NFNL_SUBSYS_COUNT]; 41 } table[NFNL_SUBSYS_COUNT];
42 42
43 static const int nfnl_group2type[NFNLGRP_MAX+1] = { 43 static const int nfnl_group2type[NFNLGRP_MAX+1] = {
44 [NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK, 44 [NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK,
45 [NFNLGRP_CONNTRACK_UPDATE] = NFNL_SUBSYS_CTNETLINK, 45 [NFNLGRP_CONNTRACK_UPDATE] = NFNL_SUBSYS_CTNETLINK,
46 [NFNLGRP_CONNTRACK_DESTROY] = NFNL_SUBSYS_CTNETLINK, 46 [NFNLGRP_CONNTRACK_DESTROY] = NFNL_SUBSYS_CTNETLINK,
47 [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP, 47 [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP,
48 [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP, 48 [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP,
49 [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP, 49 [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
50 [NFNLGRP_NFTABLES] = NFNL_SUBSYS_NFTABLES,
51 [NFNLGRP_ACCT_QUOTA] = NFNL_SUBSYS_ACCT,
50 }; 52 };
51 53
52 void nfnl_lock(__u8 subsys_id) 54 void nfnl_lock(__u8 subsys_id)
53 { 55 {
54 mutex_lock(&table[subsys_id].mutex); 56 mutex_lock(&table[subsys_id].mutex);
55 } 57 }
56 EXPORT_SYMBOL_GPL(nfnl_lock); 58 EXPORT_SYMBOL_GPL(nfnl_lock);
57 59
58 void nfnl_unlock(__u8 subsys_id) 60 void nfnl_unlock(__u8 subsys_id)
59 { 61 {
60 mutex_unlock(&table[subsys_id].mutex); 62 mutex_unlock(&table[subsys_id].mutex);
61 } 63 }
62 EXPORT_SYMBOL_GPL(nfnl_unlock); 64 EXPORT_SYMBOL_GPL(nfnl_unlock);
63 65
#ifdef CONFIG_PROVE_LOCKING
/* Lockdep helper: non-zero if the given subsystem's mutex is held. */
int lockdep_nfnl_is_held(u8 subsys_id)
{
	return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif
71 73
72 int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) 74 int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
73 { 75 {
74 nfnl_lock(n->subsys_id); 76 nfnl_lock(n->subsys_id);
75 if (table[n->subsys_id].subsys) { 77 if (table[n->subsys_id].subsys) {
76 nfnl_unlock(n->subsys_id); 78 nfnl_unlock(n->subsys_id);
77 return -EBUSY; 79 return -EBUSY;
78 } 80 }
79 rcu_assign_pointer(table[n->subsys_id].subsys, n); 81 rcu_assign_pointer(table[n->subsys_id].subsys, n);
80 nfnl_unlock(n->subsys_id); 82 nfnl_unlock(n->subsys_id);
81 83
82 return 0; 84 return 0;
83 } 85 }
84 EXPORT_SYMBOL_GPL(nfnetlink_subsys_register); 86 EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);
85 87
86 int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n) 88 int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
87 { 89 {
88 nfnl_lock(n->subsys_id); 90 nfnl_lock(n->subsys_id);
89 table[n->subsys_id].subsys = NULL; 91 table[n->subsys_id].subsys = NULL;
90 nfnl_unlock(n->subsys_id); 92 nfnl_unlock(n->subsys_id);
91 synchronize_rcu(); 93 synchronize_rcu();
92 return 0; 94 return 0;
93 } 95 }
94 EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister); 96 EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);
95 97
96 static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type) 98 static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type)
97 { 99 {
98 u_int8_t subsys_id = NFNL_SUBSYS_ID(type); 100 u_int8_t subsys_id = NFNL_SUBSYS_ID(type);
99 101
100 if (subsys_id >= NFNL_SUBSYS_COUNT) 102 if (subsys_id >= NFNL_SUBSYS_COUNT)
101 return NULL; 103 return NULL;
102 104
103 return rcu_dereference(table[subsys_id].subsys); 105 return rcu_dereference(table[subsys_id].subsys);
104 } 106 }
105 107
106 static inline const struct nfnl_callback * 108 static inline const struct nfnl_callback *
107 nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss) 109 nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss)
108 { 110 {
109 u_int8_t cb_id = NFNL_MSG_TYPE(type); 111 u_int8_t cb_id = NFNL_MSG_TYPE(type);
110 112
111 if (cb_id >= ss->cb_count) 113 if (cb_id >= ss->cb_count)
112 return NULL; 114 return NULL;
113 115
114 return &ss->cb[cb_id]; 116 return &ss->cb[cb_id];
115 } 117 }
116 118
117 int nfnetlink_has_listeners(struct net *net, unsigned int group) 119 int nfnetlink_has_listeners(struct net *net, unsigned int group)
118 { 120 {
119 return netlink_has_listeners(net->nfnl, group); 121 return netlink_has_listeners(net->nfnl, group);
120 } 122 }
121 EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); 123 EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
122 124
123 struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size, 125 struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
124 u32 dst_portid, gfp_t gfp_mask) 126 u32 dst_portid, gfp_t gfp_mask)
125 { 127 {
126 return netlink_alloc_skb(net->nfnl, size, dst_portid, gfp_mask); 128 return netlink_alloc_skb(net->nfnl, size, dst_portid, gfp_mask);
127 } 129 }
128 EXPORT_SYMBOL_GPL(nfnetlink_alloc_skb); 130 EXPORT_SYMBOL_GPL(nfnetlink_alloc_skb);
129 131
130 int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, 132 int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
131 unsigned int group, int echo, gfp_t flags) 133 unsigned int group, int echo, gfp_t flags)
132 { 134 {
133 return nlmsg_notify(net->nfnl, skb, portid, group, echo, flags); 135 return nlmsg_notify(net->nfnl, skb, portid, group, echo, flags);
134 } 136 }
135 EXPORT_SYMBOL_GPL(nfnetlink_send); 137 EXPORT_SYMBOL_GPL(nfnetlink_send);
136 138
137 int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error) 139 int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
138 { 140 {
139 return netlink_set_err(net->nfnl, portid, group, error); 141 return netlink_set_err(net->nfnl, portid, group, error);
140 } 142 }
141 EXPORT_SYMBOL_GPL(nfnetlink_set_err); 143 EXPORT_SYMBOL_GPL(nfnetlink_set_err);
142 144
/* Unicast @skb to @portid on the per-netns nfnetlink socket
 * (wrapper over netlink_unicast()); consumes @skb. */
int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
		      int flags)
{
	return netlink_unicast(net->nfnl, skb, portid, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);
149 151
/* Process one complete nfnetlink message.
 *
 * Looks up the subsystem and per-type callback for @nlh, parses the
 * netlink attributes, then invokes either the RCU-protected handler
 * (->call_rcu) or the mutex-protected one (->call).  Returns 0 or a
 * negative errno to netlink_rcv_skb().
 */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	const struct nfnl_callback *nc;
	const struct nfnetlink_subsystem *ss;
	int type, err;

	/* All the messages must at least contain nfgenmsg */
	if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
		return 0;

	type = nlh->nlmsg_type;
replay:	/* re-entered when the mutex-protected path races a subsys change */
	rcu_read_lock();
	ss = nfnetlink_get_subsys(type);
	if (!ss) {
#ifdef CONFIG_MODULES
		/* Drop RCU to sleep in request_module(), then retry. */
		rcu_read_unlock();
		request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
		rcu_read_lock();
		ss = nfnetlink_get_subsys(type);
		if (!ss)
#endif
		{
			rcu_read_unlock();
			return -EINVAL;
		}
	}

	nc = nfnetlink_find_client(type, ss);
	if (!nc) {
		rcu_read_unlock();
		return -EINVAL;
	}

	{
		int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
		u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
		/* VLA sized by the number of attributes this callback
		 * registered (+1 because attribute 0 is unused). */
		struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
		struct nlattr *attr = (void *)nlh + min_len;
		int attrlen = nlh->nlmsg_len - min_len;
		__u8 subsys_id = NFNL_SUBSYS_ID(type);

		err = nla_parse(cda, ss->cb[cb_id].attr_count,
				attr, attrlen, ss->cb[cb_id].policy);
		if (err < 0) {
			rcu_read_unlock();
			return err;
		}

		if (nc->call_rcu) {
			/* Handler runs entirely under rcu_read_lock(). */
			err = nc->call_rcu(net->nfnl, skb, nlh,
					   (const struct nlattr **)cda);
			rcu_read_unlock();
		} else {
			/* Switch from RCU to the subsystem mutex; the
			 * subsystem may have been swapped in the window,
			 * so re-validate ss and nc and replay on change. */
			rcu_read_unlock();
			nfnl_lock(subsys_id);
			if (rcu_dereference_protected(table[subsys_id].subsys,
				lockdep_is_held(&table[subsys_id].mutex)) != ss ||
			    nfnetlink_find_client(type, ss) != nc)
				err = -EAGAIN;
			else if (nc->call)
				err = nc->call(net->nfnl, skb, nlh,
					       (const struct nlattr **)cda);
			else
				err = -EINVAL;
			nfnl_unlock(subsys_id);
		}
		if (err == -EAGAIN)
			goto replay;
		return err;
	}
}
224 226
/* A deferred per-message error: acks/errors are queued while a batch
 * runs (via nfnl_err_add()) and delivered once the whole batch has
 * been processed (nfnl_err_deliver()). */
struct nfnl_err {
	struct list_head head;	/* link in the batch's err_list */
	struct nlmsghdr *nlh;	/* message this error refers to */
	int err;		/* 0 (plain ACK) or negative errno */
};
230 232
231 static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err) 233 static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
232 { 234 {
233 struct nfnl_err *nfnl_err; 235 struct nfnl_err *nfnl_err;
234 236
235 nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL); 237 nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
236 if (nfnl_err == NULL) 238 if (nfnl_err == NULL)
237 return -ENOMEM; 239 return -ENOMEM;
238 240
239 nfnl_err->nlh = nlh; 241 nfnl_err->nlh = nlh;
240 nfnl_err->err = err; 242 nfnl_err->err = err;
241 list_add_tail(&nfnl_err->head, list); 243 list_add_tail(&nfnl_err->head, list);
242 244
243 return 0; 245 return 0;
244 } 246 }
245 247
/* Unlink one queued error node and free it. */
static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
	list_del(&nfnl_err->head);
	kfree(nfnl_err);
}
251 253
252 static void nfnl_err_reset(struct list_head *err_list) 254 static void nfnl_err_reset(struct list_head *err_list)
253 { 255 {
254 struct nfnl_err *nfnl_err, *next; 256 struct nfnl_err *nfnl_err, *next;
255 257
256 list_for_each_entry_safe(nfnl_err, next, err_list, head) 258 list_for_each_entry_safe(nfnl_err, next, err_list, head)
257 nfnl_err_del(nfnl_err); 259 nfnl_err_del(nfnl_err);
258 } 260 }
259 261
260 static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb) 262 static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
261 { 263 {
262 struct nfnl_err *nfnl_err, *next; 264 struct nfnl_err *nfnl_err, *next;
263 265
264 list_for_each_entry_safe(nfnl_err, next, err_list, head) { 266 list_for_each_entry_safe(nfnl_err, next, err_list, head) {
265 netlink_ack(skb, nfnl_err->nlh, nfnl_err->err); 267 netlink_ack(skb, nfnl_err->nlh, nfnl_err->err);
266 nfnl_err_del(nfnl_err); 268 nfnl_err_del(nfnl_err);
267 } 269 }
268 } 270 }
269 271
270 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, 272 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
271 u_int16_t subsys_id) 273 u_int16_t subsys_id)
272 { 274 {
273 struct sk_buff *nskb, *oskb = skb; 275 struct sk_buff *nskb, *oskb = skb;
274 struct net *net = sock_net(skb->sk); 276 struct net *net = sock_net(skb->sk);
275 const struct nfnetlink_subsystem *ss; 277 const struct nfnetlink_subsystem *ss;
276 const struct nfnl_callback *nc; 278 const struct nfnl_callback *nc;
277 bool success = true, done = false; 279 bool success = true, done = false;
278 static LIST_HEAD(err_list); 280 static LIST_HEAD(err_list);
279 int err; 281 int err;
280 282
281 if (subsys_id >= NFNL_SUBSYS_COUNT) 283 if (subsys_id >= NFNL_SUBSYS_COUNT)
282 return netlink_ack(skb, nlh, -EINVAL); 284 return netlink_ack(skb, nlh, -EINVAL);
283 replay: 285 replay:
284 nskb = netlink_skb_clone(oskb, GFP_KERNEL); 286 nskb = netlink_skb_clone(oskb, GFP_KERNEL);
285 if (!nskb) 287 if (!nskb)
286 return netlink_ack(oskb, nlh, -ENOMEM); 288 return netlink_ack(oskb, nlh, -ENOMEM);
287 289
288 nskb->sk = oskb->sk; 290 nskb->sk = oskb->sk;
289 skb = nskb; 291 skb = nskb;
290 292
291 nfnl_lock(subsys_id); 293 nfnl_lock(subsys_id);
292 ss = rcu_dereference_protected(table[subsys_id].subsys, 294 ss = rcu_dereference_protected(table[subsys_id].subsys,
293 lockdep_is_held(&table[subsys_id].mutex)); 295 lockdep_is_held(&table[subsys_id].mutex));
294 if (!ss) { 296 if (!ss) {
295 #ifdef CONFIG_MODULES 297 #ifdef CONFIG_MODULES
296 nfnl_unlock(subsys_id); 298 nfnl_unlock(subsys_id);
297 request_module("nfnetlink-subsys-%d", subsys_id); 299 request_module("nfnetlink-subsys-%d", subsys_id);
298 nfnl_lock(subsys_id); 300 nfnl_lock(subsys_id);
299 ss = rcu_dereference_protected(table[subsys_id].subsys, 301 ss = rcu_dereference_protected(table[subsys_id].subsys,
300 lockdep_is_held(&table[subsys_id].mutex)); 302 lockdep_is_held(&table[subsys_id].mutex));
301 if (!ss) 303 if (!ss)
302 #endif 304 #endif
303 { 305 {
304 nfnl_unlock(subsys_id); 306 nfnl_unlock(subsys_id);
305 netlink_ack(skb, nlh, -EOPNOTSUPP); 307 netlink_ack(skb, nlh, -EOPNOTSUPP);
306 return kfree_skb(nskb); 308 return kfree_skb(nskb);
307 } 309 }
308 } 310 }
309 311
310 if (!ss->commit || !ss->abort) { 312 if (!ss->commit || !ss->abort) {
311 nfnl_unlock(subsys_id); 313 nfnl_unlock(subsys_id);
312 netlink_ack(skb, nlh, -EOPNOTSUPP); 314 netlink_ack(skb, nlh, -EOPNOTSUPP);
313 return kfree_skb(skb); 315 return kfree_skb(skb);
314 } 316 }
315 317
316 while (skb->len >= nlmsg_total_size(0)) { 318 while (skb->len >= nlmsg_total_size(0)) {
317 int msglen, type; 319 int msglen, type;
318 320
319 nlh = nlmsg_hdr(skb); 321 nlh = nlmsg_hdr(skb);
320 err = 0; 322 err = 0;
321 323
322 if (nlh->nlmsg_len < NLMSG_HDRLEN) { 324 if (nlh->nlmsg_len < NLMSG_HDRLEN) {
323 err = -EINVAL; 325 err = -EINVAL;
324 goto ack; 326 goto ack;
325 } 327 }
326 328
327 /* Only requests are handled by the kernel */ 329 /* Only requests are handled by the kernel */
328 if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) { 330 if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
329 err = -EINVAL; 331 err = -EINVAL;
330 goto ack; 332 goto ack;
331 } 333 }
332 334
333 type = nlh->nlmsg_type; 335 type = nlh->nlmsg_type;
334 if (type == NFNL_MSG_BATCH_BEGIN) { 336 if (type == NFNL_MSG_BATCH_BEGIN) {
335 /* Malformed: Batch begin twice */ 337 /* Malformed: Batch begin twice */
336 nfnl_err_reset(&err_list); 338 nfnl_err_reset(&err_list);
337 success = false; 339 success = false;
338 goto done; 340 goto done;
339 } else if (type == NFNL_MSG_BATCH_END) { 341 } else if (type == NFNL_MSG_BATCH_END) {
340 done = true; 342 done = true;
341 goto done; 343 goto done;
342 } else if (type < NLMSG_MIN_TYPE) { 344 } else if (type < NLMSG_MIN_TYPE) {
343 err = -EINVAL; 345 err = -EINVAL;
344 goto ack; 346 goto ack;
345 } 347 }
346 348
347 /* We only accept a batch with messages for the same 349 /* We only accept a batch with messages for the same
348 * subsystem. 350 * subsystem.
349 */ 351 */
350 if (NFNL_SUBSYS_ID(type) != subsys_id) { 352 if (NFNL_SUBSYS_ID(type) != subsys_id) {
351 err = -EINVAL; 353 err = -EINVAL;
352 goto ack; 354 goto ack;
353 } 355 }
354 356
355 nc = nfnetlink_find_client(type, ss); 357 nc = nfnetlink_find_client(type, ss);
356 if (!nc) { 358 if (!nc) {
357 err = -EINVAL; 359 err = -EINVAL;
358 goto ack; 360 goto ack;
359 } 361 }
360 362
361 { 363 {
362 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); 364 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
363 u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); 365 u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
364 struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; 366 struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
365 struct nlattr *attr = (void *)nlh + min_len; 367 struct nlattr *attr = (void *)nlh + min_len;
366 int attrlen = nlh->nlmsg_len - min_len; 368 int attrlen = nlh->nlmsg_len - min_len;
367 369
368 err = nla_parse(cda, ss->cb[cb_id].attr_count, 370 err = nla_parse(cda, ss->cb[cb_id].attr_count,
369 attr, attrlen, ss->cb[cb_id].policy); 371 attr, attrlen, ss->cb[cb_id].policy);
370 if (err < 0) 372 if (err < 0)
371 goto ack; 373 goto ack;
372 374
373 if (nc->call_batch) { 375 if (nc->call_batch) {
374 err = nc->call_batch(net->nfnl, skb, nlh, 376 err = nc->call_batch(net->nfnl, skb, nlh,
375 (const struct nlattr **)cda); 377 (const struct nlattr **)cda);
376 } 378 }
377 379
378 /* The lock was released to autoload some module, we 380 /* The lock was released to autoload some module, we
379 * have to abort and start from scratch using the 381 * have to abort and start from scratch using the
380 * original skb. 382 * original skb.
381 */ 383 */
382 if (err == -EAGAIN) { 384 if (err == -EAGAIN) {
383 nfnl_err_reset(&err_list); 385 nfnl_err_reset(&err_list);
384 ss->abort(oskb); 386 ss->abort(oskb);
385 nfnl_unlock(subsys_id); 387 nfnl_unlock(subsys_id);
386 kfree_skb(nskb); 388 kfree_skb(nskb);
387 goto replay; 389 goto replay;
388 } 390 }
389 } 391 }
390 ack: 392 ack:
391 if (nlh->nlmsg_flags & NLM_F_ACK || err) { 393 if (nlh->nlmsg_flags & NLM_F_ACK || err) {
392 /* Errors are delivered once the full batch has been 394 /* Errors are delivered once the full batch has been
393 * processed, this avoids that the same error is 395 * processed, this avoids that the same error is
394 * reported several times when replaying the batch. 396 * reported several times when replaying the batch.
395 */ 397 */
396 if (nfnl_err_add(&err_list, nlh, err) < 0) { 398 if (nfnl_err_add(&err_list, nlh, err) < 0) {
397 /* We failed to enqueue an error, reset the 399 /* We failed to enqueue an error, reset the
398 * list of errors and send OOM to userspace 400 * list of errors and send OOM to userspace
399 * pointing to the batch header. 401 * pointing to the batch header.
400 */ 402 */
401 nfnl_err_reset(&err_list); 403 nfnl_err_reset(&err_list);
402 netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM); 404 netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
403 success = false; 405 success = false;
404 goto done; 406 goto done;
405 } 407 }
406 /* We don't stop processing the batch on errors, thus, 408 /* We don't stop processing the batch on errors, thus,
407 * userspace gets all the errors that the batch 409 * userspace gets all the errors that the batch
408 * triggers. 410 * triggers.
409 */ 411 */
410 if (err) 412 if (err)
411 success = false; 413 success = false;
412 } 414 }
413 415
414 msglen = NLMSG_ALIGN(nlh->nlmsg_len); 416 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
415 if (msglen > skb->len) 417 if (msglen > skb->len)
416 msglen = skb->len; 418 msglen = skb->len;
417 skb_pull(skb, msglen); 419 skb_pull(skb, msglen);
418 } 420 }
419 done: 421 done:
420 if (success && done) 422 if (success && done)
421 ss->commit(oskb); 423 ss->commit(oskb);
422 else 424 else
423 ss->abort(oskb); 425 ss->abort(oskb);
424 426
425 nfnl_err_deliver(&err_list, oskb); 427 nfnl_err_deliver(&err_list, oskb);
426 nfnl_unlock(subsys_id); 428 nfnl_unlock(subsys_id);
427 kfree_skb(nskb); 429 kfree_skb(nskb);
428 } 430 }
429 431
430 static void nfnetlink_rcv(struct sk_buff *skb) 432 static void nfnetlink_rcv(struct sk_buff *skb)
431 { 433 {
432 struct nlmsghdr *nlh = nlmsg_hdr(skb); 434 struct nlmsghdr *nlh = nlmsg_hdr(skb);
433 int msglen; 435 int msglen;
434 436
435 if (nlh->nlmsg_len < NLMSG_HDRLEN || 437 if (nlh->nlmsg_len < NLMSG_HDRLEN ||
436 skb->len < nlh->nlmsg_len) 438 skb->len < nlh->nlmsg_len)
437 return; 439 return;
438 440
439 if (!netlink_net_capable(skb, CAP_NET_ADMIN)) { 441 if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
440 netlink_ack(skb, nlh, -EPERM); 442 netlink_ack(skb, nlh, -EPERM);
441 return; 443 return;
442 } 444 }
443 445
444 if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN) { 446 if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN) {
445 struct nfgenmsg *nfgenmsg; 447 struct nfgenmsg *nfgenmsg;
446 448
447 msglen = NLMSG_ALIGN(nlh->nlmsg_len); 449 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
448 if (msglen > skb->len) 450 if (msglen > skb->len)
449 msglen = skb->len; 451 msglen = skb->len;
450 452
451 if (nlh->nlmsg_len < NLMSG_HDRLEN || 453 if (nlh->nlmsg_len < NLMSG_HDRLEN ||
452 skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg)) 454 skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
453 return; 455 return;
454 456
455 nfgenmsg = nlmsg_data(nlh); 457 nfgenmsg = nlmsg_data(nlh);
456 skb_pull(skb, msglen); 458 skb_pull(skb, msglen);
457 nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id); 459 nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
458 } else { 460 } else {
459 netlink_rcv_skb(skb, &nfnetlink_rcv_msg); 461 netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
460 } 462 }
461 } 463 }
462 464
#ifdef CONFIG_MODULES
/* Called when a socket joins multicast group @group: autoload the
 * owning subsystem's module if it is not registered yet. */
static int nfnetlink_bind(int group)
{
	const struct nfnetlink_subsystem *ss;
	int type;

	/* Bounds-check the caller-supplied group before indexing
	 * nfnl_group2type[] (fixes an out-of-bounds read reachable
	 * from setsockopt with CAP_NET_ADMIN). */
	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return -EINVAL;

	type = nfnl_group2type[group];

	rcu_read_lock();
	ss = nfnetlink_get_subsys(type);
	rcu_read_unlock();
	if (!ss)
		request_module("nfnetlink-subsys-%d", type);
	return 0;
}
#endif
477 484
/* Per-netns init: create the NETLINK_NETFILTER kernel socket and
 * publish it both as an RCU pointer (net->nfnl, for fast readers)
 * and a plain stash (net->nfnl_stash, for teardown). */
static int __net_init nfnetlink_net_init(struct net *net)
{
	struct sock *nfnl;
	struct netlink_kernel_cfg cfg = {
		.groups	= NFNLGRP_MAX,
		.input	= nfnetlink_rcv,
#ifdef CONFIG_MODULES
		.bind	= nfnetlink_bind,
#endif
	};

	nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
	if (!nfnl)
		return -ENOMEM;
	net->nfnl_stash = nfnl;
	rcu_assign_pointer(net->nfnl, nfnl);
	return 0;
}
496 503
/* Per-netns teardown, batched: first hide net->nfnl from new RCU
 * readers in every exiting netns, wait one grace period for in-flight
 * readers, then release the sockets through the stashed pointer. */
static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		RCU_INIT_POINTER(net->nfnl, NULL);
	synchronize_net();
	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->nfnl_stash);
}
507 514
/* Pernet hooks: one nfnetlink socket per network namespace. */
static struct pernet_operations nfnetlink_net_ops = {
	.init		= nfnetlink_net_init,
	.exit_batch	= nfnetlink_net_exit_batch,
};
512 519
513 static int __init nfnetlink_init(void) 520 static int __init nfnetlink_init(void)
514 { 521 {
515 int i; 522 int i;
523
524 for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
525 BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);
516 526
517 for (i=0; i<NFNL_SUBSYS_COUNT; i++) 527 for (i=0; i<NFNL_SUBSYS_COUNT; i++)
518 mutex_init(&table[i].mutex); 528 mutex_init(&table[i].mutex);
519 529
520 pr_info("Netfilter messages via NETLINK v%s.\n", nfversion); 530 pr_info("Netfilter messages via NETLINK v%s.\n", nfversion);
521 return register_pernet_subsys(&nfnetlink_net_ops); 531 return register_pernet_subsys(&nfnetlink_net_ops);
522 } 532 }
523 533
/* Module exit: tear down the pernet sockets registered at init. */
static void __exit nfnetlink_exit(void)
{
	pr_info("Removing netfilter NETLINK layer.\n");
	unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);
531 541