Blame view

net/batman-adv/send.c 11.3 KB
c6c8fea29   Sven Eckelmann   net: Add batman-a...
1
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
  
  #include "main.h"
  #include "send.h"
  #include "routing.h"
  #include "translation-table.h"
  #include "soft-interface.h"
  #include "hard-interface.h"
c6c8fea29   Sven Eckelmann   net: Add batman-a...
28
  #include "vis.h"
c6c8fea29   Sven Eckelmann   net: Add batman-a...
29
30
  #include "gateway_common.h"
  #include "originator.h"
b9dacc521   Marek Lindner   batman-adv: agglo...
31
  #include "bat_ogm.h"
c6c8fea29   Sven Eckelmann   net: Add batman-a...
32
33
  
  static void send_outstanding_bcast_packet(struct work_struct *work);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
34
35
  /* send out an already prepared packet to the given address via the
   * specified batman interface */
747e4221a   Sven Eckelmann   batman-adv: Add c...
36
37
  int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
  		    const uint8_t *dst_addr)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
38
39
  {
  	struct ethhdr *ethhdr;
e6c10f433   Marek Lindner   batman-adv: renam...
40
  	if (hard_iface->if_status != IF_ACTIVE)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
41
  		goto send_skb_err;
e6c10f433   Marek Lindner   batman-adv: renam...
42
  	if (unlikely(!hard_iface->net_dev))
c6c8fea29   Sven Eckelmann   net: Add batman-a...
43
  		goto send_skb_err;
e6c10f433   Marek Lindner   batman-adv: renam...
44
  	if (!(hard_iface->net_dev->flags & IFF_UP)) {
c6c8fea29   Sven Eckelmann   net: Add batman-a...
45
  		pr_warning("Interface %s is not up - can't send packet via "
e6c10f433   Marek Lindner   batman-adv: renam...
46
47
  			   "that interface!
  ", hard_iface->net_dev->name);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
48
49
50
51
  		goto send_skb_err;
  	}
  
  	/* push to the ethernet header. */
704509b8d   Sven Eckelmann   batman-adv: Calcu...
52
  	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
53
54
55
56
57
  		goto send_skb_err;
  
  	skb_reset_mac_header(skb);
  
  	ethhdr = (struct ethhdr *) skb_mac_header(skb);
e6c10f433   Marek Lindner   batman-adv: renam...
58
  	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
59
60
61
62
63
64
  	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
  	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
  
  	skb_set_network_header(skb, ETH_HLEN);
  	skb->priority = TC_PRIO_CONTROL;
  	skb->protocol = __constant_htons(ETH_P_BATMAN);
e6c10f433   Marek Lindner   batman-adv: renam...
65
  	skb->dev = hard_iface->net_dev;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
66
67
68
69
70
71
72
73
74
75
  
  	/* dev_queue_xmit() returns a negative result on error.	 However on
  	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
  	 * (which is > 0). This will not be treated as an error. */
  
  	return dev_queue_xmit(skb);
  send_skb_err:
  	kfree_skb(skb);
  	return NET_XMIT_DROP;
  }
a73105b8d   Antonio Quartulli   batman-adv: impro...
76
  static void realloc_packet_buffer(struct hard_iface *hard_iface,
b6da4bf5d   Marek Lindner   batman-adv: renam...
77
  				  int new_len)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
78
  {
c6c8fea29   Sven Eckelmann   net: Add batman-a...
79
  	unsigned char *new_buff;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
80

c6c8fea29   Sven Eckelmann   net: Add batman-a...
81
82
83
84
  	new_buff = kmalloc(new_len, GFP_ATOMIC);
  
  	/* keep old buffer if kmalloc should fail */
  	if (new_buff) {
e6c10f433   Marek Lindner   batman-adv: renam...
85
  		memcpy(new_buff, hard_iface->packet_buff,
b6da4bf5d   Marek Lindner   batman-adv: renam...
86
  		       BATMAN_OGM_LEN);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
87

e6c10f433   Marek Lindner   batman-adv: renam...
88
89
90
  		kfree(hard_iface->packet_buff);
  		hard_iface->packet_buff = new_buff;
  		hard_iface->packet_len = new_len;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
91
92
  	}
  }
a73105b8d   Antonio Quartulli   batman-adv: impro...
93
  /* when calling this function (hard_iface == primary_if) has to be true */
b9dacc521   Marek Lindner   batman-adv: agglo...
94
  static int prepare_packet_buffer(struct bat_priv *bat_priv,
a73105b8d   Antonio Quartulli   batman-adv: impro...
95
96
97
  				  struct hard_iface *hard_iface)
  {
  	int new_len;
a73105b8d   Antonio Quartulli   batman-adv: impro...
98

b6da4bf5d   Marek Lindner   batman-adv: renam...
99
  	new_len = BATMAN_OGM_LEN +
a73105b8d   Antonio Quartulli   batman-adv: impro...
100
101
102
103
104
  		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
  
  	/* if we have too many changes for one packet don't send any
  	 * and wait for the tt table request which will be fragmented */
  	if (new_len > hard_iface->soft_iface->mtu)
b6da4bf5d   Marek Lindner   batman-adv: renam...
105
  		new_len = BATMAN_OGM_LEN;
a73105b8d   Antonio Quartulli   batman-adv: impro...
106
107
  
  	realloc_packet_buffer(hard_iface, new_len);
a73105b8d   Antonio Quartulli   batman-adv: impro...
108
109
110
111
112
  
  	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
  
  	/* reset the sending counter */
  	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
b9dacc521   Marek Lindner   batman-adv: agglo...
113
114
115
  	return tt_changes_fill_buffer(bat_priv,
  				      hard_iface->packet_buff + BATMAN_OGM_LEN,
  				      hard_iface->packet_len - BATMAN_OGM_LEN);
a73105b8d   Antonio Quartulli   batman-adv: impro...
116
  }
b9dacc521   Marek Lindner   batman-adv: agglo...
117
  static int reset_packet_buffer(struct bat_priv *bat_priv,
b6da4bf5d   Marek Lindner   batman-adv: renam...
118
  				struct hard_iface *hard_iface)
a73105b8d   Antonio Quartulli   batman-adv: impro...
119
  {
b6da4bf5d   Marek Lindner   batman-adv: renam...
120
  	realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
b9dacc521   Marek Lindner   batman-adv: agglo...
121
  	return 0;
a73105b8d   Antonio Quartulli   batman-adv: impro...
122
  }
b9dacc521   Marek Lindner   batman-adv: agglo...
123
  void schedule_bat_ogm(struct hard_iface *hard_iface)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
124
  {
e6c10f433   Marek Lindner   batman-adv: renam...
125
  	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
32ae9b221   Marek Lindner   batman-adv: Make ...
126
  	struct hard_iface *primary_if;
b9dacc521   Marek Lindner   batman-adv: agglo...
127
  	int tt_num_changes = -1;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
128

e6c10f433   Marek Lindner   batman-adv: renam...
129
130
  	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
  	    (hard_iface->if_status == IF_TO_BE_REMOVED))
c6c8fea29   Sven Eckelmann   net: Add batman-a...
131
  		return;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
132
133
134
135
136
137
138
  	/**
  	 * the interface gets activated here to avoid race conditions between
  	 * the moment of activating the interface in
  	 * hardif_activate_interface() where the originator mac is set and
  	 * outdated packets (especially uninitialized mac addresses) in the
  	 * packet queue
  	 */
e6c10f433   Marek Lindner   batman-adv: renam...
139
140
  	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
  		hard_iface->if_status = IF_ACTIVE;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
141

b9dacc521   Marek Lindner   batman-adv: agglo...
142
  	primary_if = primary_if_get_selected(bat_priv);
a73105b8d   Antonio Quartulli   batman-adv: impro...
143
144
145
  	if (hard_iface == primary_if) {
  		/* if at least one change happened */
  		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
058d0e269   Antonio Quartulli   batman-adv: keep ...
146
  			tt_commit_changes(bat_priv);
b9dacc521   Marek Lindner   batman-adv: agglo...
147
148
  			tt_num_changes = prepare_packet_buffer(bat_priv,
  							       hard_iface);
a73105b8d   Antonio Quartulli   batman-adv: impro...
149
  		}
015758d00   Antonio Quartulli   batman-adv: corre...
150
  		/* if the changes have been sent often enough */
a73105b8d   Antonio Quartulli   batman-adv: impro...
151
  		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
b9dacc521   Marek Lindner   batman-adv: agglo...
152
153
  			tt_num_changes = reset_packet_buffer(bat_priv,
  							     hard_iface);
a73105b8d   Antonio Quartulli   batman-adv: impro...
154
  	}
c6c8fea29   Sven Eckelmann   net: Add batman-a...
155

32ae9b221   Marek Lindner   batman-adv: Make ...
156
157
  	if (primary_if)
  		hardif_free_ref(primary_if);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
158

b9dacc521   Marek Lindner   batman-adv: agglo...
159
  	bat_ogm_schedule(hard_iface, tt_num_changes);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
160
161
162
163
164
165
  }
  
  static void forw_packet_free(struct forw_packet *forw_packet)
  {
  	if (forw_packet->skb)
  		kfree_skb(forw_packet->skb);
6d5808d4a   Sven Eckelmann   batman-adv: Add m...
166
167
  	if (forw_packet->if_incoming)
  		hardif_free_ref(forw_packet->if_incoming);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
  	kfree(forw_packet);
  }
  
  static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
  				      struct forw_packet *forw_packet,
  				      unsigned long send_time)
  {
  	INIT_HLIST_NODE(&forw_packet->list);
  
  	/* add new packet to packet list */
  	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
  	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
  	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
  
  	/* start timer for this packet */
  	INIT_DELAYED_WORK(&forw_packet->delayed_work,
  			  send_outstanding_bcast_packet);
  	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
  			   send_time);
  }
c6c8fea29   Sven Eckelmann   net: Add batman-a...
188
  /* add a broadcast packet to the queue and setup timers. broadcast packets
015758d00   Antonio Quartulli   batman-adv: corre...
189
   * are sent multiple times to increase probability for being received.
c6c8fea29   Sven Eckelmann   net: Add batman-a...
190
191
192
193
194
195
   *
   * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
   * errors.
   *
   * The skb is not consumed, so the caller should make sure that the
   * skb is freed. */
747e4221a   Sven Eckelmann   batman-adv: Add c...
196
  int add_bcast_packet_to_list(struct bat_priv *bat_priv,
8698529d2   Antonio Quartulli   batman-adv: add_b...
197
  			     const struct sk_buff *skb, unsigned long delay)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
198
  {
32ae9b221   Marek Lindner   batman-adv: Make ...
199
  	struct hard_iface *primary_if = NULL;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
200
201
  	struct forw_packet *forw_packet;
  	struct bcast_packet *bcast_packet;
747e4221a   Sven Eckelmann   batman-adv: Add c...
202
  	struct sk_buff *newskb;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
203
204
205
206
207
208
  
  	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
  		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full
  ");
  		goto out;
  	}
32ae9b221   Marek Lindner   batman-adv: Make ...
209
210
  	primary_if = primary_if_get_selected(bat_priv);
  	if (!primary_if)
ca06c6eb9   Marek Lindner   batman-adv: reset...
211
  		goto out_and_inc;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
212

704509b8d   Sven Eckelmann   batman-adv: Calcu...
213
  	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
214
215
216
  
  	if (!forw_packet)
  		goto out_and_inc;
747e4221a   Sven Eckelmann   batman-adv: Add c...
217
218
  	newskb = skb_copy(skb, GFP_ATOMIC);
  	if (!newskb)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
219
220
221
  		goto packet_free;
  
  	/* as we have a copy now, it is safe to decrease the TTL */
747e4221a   Sven Eckelmann   batman-adv: Add c...
222
  	bcast_packet = (struct bcast_packet *)newskb->data;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
223
  	bcast_packet->ttl--;
747e4221a   Sven Eckelmann   batman-adv: Add c...
224
  	skb_reset_mac_header(newskb);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
225

747e4221a   Sven Eckelmann   batman-adv: Add c...
226
  	forw_packet->skb = newskb;
32ae9b221   Marek Lindner   batman-adv: Make ...
227
  	forw_packet->if_incoming = primary_if;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
228
229
230
  
  	/* how often did we send the bcast packet ? */
  	forw_packet->num_packets = 0;
8698529d2   Antonio Quartulli   batman-adv: add_b...
231
  	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
232
233
234
235
236
237
238
  	return NETDEV_TX_OK;
  
  packet_free:
  	kfree(forw_packet);
  out_and_inc:
  	atomic_inc(&bat_priv->bcast_queue_left);
  out:
32ae9b221   Marek Lindner   batman-adv: Make ...
239
240
  	if (primary_if)
  		hardif_free_ref(primary_if);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
241
242
243
244
245
  	return NETDEV_TX_BUSY;
  }
  
  static void send_outstanding_bcast_packet(struct work_struct *work)
  {
e6c10f433   Marek Lindner   batman-adv: renam...
246
  	struct hard_iface *hard_iface;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
  	struct delayed_work *delayed_work =
  		container_of(work, struct delayed_work, work);
  	struct forw_packet *forw_packet =
  		container_of(delayed_work, struct forw_packet, delayed_work);
  	struct sk_buff *skb1;
  	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
  	struct bat_priv *bat_priv = netdev_priv(soft_iface);
  
  	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
  	hlist_del(&forw_packet->list);
  	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
  
  	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
  		goto out;
  
  	/* rebroadcast packet */
  	rcu_read_lock();
e6c10f433   Marek Lindner   batman-adv: renam...
264
265
  	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
  		if (hard_iface->soft_iface != soft_iface)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
266
267
268
269
270
  			continue;
  
  		/* send a copy of the saved skb */
  		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
  		if (skb1)
e6c10f433   Marek Lindner   batman-adv: renam...
271
  			send_skb_packet(skb1, hard_iface, broadcast_addr);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
  	}
  	rcu_read_unlock();
  
  	forw_packet->num_packets++;
  
  	/* if we still have some more bcasts to send */
  	if (forw_packet->num_packets < 3) {
  		_add_bcast_packet_to_list(bat_priv, forw_packet,
  					  ((5 * HZ) / 1000));
  		return;
  	}
  
  out:
  	forw_packet_free(forw_packet);
  	atomic_inc(&bat_priv->bcast_queue_left);
  }
b9dacc521   Marek Lindner   batman-adv: agglo...
288
  void send_outstanding_bat_ogm_packet(struct work_struct *work)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
289
290
291
292
293
294
295
296
297
298
299
300
301
302
  {
  	struct delayed_work *delayed_work =
  		container_of(work, struct delayed_work, work);
  	struct forw_packet *forw_packet =
  		container_of(delayed_work, struct forw_packet, delayed_work);
  	struct bat_priv *bat_priv;
  
  	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
  	spin_lock_bh(&bat_priv->forw_bat_list_lock);
  	hlist_del(&forw_packet->list);
  	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
  
  	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
  		goto out;
b9dacc521   Marek Lindner   batman-adv: agglo...
303
  	bat_ogm_emit(forw_packet);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
304
305
306
307
308
309
310
  
  	/**
  	 * we have to have at least one packet in the queue
  	 * to determine the queues wake up time unless we are
  	 * shutting down
  	 */
  	if (forw_packet->own)
b9dacc521   Marek Lindner   batman-adv: agglo...
311
  		schedule_bat_ogm(forw_packet->if_incoming);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
312
313
314
315
316
317
318
319
320
321
  
  out:
  	/* don't count own packet */
  	if (!forw_packet->own)
  		atomic_inc(&bat_priv->batman_queue_left);
  
  	forw_packet_free(forw_packet);
  }
  
  void purge_outstanding_packets(struct bat_priv *bat_priv,
747e4221a   Sven Eckelmann   batman-adv: Add c...
322
  			       const struct hard_iface *hard_iface)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
323
324
325
  {
  	struct forw_packet *forw_packet;
  	struct hlist_node *tmp_node, *safe_tmp_node;
6d5808d4a   Sven Eckelmann   batman-adv: Add m...
326
  	bool pending;
c6c8fea29   Sven Eckelmann   net: Add batman-a...
327

e6c10f433   Marek Lindner   batman-adv: renam...
328
  	if (hard_iface)
c6c8fea29   Sven Eckelmann   net: Add batman-a...
329
330
331
  		bat_dbg(DBG_BATMAN, bat_priv,
  			"purge_outstanding_packets(): %s
  ",
e6c10f433   Marek Lindner   batman-adv: renam...
332
  			hard_iface->net_dev->name);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
333
334
335
336
337
338
339
340
341
342
343
  	else
  		bat_dbg(DBG_BATMAN, bat_priv,
  			"purge_outstanding_packets()
  ");
  
  	/* free bcast list */
  	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
  	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
  				  &bat_priv->forw_bcast_list, list) {
  
  		/**
015758d00   Antonio Quartulli   batman-adv: corre...
344
  		 * if purge_outstanding_packets() was called with an argument
c6c8fea29   Sven Eckelmann   net: Add batman-a...
345
346
  		 * we delete only packets belonging to the given interface
  		 */
e6c10f433   Marek Lindner   batman-adv: renam...
347
348
  		if ((hard_iface) &&
  		    (forw_packet->if_incoming != hard_iface))
c6c8fea29   Sven Eckelmann   net: Add batman-a...
349
350
351
352
353
354
355
356
  			continue;
  
  		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
  
  		/**
  		 * send_outstanding_bcast_packet() will lock the list to
  		 * delete the item from the list
  		 */
6d5808d4a   Sven Eckelmann   batman-adv: Add m...
357
  		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
358
  		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
6d5808d4a   Sven Eckelmann   batman-adv: Add m...
359
360
361
362
363
  
  		if (pending) {
  			hlist_del(&forw_packet->list);
  			forw_packet_free(forw_packet);
  		}
c6c8fea29   Sven Eckelmann   net: Add batman-a...
364
365
366
367
368
369
370
371
372
  	}
  	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
  
  	/* free batman packet list */
  	spin_lock_bh(&bat_priv->forw_bat_list_lock);
  	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
  				  &bat_priv->forw_bat_list, list) {
  
  		/**
015758d00   Antonio Quartulli   batman-adv: corre...
373
  		 * if purge_outstanding_packets() was called with an argument
c6c8fea29   Sven Eckelmann   net: Add batman-a...
374
375
  		 * we delete only packets belonging to the given interface
  		 */
e6c10f433   Marek Lindner   batman-adv: renam...
376
377
  		if ((hard_iface) &&
  		    (forw_packet->if_incoming != hard_iface))
c6c8fea29   Sven Eckelmann   net: Add batman-a...
378
379
380
381
382
383
384
385
  			continue;
  
  		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
  
  		/**
  		 * send_outstanding_bat_packet() will lock the list to
  		 * delete the item from the list
  		 */
6d5808d4a   Sven Eckelmann   batman-adv: Add m...
386
  		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
c6c8fea29   Sven Eckelmann   net: Add batman-a...
387
  		spin_lock_bh(&bat_priv->forw_bat_list_lock);
6d5808d4a   Sven Eckelmann   batman-adv: Add m...
388
389
390
391
392
  
  		if (pending) {
  			hlist_del(&forw_packet->list);
  			forw_packet_free(forw_packet);
  		}
c6c8fea29   Sven Eckelmann   net: Add batman-a...
393
394
395
  	}
  	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
  }