Blame view
net/core/link_watch.c
5.49 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 11 12 |
/*
 * Linux network device link state notification
 *
 * Author:
 *     Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
1da177e4c
|
13 14 15 16 |
#include <linux/module.h> #include <linux/netdevice.h> #include <linux/if.h> #include <net/sock.h> |
cacaddf57
|
17 |
#include <net/pkt_sched.h> |
1da177e4c
|
18 19 20 |
#include <linux/rtnetlink.h> #include <linux/jiffies.h> #include <linux/spinlock.h> |
1da177e4c
|
21 22 23 24 25 26 |
#include <linux/workqueue.h> #include <linux/bitops.h> #include <asm/types.h> enum lw_bits { |
d9568ba91
|
27 |
LW_URGENT = 0, |
1da177e4c
|
28 29 30 31 |
}; static unsigned long linkwatch_flags; static unsigned long linkwatch_nextevent; |
65f27f384
|
32 33 |
static void linkwatch_event(struct work_struct *dummy); static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); |
1da177e4c
|
34 |
|
e014debec
|
35 |
static LIST_HEAD(lweventlist); |
1da177e4c
|
36 |
static DEFINE_SPINLOCK(lweventlist_lock); |
b00055aac
|
37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 |
static unsigned char default_operstate(const struct net_device *dev) { if (!netif_carrier_ok(dev)) return (dev->ifindex != dev->iflink ? IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN); if (netif_dormant(dev)) return IF_OPER_DORMANT; return IF_OPER_UP; } static void rfc2863_policy(struct net_device *dev) { unsigned char operstate = default_operstate(dev); if (operstate == dev->operstate) return; write_lock_bh(&dev_base_lock); switch(dev->link_mode) { case IF_LINK_MODE_DORMANT: if (operstate == IF_OPER_UP) operstate = IF_OPER_DORMANT; break; case IF_LINK_MODE_DEFAULT: default: break; |
3ff50b799
|
68 |
} |
b00055aac
|
69 70 71 72 73 |
dev->operstate = operstate; write_unlock_bh(&dev_base_lock); } |
6fa9864b5
|
74 |
static bool linkwatch_urgent_event(struct net_device *dev) |
294cc44b7
|
75 |
{ |
c37e0c993
|
76 77 78 79 80 81 82 |
if (!netif_running(dev)) return false; if (dev->ifindex != dev->iflink) return true; return netif_carrier_ok(dev) && qdisc_tx_changing(dev); |
294cc44b7
|
83 84 85 86 87 88 89 90 |
} static void linkwatch_add_event(struct net_device *dev) { unsigned long flags; spin_lock_irqsave(&lweventlist_lock, flags); |
e014debec
|
91 92 93 94 |
if (list_empty(&dev->link_watch_list)) { list_add_tail(&dev->link_watch_list, &lweventlist); dev_hold(dev); } |
294cc44b7
|
95 96 |
spin_unlock_irqrestore(&lweventlist_lock, flags); } |
d9568ba91
|
97 |
static void linkwatch_schedule_work(int urgent) |
294cc44b7
|
98 |
{ |
d9568ba91
|
99 100 101 |
unsigned long delay = linkwatch_nextevent - jiffies; if (test_bit(LW_URGENT, &linkwatch_flags)) |
294cc44b7
|
102 |
return; |
d9568ba91
|
103 104 105 106 |
/* Minimise down-time: drop delay for up event. */ if (urgent) { if (test_and_set_bit(LW_URGENT, &linkwatch_flags)) return; |
294cc44b7
|
107 |
delay = 0; |
db0ccffed
|
108 |
} |
294cc44b7
|
109 |
|
d9568ba91
|
110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 |
/* If we wrap around we'll delay it by at most HZ. */ if (delay > HZ) delay = 0; /* * This is true if we've scheduled it immeditately or if we don't * need an immediate execution and it's already pending. */ if (schedule_delayed_work(&linkwatch_work, delay) == !delay) return; /* Don't bother if there is nothing urgent. */ if (!test_bit(LW_URGENT, &linkwatch_flags)) return; /* It's already running which is good enough. */ |
1821f7cd6
|
126 |
if (!__cancel_delayed_work(&linkwatch_work)) |
d9568ba91
|
127 |
return; |
25985edce
|
128 |
/* Otherwise we reschedule it again for immediate execution. */ |
d9568ba91
|
129 |
schedule_delayed_work(&linkwatch_work, 0); |
294cc44b7
|
130 |
} |
e014debec
|
131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 |
static void linkwatch_do_dev(struct net_device *dev) { /* * Make sure the above read is complete since it can be * rewritten as soon as we clear the bit below. */ smp_mb__before_clear_bit(); /* We are about to handle this device, * so new events can be accepted */ clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); rfc2863_policy(dev); if (dev->flags & IFF_UP) { if (netif_carrier_ok(dev)) dev_activate(dev); else dev_deactivate(dev); netdev_state_change(dev); } dev_put(dev); } |
294cc44b7
|
155 |
static void __linkwatch_run_queue(int urgent_only) |
1da177e4c
|
156 |
{ |
e014debec
|
157 158 |
struct net_device *dev; LIST_HEAD(wrk); |
1da177e4c
|
159 |
|
294cc44b7
|
160 161 162 163 164 165 166 167 168 |
/* * Limit the number of linkwatch events to one * per second so that a runaway driver does not * cause a storm of messages on the netlink * socket. This limit does not apply to up events * while the device qdisc is down. */ if (!urgent_only) linkwatch_nextevent = jiffies + HZ; |
d9568ba91
|
169 170 171 172 173 |
/* Limit wrap-around effect on delay. */ else if (time_after(linkwatch_nextevent, jiffies + HZ)) linkwatch_nextevent = jiffies; clear_bit(LW_URGENT, &linkwatch_flags); |
294cc44b7
|
174 |
|
1da177e4c
|
175 |
spin_lock_irq(&lweventlist_lock); |
e014debec
|
176 |
list_splice_init(&lweventlist, &wrk); |
1da177e4c
|
177 |
|
e014debec
|
178 |
while (!list_empty(&wrk)) { |
1da177e4c
|
179 |
|
e014debec
|
180 181 |
dev = list_first_entry(&wrk, struct net_device, link_watch_list); list_del_init(&dev->link_watch_list); |
572a103de
|
182 |
|
294cc44b7
|
183 |
if (urgent_only && !linkwatch_urgent_event(dev)) { |
e014debec
|
184 |
list_add_tail(&dev->link_watch_list, &lweventlist); |
294cc44b7
|
185 186 |
continue; } |
e014debec
|
187 188 189 |
spin_unlock_irq(&lweventlist_lock); linkwatch_do_dev(dev); spin_lock_irq(&lweventlist_lock); |
1da177e4c
|
190 |
} |
294cc44b7
|
191 |
|
e014debec
|
192 |
if (!list_empty(&lweventlist)) |
d9568ba91
|
193 |
linkwatch_schedule_work(0); |
e014debec
|
194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 |
spin_unlock_irq(&lweventlist_lock); } void linkwatch_forget_dev(struct net_device *dev) { unsigned long flags; int clean = 0; spin_lock_irqsave(&lweventlist_lock, flags); if (!list_empty(&dev->link_watch_list)) { list_del_init(&dev->link_watch_list); clean = 1; } spin_unlock_irqrestore(&lweventlist_lock, flags); if (clean) linkwatch_do_dev(dev); |
4ec93edb1
|
210 |
} |
1da177e4c
|
211 |
|
294cc44b7
|
212 213 |
/* Must be called with the rtnl semaphore held */ void linkwatch_run_queue(void) |
1da177e4c
|
214 |
{ |
294cc44b7
|
215 216 |
__linkwatch_run_queue(0); } |
1da177e4c
|
217 |
|
294cc44b7
|
218 219 |
static void linkwatch_event(struct work_struct *dummy) { |
6756ae4b4
|
220 |
rtnl_lock(); |
294cc44b7
|
221 |
__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies)); |
6756ae4b4
|
222 |
rtnl_unlock(); |
1da177e4c
|
223 224 225 226 227 |
} void linkwatch_fire_event(struct net_device *dev) { |
6fa9864b5
|
228 |
bool urgent = linkwatch_urgent_event(dev); |
1da177e4c
|
229 |
|
d9568ba91
|
230 |
if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { |
294cc44b7
|
231 |
linkwatch_add_event(dev); |
d9568ba91
|
232 233 |
} else if (!urgent) return; |
1da177e4c
|
234 |
|
d9568ba91
|
235 |
linkwatch_schedule_work(urgent); |
1da177e4c
|
236 |
} |
1da177e4c
|
237 |
EXPORT_SYMBOL(linkwatch_fire_event); |