Blame view
net/core/drop_monitor.c
9.73 KB
9a8afc8d3
|
1 2 3 4 5 |
/* * Monitoring code for network dropped packet alerts * * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com> */ |
e005d193d
|
6 |
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
9a8afc8d3
|
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 |
#include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/string.h> #include <linux/if_arp.h> #include <linux/inetdevice.h> #include <linux/inet.h> #include <linux/interrupt.h> #include <linux/netpoll.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/netlink.h> #include <linux/net_dropmon.h> #include <linux/percpu.h> #include <linux/timer.h> #include <linux/bitops.h> |
5a0e3ad6a
|
24 |
#include <linux/slab.h> |
cad456d5a
|
25 |
#include <linux/module.h> |
9a8afc8d3
|
26 |
#include <net/genetlink.h> |
4ea7e3869
|
27 |
#include <net/netevent.h> |
9a8afc8d3
|
28 |
|
ad8d75fff
|
29 |
#include <trace/events/skb.h> |
9cbc1cb8c
|
30 |
#include <trace/events/napi.h> |
9a8afc8d3
|
31 32 33 34 35 |
#include <asm/unaligned.h> #define TRACE_ON 1 #define TRACE_OFF 0 |
9a8afc8d3
|
36 37 38 39 40 |
/* * Globals, our netlink socket pointer * and the work handle that will send up * netlink alerts */ |
4ea7e3869
|
41 |
static int trace_state = TRACE_OFF; |
cde2e9a65
|
42 |
static DEFINE_MUTEX(trace_state_mutex); |
9a8afc8d3
|
43 44 |
struct per_cpu_dm_data { |
bec4596b4
|
45 46 47 48 |
spinlock_t lock; struct sk_buff *skb; struct work_struct dm_alert_work; struct timer_list send_timer; |
9a8afc8d3
|
49 |
}; |
4ea7e3869
|
50 51 |
struct dm_hw_stat_delta { struct net_device *dev; |
5848cc096
|
52 |
unsigned long last_rx; |
4ea7e3869
|
53 54 55 56 |
struct list_head list; struct rcu_head rcu; unsigned long last_drop_val; }; |
9a8afc8d3
|
57 58 59 60 |
static struct genl_family net_drop_monitor_family = { .id = GENL_ID_GENERATE, .hdrsize = 0, .name = "NET_DM", |
683703a26
|
61 |
.version = 2, |
9a8afc8d3
|
62 63 64 65 66 67 |
}; static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data); static int dm_hit_limit = 64; static int dm_delay = 1; |
4ea7e3869
|
68 69 |
static unsigned long dm_hw_check_delta = 2*HZ; static LIST_HEAD(hw_stats_list); |
9a8afc8d3
|
70 |
|
bec4596b4
|
71 |
static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) |
9a8afc8d3
|
72 73 74 |
{ size_t al; struct net_dm_alert_msg *msg; |
683703a26
|
75 |
struct nlattr *nla; |
3885ca785
|
76 |
struct sk_buff *skb; |
bec4596b4
|
77 |
unsigned long flags; |
9a8afc8d3
|
78 79 80 |
al = sizeof(struct net_dm_alert_msg); al += dm_hit_limit * sizeof(struct net_dm_drop_point); |
683703a26
|
81 |
al += sizeof(struct nlattr); |
3885ca785
|
82 83 84 85 86 87 88 89 90 |
skb = genlmsg_new(al, GFP_KERNEL); if (skb) { genlmsg_put(skb, 0, 0, &net_drop_monitor_family, 0, NET_DM_CMD_ALERT); nla = nla_reserve(skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg)); msg = nla_data(nla); memset(msg, 0, al); |
bec4596b4
|
91 92 |
} else { mod_timer(&data->send_timer, jiffies + HZ / 10); |
3885ca785
|
93 |
} |
bec4596b4
|
94 95 96 97 98 |
spin_lock_irqsave(&data->lock, flags); swap(data->skb, skb); spin_unlock_irqrestore(&data->lock, flags); return skb; |
9a8afc8d3
|
99 |
} |
2a94fe48f
|
100 101 |
/* Multicast groups for the NET_DM family; alerts go to "events". */
static struct genl_multicast_group dropmon_mcgrps[] = {
	{ .name = "events", },
};
bec4596b4
|
103 |
static void send_dm_alert(struct work_struct *work) |
9a8afc8d3
|
104 105 |
{ struct sk_buff *skb; |
bec4596b4
|
106 |
struct per_cpu_dm_data *data; |
9a8afc8d3
|
107 |
|
bec4596b4
|
108 |
data = container_of(work, struct per_cpu_dm_data, dm_alert_work); |
4fdcfa128
|
109 |
|
bec4596b4
|
110 |
skb = reset_per_cpu_data(data); |
9a8afc8d3
|
111 |
|
3885ca785
|
112 |
if (skb) |
68eb55031
|
113 |
genlmsg_multicast(&net_drop_monitor_family, skb, 0, |
2a94fe48f
|
114 |
0, GFP_KERNEL); |
9a8afc8d3
|
115 116 117 118 119 |
} /* * This is the timer function to delay the sending of an alert * in the event that more drops will arrive during the |
bec4596b4
|
120 |
* hysteresis period. |
9a8afc8d3
|
121 |
*/ |
bec4596b4
|
122 |
static void sched_send_work(unsigned long _data) |
9a8afc8d3
|
123 |
{ |
bec4596b4
|
124 |
struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data; |
3885ca785
|
125 |
|
bec4596b4
|
126 |
schedule_work(&data->dm_alert_work); |
9a8afc8d3
|
127 |
} |
4ea7e3869
|
128 |
static void trace_drop_common(struct sk_buff *skb, void *location) |
9a8afc8d3
|
129 130 131 |
{ struct net_dm_alert_msg *msg; struct nlmsghdr *nlh; |
683703a26
|
132 |
struct nlattr *nla; |
9a8afc8d3
|
133 |
int i; |
3885ca785
|
134 |
struct sk_buff *dskb; |
bec4596b4
|
135 136 |
struct per_cpu_dm_data *data; unsigned long flags; |
9a8afc8d3
|
137 |
|
bec4596b4
|
138 139 140 141 |
local_irq_save(flags); data = &__get_cpu_var(dm_cpu_data); spin_lock(&data->lock); dskb = data->skb; |
3885ca785
|
142 143 144 |
if (!dskb) goto out; |
3885ca785
|
145 |
nlh = (struct nlmsghdr *)dskb->data; |
683703a26
|
146 147 |
nla = genlmsg_data(nlmsg_data(nlh)); msg = nla_data(nla); |
9a8afc8d3
|
148 149 150 151 152 153 |
for (i = 0; i < msg->entries; i++) { if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { msg->points[i].count++; goto out; } } |
bec4596b4
|
154 155 |
if (msg->entries == dm_hit_limit) goto out; |
9a8afc8d3
|
156 157 158 |
/* * We need to create a new entry */ |
3885ca785
|
159 |
__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point)); |
683703a26
|
160 |
nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); |
9a8afc8d3
|
161 162 163 164 165 166 |
memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); msg->points[msg->entries].count = 1; msg->entries++; if (!timer_pending(&data->send_timer)) { data->send_timer.expires = jiffies + dm_delay * HZ; |
bec4596b4
|
167 |
add_timer(&data->send_timer); |
9a8afc8d3
|
168 169 170 |
} out: |
bec4596b4
|
171 |
spin_unlock_irqrestore(&data->lock, flags); |
9a8afc8d3
|
172 |
} |
38516ab59
|
173 |
/* Tracepoint probe for kfree_skb: record the drop site. */
static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
{
	trace_drop_common(skb, location);
}
38516ab59
|
177 |
static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi) |
4ea7e3869
|
178 179 180 181 |
{ struct dm_hw_stat_delta *new_stat; /* |
5848cc096
|
182 |
* Don't check napi structures with no associated device |
4ea7e3869
|
183 |
*/ |
5848cc096
|
184 |
if (!napi->dev) |
4ea7e3869
|
185 186 187 188 |
return; rcu_read_lock(); list_for_each_entry_rcu(new_stat, &hw_stats_list, list) { |
5848cc096
|
189 190 191 192 193 194 |
/* * only add a note to our monitor buffer if: * 1) this is the dev we received on * 2) its after the last_rx delta * 3) our rx_dropped count has gone up */ |
4ea7e3869
|
195 |
if ((new_stat->dev == napi->dev) && |
5848cc096
|
196 |
(time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) && |
4ea7e3869
|
197 198 199 |
(napi->dev->stats.rx_dropped != new_stat->last_drop_val)) { trace_drop_common(NULL, NULL); new_stat->last_drop_val = napi->dev->stats.rx_dropped; |
5848cc096
|
200 |
new_stat->last_rx = jiffies; |
4ea7e3869
|
201 202 203 204 205 |
break; } } rcu_read_unlock(); } |
9a8afc8d3
|
206 207 208 |
static int set_all_monitor_traces(int state) { int rc = 0; |
4ea7e3869
|
209 210 |
struct dm_hw_stat_delta *new_stat = NULL; struct dm_hw_stat_delta *temp; |
cde2e9a65
|
211 |
mutex_lock(&trace_state_mutex); |
9a8afc8d3
|
212 |
|
4b706372f
|
213 214 215 216 |
if (state == trace_state) { rc = -EAGAIN; goto out_unlock; } |
9a8afc8d3
|
217 218 |
switch (state) { case TRACE_ON: |
cad456d5a
|
219 220 221 222 |
if (!try_module_get(THIS_MODULE)) { rc = -ENODEV; break; } |
38516ab59
|
223 224 |
rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL); rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL); |
9a8afc8d3
|
225 |
break; |
cad456d5a
|
226 |
|
9a8afc8d3
|
227 |
case TRACE_OFF: |
38516ab59
|
228 229 |
rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL); rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL); |
9a8afc8d3
|
230 231 |
tracepoint_synchronize_unregister(); |
4ea7e3869
|
232 233 234 235 236 237 238 |
/* * Clean the device list */ list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) { if (new_stat->dev == NULL) { list_del_rcu(&new_stat->list); |
fa81c0e1d
|
239 |
kfree_rcu(new_stat, rcu); |
4ea7e3869
|
240 241 |
} } |
cad456d5a
|
242 243 |
module_put(THIS_MODULE); |
9a8afc8d3
|
244 245 246 247 248 |
break; default: rc = 1; break; } |
4ea7e3869
|
249 250 |
if (!rc) trace_state = state; |
4b706372f
|
251 252 |
else rc = -EINPROGRESS; |
4ea7e3869
|
253 |
|
4b706372f
|
254 |
out_unlock: |
cde2e9a65
|
255 |
mutex_unlock(&trace_state_mutex); |
4ea7e3869
|
256 |
|
9a8afc8d3
|
257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 |
return rc; } static int net_dm_cmd_config(struct sk_buff *skb, struct genl_info *info) { return -ENOTSUPP; } static int net_dm_cmd_trace(struct sk_buff *skb, struct genl_info *info) { switch (info->genlhdr->cmd) { case NET_DM_CMD_START: return set_all_monitor_traces(TRACE_ON); break; case NET_DM_CMD_STOP: return set_all_monitor_traces(TRACE_OFF); break; } return -ENOTSUPP; } |
4ea7e3869
|
281 |
static int dropmon_net_event(struct notifier_block *ev_block, |
351638e7d
|
282 |
unsigned long event, void *ptr) |
4ea7e3869
|
283 |
{ |
351638e7d
|
284 |
struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
4ea7e3869
|
285 286 287 288 289 290 291 292 293 294 295 |
struct dm_hw_stat_delta *new_stat = NULL; struct dm_hw_stat_delta *tmp; switch (event) { case NETDEV_REGISTER: new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL); if (!new_stat) goto out; new_stat->dev = dev; |
5848cc096
|
296 |
new_stat->last_rx = jiffies; |
cde2e9a65
|
297 |
mutex_lock(&trace_state_mutex); |
4ea7e3869
|
298 |
list_add_rcu(&new_stat->list, &hw_stats_list); |
cde2e9a65
|
299 |
mutex_unlock(&trace_state_mutex); |
4ea7e3869
|
300 301 |
break; case NETDEV_UNREGISTER: |
cde2e9a65
|
302 |
mutex_lock(&trace_state_mutex); |
4ea7e3869
|
303 304 305 306 307 |
list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { if (new_stat->dev == dev) { new_stat->dev = NULL; if (trace_state == TRACE_OFF) { list_del_rcu(&new_stat->list); |
fa81c0e1d
|
308 |
kfree_rcu(new_stat, rcu); |
4ea7e3869
|
309 310 311 312 |
break; } } } |
cde2e9a65
|
313 |
mutex_unlock(&trace_state_mutex); |
4ea7e3869
|
314 315 316 317 318 |
break; } out: return NOTIFY_DONE; } |
9a8afc8d3
|
319 |
|
4534de830
|
320 |
static const struct genl_ops dropmon_ops[] = { |
9a8afc8d3
|
321 322 323 324 325 326 327 328 329 330 331 332 333 |
{ .cmd = NET_DM_CMD_CONFIG, .doit = net_dm_cmd_config, }, { .cmd = NET_DM_CMD_START, .doit = net_dm_cmd_trace, }, { .cmd = NET_DM_CMD_STOP, .doit = net_dm_cmd_trace, }, }; |
4ea7e3869
|
334 335 336 |
static struct notifier_block dropmon_net_notifier = {
	.notifier_call = dropmon_net_event
};
9a8afc8d3
|
337 338 |
/*
 * Module init: register the generic netlink family and netdevice
 * notifier, then set up per-CPU alert buffers, timers, and work items.
 *
 * Note: the pr_info/pr_err/pr_crit format strings are terminated with
 * '\n' so messages do not run together in the kernel log.
 */
static int __init init_net_drop_monitor(void)
{
	struct per_cpu_dm_data *data;
	int cpu, rc;

	pr_info("Initializing network drop monitor service\n");

	/* Drop points store the PC in an 8-byte field; wider pointers
	 * cannot be represented. */
	if (sizeof(void *) > 8) {
		pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
		return -ENOSPC;
	}

	rc = genl_register_family_with_ops_groups(&net_drop_monitor_family,
						  dropmon_ops, dropmon_mcgrps);
	if (rc) {
		pr_err("Could not create drop monitor netlink family\n");
		return rc;
	}
	WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);

	rc = register_netdevice_notifier(&dropmon_net_notifier);
	if (rc < 0) {
		pr_crit("Failed to register netdevice notifier\n");
		goto out_unreg;
	}

	rc = 0;

	for_each_possible_cpu(cpu) {
		data = &per_cpu(dm_cpu_data, cpu);
		INIT_WORK(&data->dm_alert_work, send_dm_alert);
		init_timer(&data->send_timer);
		data->send_timer.data = (unsigned long)data;
		data->send_timer.function = sched_send_work;
		spin_lock_init(&data->lock);
		reset_per_cpu_data(data);
	}

	goto out;

out_unreg:
	genl_unregister_family(&net_drop_monitor_family);
out:
	return rc;
}
cad456d5a
|
383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 |
static void exit_net_drop_monitor(void) { struct per_cpu_dm_data *data; int cpu; BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier)); /* * Because of the module_get/put we do in the trace state change path * we are guarnateed not to have any current users when we get here * all we need to do is make sure that we don't have any running timers * or pending schedule calls */ for_each_possible_cpu(cpu) { data = &per_cpu(dm_cpu_data, cpu); del_timer_sync(&data->send_timer); cancel_work_sync(&data->dm_alert_work); /* * At this point, we should have exclusive access * to this struct and can free the skb inside it */ kfree_skb(data->skb); } BUG_ON(genl_unregister_family(&net_drop_monitor_family)); } module_init(init_net_drop_monitor); module_exit(exit_net_drop_monitor); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>"); |
3fdcbd453
|
416 |
MODULE_ALIAS_GENL_FAMILY("NET_DM"); |