Commit 6882f933ccee5c3a86443ffc7621ce888b93ab6b
1 parent
12f4d0a877
Exists in
master
and in
4 other branches
ipv4: Kill RT_CACHE_DEBUG
It's way past its usefulness. And this gets rid of a bunch of stray ->rt_{dst,src} references. Even the comment documenting the macro was inaccurate (stated default was 1 when it's 0). If reintroduced, it should be done properly, with dynamic debug facilities. Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 3 changed files with 0 additions and 51 deletions Side-by-side Diff
include/net/dst.h
... | ... | @@ -16,13 +16,6 @@ |
16 | 16 | #include <net/neighbour.h> |
17 | 17 | #include <asm/processor.h> |
18 | 18 | |
19 | -/* | |
20 | - * 0 - no debugging messages | |
21 | - * 1 - rare events and bugs (default) | |
22 | - * 2 - trace mode. | |
23 | - */ | |
24 | -#define RT_CACHE_DEBUG 0 | |
25 | - | |
26 | 19 | #define DST_GC_MIN (HZ/10) |
27 | 20 | #define DST_GC_INC (HZ/2) |
28 | 21 | #define DST_GC_MAX (120*HZ) |
net/core/dst.c
... | ... | @@ -33,9 +33,6 @@ |
33 | 33 | * 3) This list is guarded by a mutex, |
34 | 34 | * so that the gc_task and dst_dev_event() can be synchronized. |
35 | 35 | */ |
36 | -#if RT_CACHE_DEBUG >= 2 | |
37 | -static atomic_t dst_total = ATOMIC_INIT(0); | |
38 | -#endif | |
39 | 36 | |
40 | 37 | /* |
41 | 38 | * We want to keep lock & list close together |
... | ... | @@ -69,10 +66,6 @@ |
69 | 66 | unsigned long expires = ~0L; |
70 | 67 | struct dst_entry *dst, *next, head; |
71 | 68 | struct dst_entry *last = &head; |
72 | -#if RT_CACHE_DEBUG >= 2 | |
73 | - ktime_t time_start = ktime_get(); | |
74 | - struct timespec elapsed; | |
75 | -#endif | |
76 | 69 | |
77 | 70 | mutex_lock(&dst_gc_mutex); |
78 | 71 | next = dst_busy_list; |
... | ... | @@ -146,15 +139,6 @@ |
146 | 139 | |
147 | 140 | spin_unlock_bh(&dst_garbage.lock); |
148 | 141 | mutex_unlock(&dst_gc_mutex); |
149 | -#if RT_CACHE_DEBUG >= 2 | |
150 | - elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start)); | |
151 | - printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d" | |
152 | - " expires: %lu elapsed: %lu us\n", | |
153 | - atomic_read(&dst_total), delayed, work_performed, | |
154 | - expires, | |
155 | - elapsed.tv_sec * USEC_PER_SEC + | |
156 | - elapsed.tv_nsec / NSEC_PER_USEC); | |
157 | -#endif | |
158 | 142 | } |
159 | 143 | |
160 | 144 | int dst_discard(struct sk_buff *skb) |
... | ... | @@ -205,9 +189,6 @@ |
205 | 189 | dst->lastuse = jiffies; |
206 | 190 | dst->flags = flags; |
207 | 191 | dst->next = NULL; |
208 | -#if RT_CACHE_DEBUG >= 2 | |
209 | - atomic_inc(&dst_total); | |
210 | -#endif | |
211 | 192 | dst_entries_add(ops, 1); |
212 | 193 | return dst; |
213 | 194 | } |
... | ... | @@ -267,9 +248,6 @@ |
267 | 248 | dst->ops->destroy(dst); |
268 | 249 | if (dst->dev) |
269 | 250 | dev_put(dst->dev); |
270 | -#if RT_CACHE_DEBUG >= 2 | |
271 | - atomic_dec(&dst_total); | |
272 | -#endif | |
273 | 251 | kmem_cache_free(dst->ops->kmem_cachep, dst); |
274 | 252 | |
275 | 253 | dst = child; |
net/ipv4/route.c
... | ... | @@ -968,10 +968,6 @@ |
968 | 968 | break; |
969 | 969 | |
970 | 970 | expire >>= 1; |
971 | -#if RT_CACHE_DEBUG >= 2 | |
972 | - printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire, | |
973 | - dst_entries_get_fast(&ipv4_dst_ops), goal, i); | |
974 | -#endif | |
975 | 971 | |
976 | 972 | if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size) |
977 | 973 | goto out; |
... | ... | @@ -992,10 +988,6 @@ |
992 | 988 | dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh || |
993 | 989 | dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh) |
994 | 990 | expire = ip_rt_gc_timeout; |
995 | -#if RT_CACHE_DEBUG >= 2 | |
996 | - printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire, | |
997 | - dst_entries_get_fast(&ipv4_dst_ops), goal, rover); | |
998 | -#endif | |
999 | 991 | out: return 0; |
1000 | 992 | } |
1001 | 993 | |
... | ... | @@ -1179,16 +1171,6 @@ |
1179 | 1171 | |
1180 | 1172 | rt->dst.rt_next = rt_hash_table[hash].chain; |
1181 | 1173 | |
1182 | -#if RT_CACHE_DEBUG >= 2 | |
1183 | - if (rt->dst.rt_next) { | |
1184 | - struct rtable *trt; | |
1185 | - printk(KERN_DEBUG "rt_cache @%02x: %pI4", | |
1186 | - hash, &rt->rt_dst); | |
1187 | - for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next) | |
1188 | - printk(" . %pI4", &trt->rt_dst); | |
1189 | - printk("\n"); | |
1190 | - } | |
1191 | -#endif | |
1192 | 1174 | /* |
1193 | 1175 | * Since lookup is lockfree, we must make sure |
1194 | 1176 | * previous writes to rt are committed to memory |
... | ... | @@ -1347,10 +1329,6 @@ |
1347 | 1329 | unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, |
1348 | 1330 | rt->rt_oif, |
1349 | 1331 | rt_genid(dev_net(dst->dev))); |
1350 | -#if RT_CACHE_DEBUG >= 1 | |
1351 | - printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n", | |
1352 | - &rt->rt_dst, rt->rt_key_tos); | |
1353 | -#endif | |
1354 | 1332 | rt_del(hash, rt); |
1355 | 1333 | ret = NULL; |
1356 | 1334 | } else if (rt->peer && |