Commit 245b2e70eabd797932adb263a65da0bab3711753
1 parent: b9bf3121af
Exists in master and in 20 other branches
percpu: clean up percpu variable definitions
Percpu variable definition is about to be updated such that all percpu
symbols including the static ones must be unique.  Update percpu
variable definitions accordingly.

* as,cfq: rename ioc_count uniquely
* cpufreq: rename cpu_dbs_info uniquely
* xen: move nesting_count out of xen_evtchn_do_upcall() and rename it
* mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and
  rename it
* ipv4,6: rename cookie_scratch uniquely
* x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to
  pmc_irq_entry and nmi_entry to pmc_nmi_entry
* perf_counter: rename disable_count to perf_disable_count
* ftrace: rename test_event_disable to ftrace_test_event_disable
* kmemleak: rename test_pointer to kmemleak_test_pointer
* mce: rename next_interval to mce_next_interval

[ Impact: percpu usage cleanups, no duplicate static percpu var names ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andi Kleen <andi@firstfloor.org>
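The renames all follow one pattern; here is a minimal sketch of the collision
they avoid (illustrative only, not part of the commit -- foo.c, bar.c, refcnt,
foo_refcnt and foo_get() are hypothetical names, and the snippet assumes the
in-kernel percpu API of this era):

#include <linux/percpu.h>

/*
 * Before this change, two files (say foo.c and bar.c) could each define
 *
 *	static DEFINE_PER_CPU(int, refcnt);
 *
 * Once all percpu symbols, static ones included, must be unique, those
 * two definitions would clash, so each file prefixes its variable with
 * its subsystem name, exactly as the renames in the diff below do:
 */
static DEFINE_PER_CPU(int, foo_refcnt);

static void foo_get(void)
{
	/* accessors only change in the symbol name they pass */
	__get_cpu_var(foo_refcnt)++;
}

The same requirement presumably also explains why the xen and mm hunks move
function-local static DEFINE_PER_CPU() definitions (nesting_count, ratelimits)
to file scope before renaming them: a percpu variable defined inside a
function is still a static percpu symbol, so it falls under the same
uniqueness rule.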
Showing 13 changed files with 58 additions and 53 deletions
- arch/x86/kernel/cpu/mcheck/mce.c
- arch/x86/kernel/cpu/perf_counter.c
- block/as-iosched.c
- block/cfq-iosched.c
- drivers/cpufreq/cpufreq_conservative.c
- drivers/cpufreq/cpufreq_ondemand.c
- drivers/xen/events.c
- kernel/perf_counter.c
- kernel/trace/trace_events.c
- mm/kmemleak-test.c
- mm/page-writeback.c
- net/ipv4/syncookies.c
- net/ipv6/syncookies.c
arch/x86/kernel/cpu/mcheck/mce.c
@@ -1091,7 +1091,7 @@
  */
 static int check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
+static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
@@ -1110,7 +1110,7 @@
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
-	n = &__get_cpu_var(next_interval);
+	n = &__get_cpu_var(mce_next_interval);
	if (mce_notify_irq())
		*n = max(*n/2, HZ/100);
	else
@@ -1311,7 +1311,7 @@
 static void mce_init_timer(void)
 {
	struct timer_list *t = &__get_cpu_var(mce_timer);
-	int *n = &__get_cpu_var(next_interval);
+	int *n = &__get_cpu_var(mce_next_interval);
 
	if (mce_ignore_ce)
		return;
@@ -1914,7 +1914,7 @@
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		t->expires = round_jiffies(jiffies +
				__get_cpu_var(mce_next_interval));
		add_timer_on(t, cpu);
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		break;
arch/x86/kernel/cpu/perf_counter.c
@@ -862,7 +862,7 @@
	x86_pmu_disable_counter(hwc, idx);
 }
 
-static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left);
+static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
@@ -901,7 +901,7 @@
	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;
 
-	per_cpu(prev_left[idx], smp_processor_id()) = left;
+	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
	/*
	 * The hw counter starts counting from this counter offset,
@@ -1089,7 +1089,7 @@
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
 
-		prev_left = per_cpu(prev_left[idx], cpu);
+		prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
@@ -1561,8 +1561,8 @@
	entry->ip[entry->nr++] = ip;
 }
 
-static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
 
 
 static void
@@ -1709,9 +1709,9 @@
	struct perf_callchain_entry *entry;
 
	if (in_nmi())
-		entry = &__get_cpu_var(nmi_entry);
+		entry = &__get_cpu_var(pmc_nmi_entry);
	else
-		entry = &__get_cpu_var(irq_entry);
+		entry = &__get_cpu_var(pmc_irq_entry);
 
	entry->nr = 0;
 
block/as-iosched.c
@@ -146,7 +146,7 @@
 #define RQ_STATE(rq) ((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state) ((rq)->elevator_private2 = (void *) state)
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, as_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -161,7 +161,7 @@
 static void free_as_io_context(struct as_io_context *aic)
 {
	kfree(aic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(as_ioc_count);
	if (ioc_gone) {
		/*
		 * AS scheduler is exiting, grab exit lock and check
@@ -169,7 +169,7 @@
		 * complete ioc_gone and set it back to NULL.
		 */
		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
@@ -211,7 +211,7 @@
		ret->seek_total = 0;
		ret->seek_samples = 0;
		ret->seek_mean = 0;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(as_ioc_count);
	}
 
	return ret;
@@ -1507,7 +1507,7 @@
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(as_ioc_count))
		wait_for_completion(&all_gone);
	synchronize_rcu();
 }
block/cfq-iosched.c
@@ -48,7 +48,7 @@
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -1422,7 +1422,7 @@
	cic = container_of(head, struct cfq_io_context, rcu_head);
 
	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(cfq_ioc_count);
 
	if (ioc_gone) {
		/*
@@ -1431,7 +1431,7 @@
		 * complete ioc_gone and set it back to NULL
		 */
		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
@@ -1557,7 +1557,7 @@
		INIT_HLIST_NODE(&cic->cic_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(cfq_ioc_count);
	}
 
	return cic;
@@ -2658,7 +2658,7 @@
	 * this also protects us from entering cfq_slab_kill() with
	 * pending RCU callbacks
	 */
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(cfq_ioc_count))
		wait_for_completion(&all_gone);
	cfq_slab_kill();
 }
drivers/cpufreq/cpufreq_conservative.c
@@ -65,7 +65,7 @@
	int cpu;
	unsigned int enable:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -138,7 +138,7 @@
				     void *data)
 {
	struct cpufreq_freqs *freq = data;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
							freq->cpu);
 
	struct cpufreq_policy *policy;
@@ -298,7 +298,7 @@
		/* we need to re-evaluate prev_cpu_idle */
		for_each_online_cpu(j) {
			struct cpu_dbs_info_s *dbs_info;
-			dbs_info = &per_cpu(cpu_dbs_info, j);
+			dbs_info = &per_cpu(cs_cpu_dbs_info, j);
			dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +388,7 @@
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -528,7 +528,7 @@
	unsigned int j;
	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
 
	switch (event) {
	case CPUFREQ_GOV_START:
@@ -548,7 +548,7 @@
 
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;
 
			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
drivers/cpufreq/cpufreq_ondemand.c
@@ -73,7 +73,7 @@
	unsigned int enable:1,
		sample_type:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -151,7 +151,8 @@
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+						   policy->cpu);
 
	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
@@ -196,7 +197,7 @@
 {
	int i;
	for_each_online_cpu(i) {
-		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+		struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, i);
		dbs_info->freq_table = cpufreq_frequency_get_table(i);
		dbs_info->freq_lo = 0;
	}
@@ -297,7 +298,7 @@
		/* we need to re-evaluate prev_cpu_idle */
		for_each_online_cpu(j) {
			struct cpu_dbs_info_s *dbs_info;
-			dbs_info = &per_cpu(cpu_dbs_info, j);
+			dbs_info = &per_cpu(od_cpu_dbs_info, j);
			dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice)
@@ -391,7 +392,7 @@
		unsigned int load, load_freq;
		int freq_avg;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -548,7 +549,7 @@
	unsigned int j;
	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 
	switch (event) {
	case CPUFREQ_GOV_START:
@@ -570,7 +571,7 @@
 
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;
 
			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
drivers/xen/events.c
@@ -602,6 +602,8 @@
	return IRQ_HANDLED;
 }
 
+static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+
 /*
  * Search the CPUs pending events bitmasks. For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -617,7 +619,6 @@
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
-	static DEFINE_PER_CPU(unsigned, nesting_count);
	unsigned count;
 
	exit_idle();
@@ -628,7 +629,7 @@
 
		vcpu_info->evtchn_upcall_pending = 0;
 
-		if (__get_cpu_var(nesting_count)++)
+		if (__get_cpu_var(xed_nesting_count)++)
			goto out;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -653,8 +654,8 @@
 
		BUG_ON(!irqs_disabled());
 
-		count = __get_cpu_var(nesting_count);
-		__get_cpu_var(nesting_count) = 0;
+		count = __get_cpu_var(xed_nesting_count);
+		__get_cpu_var(xed_nesting_count) = 0;
	} while(count != 1);
 
 out:
kernel/perf_counter.c
@@ -98,16 +98,16 @@
 
 void __weak perf_counter_print_debug(void) { }
 
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
 
 void __perf_disable(void)
 {
-	__get_cpu_var(disable_count)++;
+	__get_cpu_var(perf_disable_count)++;
 }
 
 bool __perf_enable(void)
 {
-	return !--__get_cpu_var(disable_count);
+	return !--__get_cpu_var(perf_disable_count);
 }
 
 void perf_disable(void)
kernel/trace/trace_events.c
@@ -1318,7 +1318,7 @@
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1334,7 +1334,7 @@
	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
	if (disabled != 1)
		goto out;
@@ -1352,7 +1352,7 @@
	trace_nowake_buffer_unlock_commit(event, flags, pc);
 
  out:
-	atomic_dec(&per_cpu(test_event_disable, cpu));
+	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
 }
 
mm/kmemleak-test.c
@@ -36,7 +36,7 @@
 };
 
 static LIST_HEAD(test_list);
-static DEFINE_PER_CPU(void *, test_pointer);
+static DEFINE_PER_CPU(void *, kmemleak_test_pointer);
 
 /*
  * Some very simple testing. This function needs to be extended for
@@ -86,9 +86,9 @@
	}
 
	for_each_possible_cpu(i) {
-		per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL);
+		per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
		pr_info("kmemleak: kmalloc(129) = %p\n",
-			per_cpu(test_pointer, i));
+			per_cpu(kmemleak_test_pointer, i));
	}
 
	return 0;
mm/page-writeback.c
@@ -607,6 +607,8 @@
	}
 }
 
+static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -624,7 +626,6 @@
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
	unsigned long ratelimit;
	unsigned long *p;
 
@@ -637,7 +638,7 @@
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
-	p = &__get_cpu_var(ratelimits);
+	p = &__get_cpu_var(bdp_ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
net/ipv4/syncookies.c
@@ -37,12 +37,13 @@
 #define COOKIEBITS 24	/* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
 
-static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch);
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+		      ipv4_cookie_scratch);
 
 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
		       u32 count, int c)
 {
-	__u32 *tmp = __get_cpu_var(cookie_scratch);
+	__u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
 
	memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
	tmp[0] = (__force u32)saddr;
net/ipv6/syncookies.c
@@ -74,12 +74,13 @@
	return child;
 }
 
-static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch);
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+		      ipv6_cookie_scratch);
 
 static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
		       __be16 sport, __be16 dport, u32 count, int c)
 {
-	__u32 *tmp = __get_cpu_var(cookie_scratch);
+	__u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
 
	/*
	 * we have 320 bits of information to hash, copy in the remaining