Commit 02316067852187b8bec781bec07410e91af79627
Committed by Linus Torvalds
1 parent a38a44c1a9
Exists in master and in 4 other branches
[PATCH] hotplug CPU: clean up hotcpu_notifier() use
There was a lot of #ifdef noise in the kernel due to hotcpu_notifier(fn, prio) not correctly marking 'fn' as used in the !HOTPLUG_CPU case, thus generating compiler warnings about unused symbols and forcing people to add #ifdefs. The compiler can skip truly unused functions just fine:

       text     data      bss      dec     hex  filename
    1624412   728710  3674856  6027978  5bfaca  vmlinux.before
    1624412   728710  3674856  6027978  5bfaca  vmlinux.after

[akpm@osdl.org: topology.c fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
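The whole cleanup rests on the include/linux/cpu.h stubs (shown later in this diff) now evaluating their argument. A minimal userspace sketch of the trick; my_cpu_callback and this toy hotcpu_notifier() are illustrative stand-ins, not the kernel's real registration path:

    /* Build with and without -DCONFIG_HOTPLUG_CPU to compare. */
    #include <stdio.h>

    #ifdef CONFIG_HOTPLUG_CPU
    /* Stand-in for real registration: actually invokes the callback. */
    #define hotcpu_notifier(fn, pri)        (fn)(0, NULL)
    #else
    /* New stub: naming 'fn' in a discarded expression counts as a use, so
     * gcc no longer warns "defined but not used", while the optimizer still
     * drops the dead function body.  The old empty stub never mentioned
     * 'fn', which is what forced callers to wrap everything in #ifdefs. */
    #define hotcpu_notifier(fn, pri)        do { (void)(fn); } while (0)
    #endif

    static int my_cpu_callback(unsigned long action, void *hcpu)
    {
            printf("cpu event %lu on %p\n", action, hcpu);
            return 0;
    }

    int main(void)
    {
            /* No #ifdef needed around the callback or around this call. */
            hotcpu_notifier(my_cpu_callback, 0);
            return 0;
    }

The identical before/after sizes above are the point: the functions formerly compiled out by #ifdef are still eliminated, only now by the compiler rather than the preprocessor.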
Showing 25 changed files with 8 additions and 56 deletions (side-by-side diff)
- arch/i386/kernel/cpu/mcheck/therm_throt.c
- arch/i386/kernel/cpuid.c
- arch/i386/kernel/microcode.c
- arch/i386/kernel/msr.c
- arch/ia64/kernel/palinfo.c
- arch/ia64/kernel/salinfo.c
- arch/s390/appldata/appldata_base.c
- arch/x86_64/kernel/mce.c
- arch/x86_64/kernel/mce_amd.c
- arch/x86_64/kernel/vsyscall.c
- block/ll_rw_blk.c
- drivers/base/topology.c
- drivers/cpufreq/cpufreq.c
- fs/buffer.c
- include/linux/cpu.h
- kernel/cpuset.c
- kernel/profile.c
- kernel/sched.c
- kernel/workqueue.c
- lib/radix-tree.c
- mm/page_alloc.c
- mm/swap.c
- mm/vmscan.c
- net/core/dev.c
- net/core/flow.c
arch/i386/kernel/cpu/mcheck/therm_throt.c
... | ... | @@ -116,7 +116,6 @@ |
116 | 116 | return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group); |
117 | 117 | } |
118 | 118 | |
119 | -#ifdef CONFIG_HOTPLUG_CPU | |
120 | 119 | static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) |
121 | 120 | { |
122 | 121 | return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); |
... | ... | @@ -153,7 +152,6 @@ |
153 | 152 | { |
154 | 153 | .notifier_call = thermal_throttle_cpu_callback, |
155 | 154 | }; |
156 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
157 | 155 | |
158 | 156 | static __init int thermal_throttle_init_device(void) |
159 | 157 | { |
arch/i386/kernel/cpuid.c
... | ... | @@ -167,7 +167,6 @@ |
167 | 167 | return err; |
168 | 168 | } |
169 | 169 | |
170 | -#ifdef CONFIG_HOTPLUG_CPU | |
171 | 170 | static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
172 | 171 | { |
173 | 172 | unsigned int cpu = (unsigned long)hcpu; |
... | ... | @@ -187,7 +186,6 @@ |
187 | 186 | { |
188 | 187 | .notifier_call = cpuid_class_cpu_callback, |
189 | 188 | }; |
190 | -#endif /* !CONFIG_HOTPLUG_CPU */ | |
191 | 189 | |
192 | 190 | static int __init cpuid_init(void) |
193 | 191 | { |
arch/i386/kernel/microcode.c
... | ... | @@ -703,7 +703,6 @@ |
703 | 703 | .resume = mc_sysdev_resume, |
704 | 704 | }; |
705 | 705 | |
706 | -#ifdef CONFIG_HOTPLUG_CPU | |
707 | 706 | static __cpuinit int |
708 | 707 | mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) |
709 | 708 | { |
... | ... | @@ -726,7 +725,6 @@ |
726 | 725 | static struct notifier_block mc_cpu_notifier = { |
727 | 726 | .notifier_call = mc_cpu_callback, |
728 | 727 | }; |
729 | -#endif | |
730 | 728 | |
731 | 729 | static int __init microcode_init (void) |
732 | 730 | { |
arch/i386/kernel/msr.c
... | ... | @@ -250,7 +250,6 @@ |
250 | 250 | return err; |
251 | 251 | } |
252 | 252 | |
253 | -#ifdef CONFIG_HOTPLUG_CPU | |
254 | 253 | static int msr_class_cpu_callback(struct notifier_block *nfb, |
255 | 254 | unsigned long action, void *hcpu) |
256 | 255 | { |
... | ... | @@ -271,7 +270,6 @@ |
271 | 270 | { |
272 | 271 | .notifier_call = msr_class_cpu_callback, |
273 | 272 | }; |
274 | -#endif | |
275 | 273 | |
276 | 274 | static int __init msr_init(void) |
277 | 275 | { |
arch/ia64/kernel/palinfo.c
... | ... | @@ -952,7 +952,6 @@ |
952 | 952 | } |
953 | 953 | } |
954 | 954 | |
955 | -#ifdef CONFIG_HOTPLUG_CPU | |
956 | 955 | static int palinfo_cpu_callback(struct notifier_block *nfb, |
957 | 956 | unsigned long action, void *hcpu) |
958 | 957 | { |
... | ... | @@ -974,7 +973,6 @@ |
974 | 973 | .notifier_call = palinfo_cpu_callback, |
975 | 974 | .priority = 0, |
976 | 975 | }; |
977 | -#endif | |
978 | 976 | |
979 | 977 | static int __init |
980 | 978 | palinfo_init(void) |
arch/ia64/kernel/salinfo.c
... | ... | @@ -575,7 +575,6 @@ |
575 | 575 | .write = salinfo_log_write, |
576 | 576 | }; |
577 | 577 | |
578 | -#ifdef CONFIG_HOTPLUG_CPU | |
579 | 578 | static int __devinit |
580 | 579 | salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) |
581 | 580 | { |
... | ... | @@ -620,7 +619,6 @@ |
620 | 619 | .notifier_call = salinfo_cpu_callback, |
621 | 620 | .priority = 0, |
622 | 621 | }; |
623 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
624 | 622 | |
625 | 623 | static int __init |
626 | 624 | salinfo_init(void) |
arch/s390/appldata/appldata_base.c
... | ... | @@ -561,7 +561,6 @@ |
561 | 561 | spin_unlock(&appldata_timer_lock); |
562 | 562 | } |
563 | 563 | |
564 | -#ifdef CONFIG_HOTPLUG_CPU | |
565 | 564 | static int __cpuinit |
566 | 565 | appldata_cpu_notify(struct notifier_block *self, |
567 | 566 | unsigned long action, void *hcpu) |
... | ... | @@ -582,7 +581,6 @@ |
582 | 581 | static struct notifier_block appldata_nb = { |
583 | 582 | .notifier_call = appldata_cpu_notify, |
584 | 583 | }; |
585 | -#endif | |
586 | 584 | |
587 | 585 | /* |
588 | 586 | * appldata_init() |
arch/x86_64/kernel/mce.c
... | ... | @@ -641,7 +641,6 @@ |
641 | 641 | return err; |
642 | 642 | } |
643 | 643 | |
644 | -#ifdef CONFIG_HOTPLUG_CPU | |
645 | 644 | static void mce_remove_device(unsigned int cpu) |
646 | 645 | { |
647 | 646 | int i; |
... | ... | @@ -674,7 +673,6 @@ |
674 | 673 | static struct notifier_block mce_cpu_notifier = { |
675 | 674 | .notifier_call = mce_cpu_callback, |
676 | 675 | }; |
677 | -#endif | |
678 | 676 | |
679 | 677 | static __init int mce_init_device(void) |
680 | 678 | { |
arch/x86_64/kernel/mce_amd.c
... | ... | @@ -551,7 +551,6 @@ |
551 | 551 | return err; |
552 | 552 | } |
553 | 553 | |
554 | -#ifdef CONFIG_HOTPLUG_CPU | |
555 | 554 | /* |
556 | 555 | * let's be hotplug friendly. |
557 | 556 | * in case of multiple core processors, the first core always takes ownership |
558 | 557 | |
... | ... | @@ -594,12 +593,14 @@ |
594 | 593 | |
595 | 594 | sprintf(name, "threshold_bank%i", bank); |
596 | 595 | |
596 | +#ifdef CONFIG_SMP | |
597 | 597 | /* sibling symlink */ |
598 | 598 | if (shared_bank[bank] && b->blocks->cpu != cpu) { |
599 | 599 | sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name); |
600 | 600 | per_cpu(threshold_banks, cpu)[bank] = NULL; |
601 | 601 | return; |
602 | 602 | } |
603 | +#endif | |
603 | 604 | |
604 | 605 | /* remove all sibling symlinks before unregistering */ |
605 | 606 | for_each_cpu_mask(i, b->cpus) { |
... | ... | @@ -656,7 +657,6 @@ |
656 | 657 | static struct notifier_block threshold_cpu_notifier = { |
657 | 658 | .notifier_call = threshold_cpu_callback, |
658 | 659 | }; |
659 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
660 | 660 | |
661 | 661 | static __init int threshold_init_device(void) |
662 | 662 | { |
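Unlike most hunks in this diff, the one above adds code. The new CONFIG_SMP guard is presumably needed because shared_bank[] in this file is only defined for SMP builds; now that the function is no longer hidden behind CONFIG_HOTPLUG_CPU, a UP build would otherwise fail on that reference. A minimal sketch of the situation, with stand-in names (shared_flag and remove_bank are illustrative, not the kernel's symbols):

    /* Builds cleanly with and without -DCONFIG_SMP. */
    #ifdef CONFIG_SMP
    static int shared_flag;                 /* stand-in for shared_bank[] */
    #endif

    static void remove_bank(int bank)
    {
            (void)bank;
    #ifdef CONFIG_SMP
            if (shared_flag)                /* SMP-only bookkeeping */
                    shared_flag = bank;
    #endif
            /* teardown common to every configuration continues here */
    }

    int main(void)
    {
            remove_bank(0);                 /* compiled unconditionally now */
            return 0;
    }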
arch/x86_64/kernel/vsyscall.c
... | ... | @@ -275,7 +275,6 @@ |
275 | 275 | vsyscall_set_cpu(raw_smp_processor_id()); |
276 | 276 | } |
277 | 277 | |
278 | -#ifdef CONFIG_HOTPLUG_CPU | |
279 | 278 | static int __cpuinit |
280 | 279 | cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) |
281 | 280 | { |
... | ... | @@ -284,7 +283,6 @@ |
284 | 283 | smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); |
285 | 284 | return NOTIFY_DONE; |
286 | 285 | } |
287 | -#endif | |
288 | 286 | |
289 | 287 | static void __init map_vsyscall(void) |
290 | 288 | { |
block/ll_rw_blk.c
... | ... | @@ -3459,8 +3459,6 @@ |
3459 | 3459 | } |
3460 | 3460 | } |
3461 | 3461 | |
3462 | -#ifdef CONFIG_HOTPLUG_CPU | |
3463 | - | |
3464 | 3462 | static int blk_cpu_notify(struct notifier_block *self, unsigned long action, |
3465 | 3463 | void *hcpu) |
3466 | 3464 | { |
... | ... | @@ -3485,8 +3483,6 @@ |
3485 | 3483 | static struct notifier_block __devinitdata blk_cpu_notifier = { |
3486 | 3484 | .notifier_call = blk_cpu_notify, |
3487 | 3485 | }; |
3488 | - | |
3489 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
3490 | 3486 | |
3491 | 3487 | /** |
3492 | 3488 | * blk_complete_request - end I/O on a request |
drivers/base/topology.c
... | ... | @@ -108,7 +108,6 @@ |
108 | 108 | return rc; |
109 | 109 | } |
110 | 110 | |
111 | -#ifdef CONFIG_HOTPLUG_CPU | |
112 | 111 | static void __cpuinit topology_remove_dev(unsigned int cpu) |
113 | 112 | { |
114 | 113 | struct sys_device *sys_dev = get_cpu_sysdev(cpu); |
... | ... | @@ -136,7 +135,6 @@ |
136 | 135 | } |
137 | 136 | return rc ? NOTIFY_BAD : NOTIFY_OK; |
138 | 137 | } |
139 | -#endif | |
140 | 138 | |
141 | 139 | static int __cpuinit topology_sysfs_init(void) |
142 | 140 | { |
drivers/cpufreq/cpufreq.c
... | ... | @@ -1537,7 +1537,6 @@ |
1537 | 1537 | } |
1538 | 1538 | EXPORT_SYMBOL(cpufreq_update_policy); |
1539 | 1539 | |
1540 | -#ifdef CONFIG_HOTPLUG_CPU | |
1541 | 1540 | static int cpufreq_cpu_callback(struct notifier_block *nfb, |
1542 | 1541 | unsigned long action, void *hcpu) |
1543 | 1542 | { |
... | ... | @@ -1577,7 +1576,6 @@ |
1577 | 1576 | { |
1578 | 1577 | .notifier_call = cpufreq_cpu_callback, |
1579 | 1578 | }; |
1580 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
1581 | 1579 | |
1582 | 1580 | /********************************************************************* |
1583 | 1581 | * REGISTER / UNREGISTER CPUFREQ DRIVER * |
fs/buffer.c
... | ... | @@ -2972,7 +2972,6 @@ |
2972 | 2972 | } |
2973 | 2973 | } |
2974 | 2974 | |
2975 | -#ifdef CONFIG_HOTPLUG_CPU | |
2976 | 2975 | static void buffer_exit_cpu(int cpu) |
2977 | 2976 | { |
2978 | 2977 | int i; |
... | ... | @@ -2994,7 +2993,6 @@ |
2994 | 2993 | buffer_exit_cpu((unsigned long)hcpu); |
2995 | 2994 | return NOTIFY_OK; |
2996 | 2995 | } |
2997 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
2998 | 2996 | |
2999 | 2997 | void __init buffer_init(void) |
3000 | 2998 | { |
include/linux/cpu.h
... | ... | @@ -89,9 +89,9 @@ |
89 | 89 | #define lock_cpu_hotplug() do { } while (0) |
90 | 90 | #define unlock_cpu_hotplug() do { } while (0) |
91 | 91 | #define lock_cpu_hotplug_interruptible() 0 |
92 | -#define hotcpu_notifier(fn, pri) do { } while (0) | |
93 | -#define register_hotcpu_notifier(nb) do { } while (0) | |
94 | -#define unregister_hotcpu_notifier(nb) do { } while (0) | |
92 | +#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) | |
93 | +#define register_hotcpu_notifier(nb) do { (void)(nb); } while (0) | |
94 | +#define unregister_hotcpu_notifier(nb) do { (void)(nb); } while (0) | |
95 | 95 | |
96 | 96 | /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */ |
97 | 97 | static inline int cpu_is_offline(int cpu) { return 0; } |
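With these stubs in place, the caller side of every converted file reduces to the same shape. A self-contained sketch (userspace stand-ins for notifier_block and register_hotcpu_notifier(); illustrative names, not the kernel headers):

    struct notifier_block {                 /* stand-in for <linux/notifier.h> */
            int (*notifier_call)(struct notifier_block *, unsigned long, void *);
    };
    #define NOTIFY_OK 1

    #ifdef CONFIG_HOTPLUG_CPU
    static struct notifier_block *registered;      /* toy registration target */
    #define register_hotcpu_notifier(nb)    do { registered = (nb); } while (0)
    #else
    /* The stub from the hunk above: references 'nb' but generates no code. */
    #define register_hotcpu_notifier(nb)    do { (void)(nb); } while (0)
    #endif

    /* Callback and notifier_block are defined unconditionally, as in the
     * converted files; the stub keeps them counted as "used" either way. */
    static int example_cpu_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
    {
            (void)nfb; (void)action; (void)hcpu;
            return NOTIFY_OK;
    }

    static struct notifier_block example_cpu_notifier = {
            .notifier_call = example_cpu_callback,
    };

    int main(void)
    {
            register_hotcpu_notifier(&example_cpu_notifier);
            return 0;
    }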
kernel/cpuset.c
... | ... | @@ -2044,7 +2044,6 @@ |
2044 | 2044 | return err; |
2045 | 2045 | } |
2046 | 2046 | |
2047 | -#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG) | |
2048 | 2047 | /* |
2049 | 2048 | * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs |
2050 | 2049 | * or memory nodes, we need to walk over the cpuset hierarchy, |
2051 | 2050 | |
... | ... | @@ -2108,9 +2107,7 @@ |
2108 | 2107 | mutex_unlock(&callback_mutex); |
2109 | 2108 | mutex_unlock(&manage_mutex); |
2110 | 2109 | } |
2111 | -#endif | |
2112 | 2110 | |
2113 | -#ifdef CONFIG_HOTPLUG_CPU | |
2114 | 2111 | /* |
2115 | 2112 | * The top_cpuset tracks what CPUs and Memory Nodes are online, |
2116 | 2113 | * period. This is necessary in order to make cpusets transparent |
... | ... | @@ -2127,7 +2124,6 @@ |
2127 | 2124 | common_cpu_mem_hotplug_unplug(); |
2128 | 2125 | return 0; |
2129 | 2126 | } |
2130 | -#endif | |
2131 | 2127 | |
2132 | 2128 | #ifdef CONFIG_MEMORY_HOTPLUG |
2133 | 2129 | /* |
kernel/profile.c
... | ... | @@ -319,7 +319,6 @@ |
319 | 319 | put_cpu(); |
320 | 320 | } |
321 | 321 | |
322 | -#ifdef CONFIG_HOTPLUG_CPU | |
323 | 322 | static int __devinit profile_cpu_callback(struct notifier_block *info, |
324 | 323 | unsigned long action, void *__cpu) |
325 | 324 | { |
326 | 325 | |
... | ... | @@ -372,10 +371,10 @@ |
372 | 371 | } |
373 | 372 | return NOTIFY_OK; |
374 | 373 | } |
375 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
376 | 374 | #else /* !CONFIG_SMP */ |
377 | 375 | #define profile_flip_buffers() do { } while (0) |
378 | 376 | #define profile_discard_flip_buffers() do { } while (0) |
377 | +#define profile_cpu_callback NULL | |
379 | 378 | |
380 | 379 | void profile_hits(int type, void *__pc, unsigned int nr_hits) |
381 | 380 | { |
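This hunk also adds a line, "#define profile_cpu_callback NULL", in the !CONFIG_SMP branch. Presumably that is because profile.c registers the callback unconditionally elsewhere in the file (the registration site is not part of this hunk), so the identifier must still resolve on UP builds where the real function is not compiled. A minimal sketch of that fallback idiom, with simplified names:

    /* Builds cleanly with and without -DCONFIG_SMP. */
    #include <stddef.h>

    #ifdef CONFIG_SMP
    static int prof_callback(unsigned long action, void *hcpu)
    {
            (void)action; (void)hcpu;
            return 0;
    }
    #else
    /* Same trick as the hunk above: a NULL stand-in keeps an unconditional
     * registration line compiling when the real callback is not built. */
    #define prof_callback NULL
    #endif

    /* The !HOTPLUG_CPU stub introduced by this commit in include/linux/cpu.h. */
    #define hotcpu_notifier(fn, pri)        do { (void)(fn); } while (0)

    int main(void)
    {
            hotcpu_notifier(prof_callback, 0);      /* compiles in both configs */
            return 0;
    }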
kernel/sched.c
... | ... | @@ -6740,8 +6740,6 @@ |
6740 | 6740 | sched_smt_power_savings_store); |
6741 | 6741 | #endif |
6742 | 6742 | |
6743 | - | |
6744 | -#ifdef CONFIG_HOTPLUG_CPU | |
6745 | 6743 | /* |
6746 | 6744 | * Force a reinitialization of the sched domains hierarchy. The domains |
6747 | 6745 | * and groups cannot be updated in place without racing with the balancing |
... | ... | @@ -6774,7 +6772,6 @@ |
6774 | 6772 | |
6775 | 6773 | return NOTIFY_OK; |
6776 | 6774 | } |
6777 | -#endif | |
6778 | 6775 | |
6779 | 6776 | void __init sched_init_smp(void) |
6780 | 6777 | { |
kernel/workqueue.c
... | ... | @@ -655,7 +655,6 @@ |
655 | 655 | |
656 | 656 | } |
657 | 657 | |
658 | -#ifdef CONFIG_HOTPLUG_CPU | |
659 | 658 | /* Take the work from this (downed) CPU. */ |
660 | 659 | static void take_over_work(struct workqueue_struct *wq, unsigned int cpu) |
661 | 660 | { |
... | ... | @@ -738,7 +737,6 @@ |
738 | 737 | |
739 | 738 | return NOTIFY_OK; |
740 | 739 | } |
741 | -#endif | |
742 | 740 | |
743 | 741 | void init_workqueues(void) |
744 | 742 | { |
lib/radix-tree.c
... | ... | @@ -996,7 +996,6 @@ |
996 | 996 | height_to_maxindex[i] = __maxindex(i); |
997 | 997 | } |
998 | 998 | |
999 | -#ifdef CONFIG_HOTPLUG_CPU | |
1000 | 999 | static int radix_tree_callback(struct notifier_block *nfb, |
1001 | 1000 | unsigned long action, |
1002 | 1001 | void *hcpu) |
... | ... | @@ -1016,7 +1015,6 @@ |
1016 | 1015 | } |
1017 | 1016 | return NOTIFY_OK; |
1018 | 1017 | } |
1019 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
1020 | 1018 | |
1021 | 1019 | void __init radix_tree_init(void) |
1022 | 1020 | { |
mm/page_alloc.c
... | ... | @@ -701,7 +701,6 @@ |
701 | 701 | } |
702 | 702 | #endif |
703 | 703 | |
704 | -#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU) | |
705 | 704 | static void __drain_pages(unsigned int cpu) |
706 | 705 | { |
707 | 706 | unsigned long flags; |
... | ... | @@ -723,7 +722,6 @@ |
723 | 722 | } |
724 | 723 | } |
725 | 724 | } |
726 | -#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */ | |
727 | 725 | |
728 | 726 | #ifdef CONFIG_PM |
729 | 727 | |
... | ... | @@ -2907,7 +2905,6 @@ |
2907 | 2905 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); |
2908 | 2906 | } |
2909 | 2907 | |
2910 | -#ifdef CONFIG_HOTPLUG_CPU | |
2911 | 2908 | static int page_alloc_cpu_notify(struct notifier_block *self, |
2912 | 2909 | unsigned long action, void *hcpu) |
2913 | 2910 | { |
... | ... | @@ -2922,7 +2919,6 @@ |
2922 | 2919 | } |
2923 | 2920 | return NOTIFY_OK; |
2924 | 2921 | } |
2925 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
2926 | 2922 | |
2927 | 2923 | void __init page_alloc_init(void) |
2928 | 2924 | { |
mm/swap.c
mm/vmscan.c
... | ... | @@ -1513,7 +1513,6 @@ |
1513 | 1513 | } |
1514 | 1514 | #endif |
1515 | 1515 | |
1516 | -#ifdef CONFIG_HOTPLUG_CPU | |
1517 | 1516 | /* It's optimal to keep kswapds on the same CPUs as their memory, but |
1518 | 1517 | not required for correctness. So if the last cpu in a node goes |
1519 | 1518 | away, we get changed to run anywhere: as the first one comes back, |
... | ... | @@ -1534,7 +1533,6 @@ |
1534 | 1533 | } |
1535 | 1534 | return NOTIFY_OK; |
1536 | 1535 | } |
1537 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
1538 | 1536 | |
1539 | 1537 | /* |
1540 | 1538 | * This kswapd start function will be called by init and node-hot-add. |
net/core/dev.c
... | ... | @@ -3340,7 +3340,6 @@ |
3340 | 3340 | |
3341 | 3341 | EXPORT_SYMBOL(unregister_netdev); |
3342 | 3342 | |
3343 | -#ifdef CONFIG_HOTPLUG_CPU | |
3344 | 3343 | static int dev_cpu_callback(struct notifier_block *nfb, |
3345 | 3344 | unsigned long action, |
3346 | 3345 | void *ocpu) |
... | ... | @@ -3384,7 +3383,6 @@ |
3384 | 3383 | |
3385 | 3384 | return NOTIFY_OK; |
3386 | 3385 | } |
3387 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
3388 | 3386 | |
3389 | 3387 | #ifdef CONFIG_NET_DMA |
3390 | 3388 | /** |
net/core/flow.c
... | ... | @@ -340,7 +340,6 @@ |
340 | 340 | tasklet_init(tasklet, flow_cache_flush_tasklet, 0); |
341 | 341 | } |
342 | 342 | |
343 | -#ifdef CONFIG_HOTPLUG_CPU | |
344 | 343 | static int flow_cache_cpu(struct notifier_block *nfb, |
345 | 344 | unsigned long action, |
346 | 345 | void *hcpu) |
... | ... | @@ -349,7 +348,6 @@ |
349 | 348 | __flow_cache_shrink((unsigned long)hcpu, 0); |
350 | 349 | return NOTIFY_OK; |
351 | 350 | } |
352 | -#endif /* CONFIG_HOTPLUG_CPU */ | |
353 | 351 | |
354 | 352 | static int __init flow_cache_init(void) |
355 | 353 | { |