Commit 179f7ebff6be45738c6e2fa68c8d2cc5c2c6308e

Authored by Eric Dumazet
Committed by Linus Torvalds
1 parent e3d5a27d58

percpu_counter: FBC_BATCH should be a variable

With NR_CPUS >= 16, FBC_BATCH is 2*NR_CPUS (and 4*NR_CPUS below that).

Considering that more and more distros ship kernels built with high NR_CPUS values,
it makes sense to give FBC_BATCH a more sensible value and get rid of the dependency on NR_CPUS.

A sensible value is 2*num_online_cpus(), with a minimum of 32 (this minimum
helps branch prediction in __percpu_counter_add()).

We already have a hotcpu notifier, so we can adjust FBC_BATCH dynamically.

We rename FBC_BATCH to percpu_counter_batch since it's no longer a constant.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
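
For reference, here is a simplified sketch (an illustration, not the exact kernel
source; field names follow struct percpu_counter) of how the batch value is used:
each CPU accumulates a local delta and only folds it into the shared count once
that delta reaches the batch, so a bigger batch means fewer acquisitions of
fbc->lock but a larger possible error in percpu_counter_read().

	/* Simplified sketch of __percpu_counter_add(); locking/preemption details trimmed */
	void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
	{
		s32 *pcount = per_cpu_ptr(fbc->counters, get_cpu());
		s64 count = *pcount + amount;

		if (count >= batch || count <= -batch) {
			/* local delta crossed the batch: fold it into the global count */
			spin_lock(&fbc->lock);
			fbc->count += count;
			*pcount = 0;
			spin_unlock(&fbc->lock);
		} else {
			/* stay CPU-local, no shared cacheline touched */
			*pcount = count;
		}
		put_cpu();
	}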

Showing 4 changed files with 20 additions and 14 deletions

@@ -1225,11 +1225,11 @@
 } while (0)
 
 #ifdef CONFIG_SMP
-/* Each CPU can accumulate FBC_BATCH blocks in their local
+/* Each CPU can accumulate percpu_counter_batch blocks in their local
  * counters. So we need to make sure we have free blocks more
- * than FBC_BATCH * nr_cpu_ids. Also add a window of 4 times.
+ * than percpu_counter_batch * nr_cpu_ids. Also add a window of 4 times.
  */
-#define EXT4_FREEBLOCKS_WATERMARK (4 * (FBC_BATCH * nr_cpu_ids))
+#define EXT4_FREEBLOCKS_WATERMARK (4 * (percpu_counter_batch * nr_cpu_ids))
 #else
 #define EXT4_FREEBLOCKS_WATERMARK 0
 #endif
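
As a hypothetical illustration of why the constant hurt (numbers assumed, 4 KiB
blocks): on a distro kernel built with NR_CPUS=4096 and booted on a machine where
nr_cpu_ids ends up as 8,

	old: EXT4_FREEBLOCKS_WATERMARK = 4 * (2 * 4096) * 8 = 262144 blocks (~1 GiB)
	new: EXT4_FREEBLOCKS_WATERMARK = 4 * max(32, 2 * 8) * 8 = 1024 blocks (~4 MiB)

so the watermark now scales with the CPUs actually present rather than with the
build-time limit.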
@@ -2498,7 +2498,7 @@
 /*
  * switch to non delalloc mode if we are running low
  * on free block. The free block accounting via percpu
- * counters can get slightly wrong with FBC_BATCH getting
+ * counters can get slightly wrong with percpu_counter_batch getting
  * accumulated on each CPU without updating global counters
  * Delalloc need an accurate free block accounting. So switch
  * to non delalloc when we are near to error range.
include/linux/percpu_counter.h
@@ -24,11 +24,7 @@
 	s32 *counters;
 };
 
-#if NR_CPUS >= 16
-#define FBC_BATCH (NR_CPUS*2)
-#else
-#define FBC_BATCH (NR_CPUS*4)
-#endif
+extern int percpu_counter_batch;
 
 int percpu_counter_init(struct percpu_counter *fbc, s64 amount);
 int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
@@ -39,7 +35,7 @@
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
-	__percpu_counter_add(fbc, amount, FBC_BATCH);
+	__percpu_counter_add(fbc, amount, percpu_counter_batch);
 }
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
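
Callers do not change: percpu_counter_add() simply picks up the runtime batch.
A minimal usage sketch (the demo_counter/demo_init names are made up for
illustration, error handling trimmed):

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/percpu_counter.h>

	static struct percpu_counter demo_counter;

	static int __init demo_init(void)
	{
		int err = percpu_counter_init(&demo_counter, 0);

		if (err)
			return err;

		/* cheap: usually only touches this CPU's s32 slot */
		percpu_counter_add(&demo_counter, 128);

		pr_info("approx=%lld exact=%lld\n",
			percpu_counter_read(&demo_counter),
			percpu_counter_sum(&demo_counter));

		percpu_counter_destroy(&demo_counter);
		return 0;
	}

percpu_counter_read() is the cheap, approximate read (off by up to about
percpu_counter_batch per CPU), while percpu_counter_sum() takes the lock and
folds in every per-CPU slot.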
lib/percpu_counter.c
@@ -9,10 +9,8 @@
 #include <linux/cpu.h>
 #include <linux/module.h>
 
-#ifdef CONFIG_HOTPLUG_CPU
 static LIST_HEAD(percpu_counters);
 static DEFINE_MUTEX(percpu_counters_lock);
-#endif
 
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
@@ -111,13 +109,24 @@
 }
 EXPORT_SYMBOL(percpu_counter_destroy);
 
-#ifdef CONFIG_HOTPLUG_CPU
+int percpu_counter_batch __read_mostly = 32;
+EXPORT_SYMBOL(percpu_counter_batch);
+
+static void compute_batch_value(void)
+{
+	int nr = num_online_cpus();
+
+	percpu_counter_batch = max(32, nr*2);
+}
+
 static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 					unsigned long action, void *hcpu)
 {
+#ifdef CONFIG_HOTPLUG_CPU
 	unsigned int cpu;
 	struct percpu_counter *fbc;
 
+	compute_batch_value();
 	if (action != CPU_DEAD)
 		return NOTIFY_OK;
 
@@ -134,14 +143,15 @@
 		spin_unlock_irqrestore(&fbc->lock, flags);
 	}
 	mutex_unlock(&percpu_counters_lock);
+#endif
 	return NOTIFY_OK;
 }
 
 static int __init percpu_counter_startup(void)
 {
+	compute_batch_value();
 	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
 	return 0;
 }
 module_init(percpu_counter_startup);
-#endif
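
For reference, the batch the new code computes at a few online-CPU counts
(derived directly from max(32, nr*2)):

	num_online_cpus() =  2  ->  percpu_counter_batch = max(32,   4) =  32
	num_online_cpus() = 16  ->  percpu_counter_batch = max(32,  32) =  32
	num_online_cpus() = 64  ->  percpu_counter_batch = max(32, 128) = 128

Since compute_batch_value() also runs from the hotcpu notifier, the batch follows
CPU hotplug events rather than staying fixed at boot.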