lib/percpu_counter.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
static const struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);
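
/*
 * Illustrative note, not part of the original file: percpu_counter_set()
 * zeroes every possible CPU's slot under fbc->lock, but a concurrent
 * percpu_counter_add_batch() on another CPU can still race with it, so a
 * sketch like the following assumes no other writers are active
 * (nr_things is a hypothetical counter):
 *
 *	percpu_counter_set(&nr_things, 0);
 */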

/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock whereas the fast path uses
 * this_cpu_add() which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (abs(count) >= batch) {
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
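
/*
 * Illustrative sketch, not part of the original file: with a batch of 32,
 * deltas stay in the local per-CPU slot until their absolute value reaches
 * the batch, so percpu_counter_read() can be off by up to roughly
 * batch * num_online_cpus(). Assuming a hypothetical counter nr_things:
 *
 *	percpu_counter_add_batch(&nr_things, 10, 32);
 *		(fast path: local slot becomes 10, fbc->count untouched)
 *	percpu_counter_add_batch(&nr_things, 30, 32);
 *		(10 + 30 >= 32: slow path folds 40 into fbc->count and
 *		 the local slot goes back to 0)
 */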

/*
 * For a percpu_counter with a big batch, the deviation of its count can be
 * large, and there may be a need to reduce it, e.g. when the counter's
 * batch is decreased at runtime to get better accuracy. That can be
 * achieved by running this sync function on each CPU.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
	unsigned long flags;
	s64 count;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	count = __this_cpu_read(*fbc->counters);
	fbc->count += count;
	__this_cpu_sub(*fbc->counters, count);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
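
/*
 * Illustrative sketch, not part of the original file: percpu_counter_sync()
 * only folds the calling CPU's slot, so reducing the deviation of the whole
 * counter means running it on every CPU, e.g. via a hypothetical helper:
 *
 *	static void sync_one(void *info)
 *	{
 *		percpu_counter_sync(info);
 *	}
 *
 *	on_each_cpu(sync_one, &nr_things, 1);
 */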
  
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
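
/*
 * Illustrative sketch, not part of the original file: the read/sum pair
 * trades accuracy for cost. percpu_counter_read() is a single racy load of
 * fbc->count, while percpu_counter_sum() (which wraps this function) takes
 * the lock and walks every online CPU:
 *
 *	s64 rough = percpu_counter_read(&nr_things);	(cheap, may be stale)
 *	s64 exact = percpu_counter_sum(&nr_things);	(slow, but precise)
 */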

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
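
/*
 * Illustrative lifecycle sketch, not part of the original file, assuming a
 * caller in process context using the percpu_counter_init() wrapper from
 * <linux/percpu_counter.h> and a hypothetical counter nr_things:
 *
 *	struct percpu_counter nr_things;
 *
 *	if (percpu_counter_init(&nr_things, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_things);
 *	pr_info("things: %lld\n", percpu_counter_sum(&nr_things));
 *	percpu_counter_destroy(&nr_things);
 */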

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}
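
/*
 * Illustrative arithmetic, not part of the original file: the global batch
 * scales with the machine, e.g. 8 online CPUs give max(32, 16) = 32, while
 * 64 online CPUs give max(32, 128) = 128.
 */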

static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
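
/*
 * Illustrative sketch, not part of the original file: the
 * percpu_counter_compare() wrapper passes percpu_counter_batch for the
 * batch, letting callers test a limit without paying for an exact sum
 * unless the rough count is within batch * num_online_cpus() of it.
 * Assuming a hypothetical counter and limit:
 *
 *	if (percpu_counter_compare(&nr_things, limit) >= 0)
 *		return -ENOSPC;
 */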

static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);