/* lib/percpu_counter.c */
1
  // SPDX-License-Identifier: GPL-2.0
3cbc56402   Ravikiran G Thirumalai   [PATCH] percpu_co...
2
3
4
5
6
  /*
   * Fast batching percpu counters.
   */
  
  #include <linux/percpu_counter.h>
c67ad917c   Andrew Morton   percpu_counters()...
7
8
9
  #include <linux/mutex.h>
  #include <linux/init.h>
  #include <linux/cpu.h>
3cbc56402   Ravikiran G Thirumalai   [PATCH] percpu_co...
10
  #include <linux/module.h>
e2852ae82   Tejun Heo   percpu_counter: a...
11
  #include <linux/debugobjects.h>
3cbc56402   Ravikiran G Thirumalai   [PATCH] percpu_co...
12

3a8495c73   Glauber Costa   lib/percpu_counte...
13
  #ifdef CONFIG_HOTPLUG_CPU
c67ad917c   Andrew Morton   percpu_counters()...
14
  static LIST_HEAD(percpu_counters);
d87aae2f3   Al Viro   switch the protec...
15
  static DEFINE_SPINLOCK(percpu_counters_lock);
3a8495c73   Glauber Costa   lib/percpu_counte...
16
  #endif
c67ad917c   Andrew Morton   percpu_counters()...
17

e2852ae82   Tejun Heo   percpu_counter: a...
18
19
20
  #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
  
  static struct debug_obj_descr percpu_counter_debug_descr;
d99b1d891   Du, Changbin   percpu_counter: u...
21
  static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
e2852ae82   Tejun Heo   percpu_counter: a...
22
23
24
25
26
27
28
  {
  	struct percpu_counter *fbc = addr;
  
  	switch (state) {
  	case ODEBUG_STATE_ACTIVE:
  		percpu_counter_destroy(fbc);
  		debug_object_free(fbc, &percpu_counter_debug_descr);
d99b1d891   Du, Changbin   percpu_counter: u...
29
  		return true;
e2852ae82   Tejun Heo   percpu_counter: a...
30
  	default:
d99b1d891   Du, Changbin   percpu_counter: u...
31
  		return false;
e2852ae82   Tejun Heo   percpu_counter: a...
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
  	}
  }
  
  static struct debug_obj_descr percpu_counter_debug_descr = {
  	.name		= "percpu_counter",
  	.fixup_free	= percpu_counter_fixup_free,
  };
  
  static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
  {
  	debug_object_init(fbc, &percpu_counter_debug_descr);
  	debug_object_activate(fbc, &percpu_counter_debug_descr);
  }
  
  static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
  {
  	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
  	debug_object_free(fbc, &percpu_counter_debug_descr);
  }
  
  #else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
  static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
  { }
  static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
  { }
  #endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
3a587f47b   Peter Zijlstra   lib: percpu_count...
58
59
60
  void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
  {
  	int cpu;
098faf580   Shaohua Li   percpu_counter: m...
61
  	unsigned long flags;
3a587f47b   Peter Zijlstra   lib: percpu_count...
62

098faf580   Shaohua Li   percpu_counter: m...
63
  	raw_spin_lock_irqsave(&fbc->lock, flags);
3a587f47b   Peter Zijlstra   lib: percpu_count...
64
65
66
67
68
  	for_each_possible_cpu(cpu) {
  		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
  		*pcount = 0;
  	}
  	fbc->count = amount;
098faf580   Shaohua Li   percpu_counter: m...
69
  	raw_spin_unlock_irqrestore(&fbc->lock, flags);
3a587f47b   Peter Zijlstra   lib: percpu_count...
70
71
  }
  EXPORT_SYMBOL(percpu_counter_set);
3e8f399da   Nikolay Borisov   writeback: rework...
72
73
74
75
76
77
78
/*
 * percpu_counter_add_batch() is both preempt and irq safe.  The former is due
 * to explicit preemption disable.  The latter is guaranteed by the fact that
 * the slow path is explicitly protected by an irq-safe spinlock whereas the
 * fast path uses this_cpu_add() which is irq-safe by definition.  Hence there
 * is no need to muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;
		/*
		 * The local delta would reach the batch threshold: fold the
		 * whole of it into the central count under the irq-safe lock.
		 */
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		/*
		 * Subtract exactly what was folded rather than writing 0, so
		 * any additions made from irq context between the read above
		 * and this point are preserved in the per-cpu slot.
		 */
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		/* Fast path: accumulate into this CPU's delta, lock-free. */
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
3cbc56402   Ravikiran G Thirumalai   [PATCH] percpu_co...
97
98
99
100
101
  
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 *
 * Iterating only online CPUs is sufficient: when a CPU goes away, the
 * hotplug-dead callback in this file folds its residue into fbc->count.
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
c67ad917c   Andrew Morton   percpu_counters()...
118

908c7f194   Tejun Heo   percpu_counter: a...
119
/*
 * Initialise @fbc to @amount.  The per-cpu slots are allocated with @gfp;
 * @key sets the lockdep class of the counter's spinlock.  On success the
 * counter is registered on the hotplug list (if CONFIG_HOTPLUG_CPU) and
 * 0 is returned; returns -ENOMEM if the per-cpu allocation fails.
 */
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	/* flags is only used under CONFIG_HOTPLUG_CPU below */
	unsigned long flags __maybe_unused;
	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
c67ad917c   Andrew Morton   percpu_counters()...
140
141
142
  
/*
 * Tear down @fbc: unhook it from the hotplug list and free its per-cpu
 * storage.  Clearing fbc->counters makes a second call (or a call on a
 * counter whose init failed) a no-op.
 */
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	/* flags is only used under CONFIG_HOTPLUG_CPU below */
	unsigned long flags __maybe_unused;
	if (!fbc->counters)
		return;
	debug_percpu_counter_deactivate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
179f7ebff   Eric Dumazet   percpu_counter: F...
156
157
  int percpu_counter_batch __read_mostly = 32;
  EXPORT_SYMBOL(percpu_counter_batch);
5588f5afb   Sebastian Andrzej Siewior   lib/percpu_counte...
158
  static int compute_batch_value(unsigned int cpu)
179f7ebff   Eric Dumazet   percpu_counter: F...
159
160
161
162
  {
  	int nr = num_online_cpus();
  
  	percpu_counter_batch = max(32, nr*2);
5588f5afb   Sebastian Andrzej Siewior   lib/percpu_counte...
163
  	return 0;
179f7ebff   Eric Dumazet   percpu_counter: F...
164
  }
5588f5afb   Sebastian Andrzej Siewior   lib/percpu_counte...
165
/*
 * CPU-hotplug "dead" callback: fold the departing CPU's partial count of
 * every registered counter back into its central count, so no deltas are
 * stranded on an offline CPU, and rescale the batch for the smaller set.
 */
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	/* List lock outside, per-counter lock inside — same order everywhere. */
	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}
27f5e0f69   Tim Chen   tmpfs: add accura...
184
185
186
187
  /*
   * Compare counter against given value.
   * Return 1 if greater, 0 if equal and -1 if less
   */
80188b0d7   Dave Chinner   percpu_counter: b...
188
  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
27f5e0f69   Tim Chen   tmpfs: add accura...
189
190
191
192
193
  {
  	s64	count;
  
  	count = percpu_counter_read(fbc);
  	/* Check to see if rough count will be sufficient for comparison */
80188b0d7   Dave Chinner   percpu_counter: b...
194
  	if (abs(count - rhs) > (batch * num_online_cpus())) {
27f5e0f69   Tim Chen   tmpfs: add accura...
195
196
197
198
199
200
201
202
203
204
205
206
207
208
  		if (count > rhs)
  			return 1;
  		else
  			return -1;
  	}
  	/* Need to use precise count */
  	count = percpu_counter_sum(fbc);
  	if (count > rhs)
  		return 1;
  	else if (count < rhs)
  		return -1;
  	else
  		return 0;
  }
80188b0d7   Dave Chinner   percpu_counter: b...
209
  EXPORT_SYMBOL(__percpu_counter_compare);
27f5e0f69   Tim Chen   tmpfs: add accura...
210

c67ad917c   Andrew Morton   percpu_counters()...
211
212
/*
 * Register the CPU-hotplug callbacks: rescale the batch when a CPU comes
 * online, and fold a dead CPU's counts when one goes away.
 * NOTE(review): the dead state uses _nocalls — presumably because there
 * is nothing to fold for CPUs already present at boot; confirm.
 */
static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);