  /*
   * Fast batching percpu counters.
   */
  
  #include <linux/percpu_counter.h>
c67ad917c   Andrew Morton   percpu_counters()...
6
7
8
9
  #include <linux/notifier.h>
  #include <linux/mutex.h>
  #include <linux/init.h>
  #include <linux/cpu.h>
3cbc56402   Ravikiran G Thirumalai   [PATCH] percpu_co...
10
  #include <linux/module.h>
c67ad917c   Andrew Morton   percpu_counters()...
11
12
13
14
  #ifdef CONFIG_HOTPLUG_CPU
  static LIST_HEAD(percpu_counters);
  static DEFINE_MUTEX(percpu_counters_lock);
  #endif
0216bfcff   Mingming Cao   [PATCH] percpu co...
15
  void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
3cbc56402   Ravikiran G Thirumalai   [PATCH] percpu_co...
16
17
  {
  	long count;
0216bfcff   Mingming Cao   [PATCH] percpu co...
18
  	s32 *pcount;
3cbc56402   Ravikiran G Thirumalai   [PATCH] percpu_co...
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
  	int cpu = get_cpu();
  
  	pcount = per_cpu_ptr(fbc->counters, cpu);
  	count = *pcount + amount;
  	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
  		spin_lock(&fbc->lock);
  		fbc->count += count;
  		*pcount = 0;
  		spin_unlock(&fbc->lock);
  	} else {
  		*pcount = count;
  	}
  	put_cpu();
  }
  EXPORT_SYMBOL(percpu_counter_mod);
  
  /*
   * Add up all the per-cpu counts, return the result.  This is a more accurate
   * but much slower version of percpu_counter_read_positive()
   */
0216bfcff   Mingming Cao   [PATCH] percpu co...
39
  s64 percpu_counter_sum(struct percpu_counter *fbc)
3cbc56402   Ravikiran G Thirumalai   [PATCH] percpu_co...
40
  {
0216bfcff   Mingming Cao   [PATCH] percpu co...
41
  	s64 ret;
3cbc56402   Ravikiran G Thirumalai   [PATCH] percpu_co...
42
43
44
45
  	int cpu;
  
  	spin_lock(&fbc->lock);
  	ret = fbc->count;
b4ef0296f   Andrew Morton   percpu_counters: ...
46
  	for_each_online_cpu(cpu) {
0216bfcff   Mingming Cao   [PATCH] percpu co...
47
  		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
3cbc56402   Ravikiran G Thirumalai   [PATCH] percpu_co...
48
49
50
51
52
53
  		ret += *pcount;
  	}
  	spin_unlock(&fbc->lock);
  	return ret < 0 ? 0 : ret;
  }
  EXPORT_SYMBOL(percpu_counter_sum);
  
/*
 * Initialise @fbc to @amount and allocate its per-cpu batch counters.
 *
 * NOTE(review): alloc_percpu() can fail, leaving fbc->counters NULL; a
 * later per_cpu_ptr() on it would oops.  Confirm callers tolerate this or
 * that failure is handled at a higher level.
 */
void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	spin_lock_init(&fbc->lock);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
#ifdef CONFIG_HOTPLUG_CPU
	/* Publish on the global list only after the counter is set up. */
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_init);
  
  void percpu_counter_destroy(struct percpu_counter *fbc)
  {
  	free_percpu(fbc->counters);
  #ifdef CONFIG_HOTPLUG_CPU
  	mutex_lock(&percpu_counters_lock);
  	list_del(&fbc->list);
  	mutex_unlock(&percpu_counters_lock);
  #endif
  }
  EXPORT_SYMBOL(percpu_counter_destroy);
  
  #ifdef CONFIG_HOTPLUG_CPU
  static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
  					unsigned long action, void *hcpu)
  {
  	unsigned int cpu;
  	struct percpu_counter *fbc;
  
  	if (action != CPU_DEAD)
  		return NOTIFY_OK;
  
  	cpu = (unsigned long)hcpu;
  	mutex_lock(&percpu_counters_lock);
  	list_for_each_entry(fbc, &percpu_counters, list) {
  		s32 *pcount;
  
  		spin_lock(&fbc->lock);
  		pcount = per_cpu_ptr(fbc->counters, cpu);
  		fbc->count += *pcount;
  		*pcount = 0;
  		spin_unlock(&fbc->lock);
  	}
  	mutex_unlock(&percpu_counters_lock);
  	return NOTIFY_OK;
  }
  
/*
 * Boot-time setup: register the hotplug callback above so dead CPUs'
 * batches are folded back into each counter's global count.
 */
static int __init percpu_counter_startup(void)
{
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);
  #endif