  /*
   * linux/mm/allocpercpu.c
   *
   * Separated from slab.c August 11, 2006 Christoph Lameter <clameter@sgi.com>
   */
  #include <linux/mm.h>
  #include <linux/module.h>
  #ifndef cache_line_size
  #define cache_line_size()	L1_CACHE_BYTES
  #endif
  /**
   * percpu_depopulate - depopulate per-cpu data for given cpu
   * @__pdata: per-cpu data to depopulate
   * @cpu: depopulate per-cpu data for this cpu
   *
   * Depopulating per-cpu data for a cpu going offline would be a typical
   * use case. You need to register a cpu hotplug handler for that purpose.
   */
  void percpu_depopulate(void *__pdata, int cpu)
  {
  	struct percpu_data *pdata = __percpu_disguise(__pdata);
  
  	kfree(pdata->ptrs[cpu]);
  	pdata->ptrs[cpu] = NULL;
  }
  EXPORT_SYMBOL_GPL(percpu_depopulate);
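
  /*
   * Illustrative sketch, not part of the original file: the cpu hotplug
   * handler the comment above asks for.  my_pdata and my_cpu_callback
   * are hypothetical names; the callback would be registered with
   * register_cpu_notifier().
   *
   *	static int my_cpu_callback(struct notifier_block *nb,
   *				   unsigned long action, void *hcpu)
   *	{
   *		int cpu = (unsigned long)hcpu;
   *
   *		if (action == CPU_DEAD)
   *			percpu_depopulate(my_pdata, cpu);
   *		return NOTIFY_OK;
   *	}
   */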
  
  /**
   * __percpu_depopulate_mask - depopulate per-cpu data for some CPUs
   * @__pdata: per-cpu data to depopulate
   * @mask: depopulate per-cpu data for the CPUs selected through mask bits
   */
  void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
  {
  	int cpu;
  	for_each_cpu_mask(cpu, *mask)
  		percpu_depopulate(__pdata, cpu);
  }
  EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
  
  /**
   * percpu_populate - populate per-cpu data for given cpu
   * @__pdata: per-cpu data to populate further
   * @size: size of per-cpu object
   * @gfp: may sleep or not etc.
   * @cpu: populate per-cpu data for this cpu
   *
   * Populating per-cpu data for a cpu coming online would be a typical
   * use case. You need to register a cpu hotplug handler for that purpose.
   * The per-cpu object is populated with a zeroed buffer.
   */
  void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
  {
  	struct percpu_data *pdata = __percpu_disguise(__pdata);
  	int node = cpu_to_node(cpu);
  	/*
  	 * Round the size up to whole cache lines so that each cpu's
  	 * object gets private cache lines (avoids false sharing).
  	 */
  	size = roundup(size, cache_line_size());
  	BUG_ON(pdata->ptrs[cpu]);
  	if (node_online(node))
  		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
  	else
  		pdata->ptrs[cpu] = kzalloc(size, gfp);
  	return pdata->ptrs[cpu];
  }
  EXPORT_SYMBOL_GPL(percpu_populate);
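
  /*
   * Illustrative sketch, not part of the original file: the populate half
   * of the hotplug pairing suggested above, handled in a switch on
   * "action" in the same hypothetical notifier.  my_pdata and MY_SIZE
   * are made-up names.
   *
   *	case CPU_UP_PREPARE:
   *		if (!percpu_populate(my_pdata, MY_SIZE, GFP_KERNEL, cpu))
   *			return NOTIFY_BAD;
   *		break;
   */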
  
  /**
   * __percpu_populate_mask - populate per-cpu data for more CPUs
   * @__pdata: per-cpu data to populate further
   * @size: size of per-cpu object
   * @gfp: may sleep or not etc.
   * @mask: populate per-cpu data for the CPUs selected through mask bits
   *
   * Per-cpu objects are populated with zeroed buffers.
   */
  int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
  			   cpumask_t *mask)
  {
  	cpumask_t populated;
  	int cpu;
  	cpus_clear(populated);
  	for_each_cpu_mask(cpu, *mask)
  		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
  			__percpu_depopulate_mask(__pdata, &populated);
  			return -ENOMEM;
  		} else
  			cpu_set(cpu, populated);
  	return 0;
  }
  EXPORT_SYMBOL_GPL(__percpu_populate_mask);
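
  /*
   * Illustrative sketch, not part of the original file: populating an
   * already-allocated object for every currently online cpu.  my_pdata
   * and MY_SIZE are hypothetical names.  On failure the function above
   * has already depopulated whatever it managed to populate, so the
   * caller only has to propagate -ENOMEM.
   *
   *	if (__percpu_populate_mask(my_pdata, MY_SIZE, GFP_KERNEL,
   *				   &cpu_online_map))
   *		return -ENOMEM;
   */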
  
  /**
   * __percpu_alloc_mask - initial setup of per-cpu data
   * @size: size of per-cpu object
   * @gfp: may sleep or not etc.
   * @mask: populate per-cpu data for the CPUs selected through mask bits
   *
   * Populating per-cpu data for all online CPUs would be a typical use case,
   * which is simplified by the percpu_alloc() wrapper.
   * Per-cpu objects are populated with zeroed buffers.
   */
  void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
  {
  	/*
  	 * We allocate whole cache lines to avoid false sharing
  	 */
  	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
  	void *pdata = kzalloc(sz, gfp);
  	void *__pdata = __percpu_disguise(pdata);
  
  	if (unlikely(!pdata))
  		return NULL;
  	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
  		return __pdata;
  	kfree(pdata);
  	return NULL;
  }
  EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
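
  /*
   * Illustrative sketch, not part of the original file: typical callers
   * go through the alloc_percpu()/per_cpu_ptr() wrappers in
   * <linux/percpu.h>, which at this point in the tree reach this
   * function with GFP_KERNEL and the online map.  struct my_stats and
   * counters are hypothetical.
   *
   *	struct my_stats *counters = alloc_percpu(struct my_stats);
   *	int cpu;
   *
   *	if (!counters)
   *		return -ENOMEM;
   *	cpu = get_cpu();
   *	per_cpu_ptr(counters, cpu)->packets++;
   *	put_cpu();
   */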
  
  /**
   * percpu_free - final cleanup of per-cpu data
   * @__pdata: object to clean up
   *
   * We simply clean up any per-cpu object left. No need for the client to
   * track and specify through a bit mask which per-cpu objects are to be freed.
   */
  void percpu_free(void *__pdata)
  {
  	if (unlikely(!__pdata))
  		return;
  	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
  	kfree(__percpu_disguise(__pdata));
  }
  EXPORT_SYMBOL_GPL(percpu_free);
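
  /*
   * Illustrative sketch, not part of the original file: teardown is a
   * single call, since percpu_free() above walks cpu_possible_map
   * itself.  counters is the hypothetical object from the previous
   * sketch; the free_percpu() wrapper in <linux/percpu.h> boils down
   * to percpu_free().
   *
   *	free_percpu(counters);
   */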