Blame view

lib/cpumask.c 6.82 KB
b24413180   Greg Kroah-Hartman   License cleanup: ...
1
  // SPDX-License-Identifier: GPL-2.0
5a0e3ad6a   Tejun Heo   include cleanup: ...
2
  #include <linux/slab.h>
ccb46000f   Andrew Morton   [PATCH] cpumask: ...
3
4
5
  #include <linux/kernel.h>
  #include <linux/bitops.h>
  #include <linux/cpumask.h>
8bc3bcc93   Paul Gortmaker   lib: reduce the u...
6
  #include <linux/export.h>
57c8a661d   Mike Rapoport   mm: remove includ...
7
  #include <linux/memblock.h>
98fa15f34   Anshuman Khandual   mm: replace all o...
8
  #include <linux/numa.h>
1abdfe706   Alex Belits   lib: Restrict cpu...
9
  #include <linux/sched/isolation.h>
ccb46000f   Andrew Morton   [PATCH] cpumask: ...
10

2d3854a37   Rusty Russell   cpumask: introduc...
11
  /**
f22ef333c   Alexey Dobriyan   cpumask: make cpu...
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
   * cpumask_next - get the next cpu in a cpumask
   * @n: the cpu prior to the place to search (ie. return will be > @n)
   * @srcp: the cpumask pointer
   *
   * Returns >= nr_cpu_ids if no further cpus set.
   */
  unsigned int cpumask_next(int n, const struct cpumask *srcp)
  {
  	/* -1 is a legal arg here. */
  	if (n != -1)
  		cpumask_check(n);
  	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
  }
  EXPORT_SYMBOL(cpumask_next);
  
  /**
2d3854a37   Rusty Russell   cpumask: introduc...
28
29
30
31
32
33
34
35
36
37
   * cpumask_next_and - get the next cpu in *src1p & *src2p
   * @n: the cpu prior to the place to search (ie. return will be > @n)
   * @src1p: the first cpumask pointer
   * @src2p: the second cpumask pointer
   *
   * Returns >= nr_cpu_ids if no further cpus set in both.
   */
  int cpumask_next_and(int n, const struct cpumask *src1p,
  		     const struct cpumask *src2p)
  {
0ade34c37   Clement Courbet   lib: optimize cpu...
38
39
40
41
42
  	/* -1 is a legal arg here. */
  	if (n != -1)
  		cpumask_check(n);
  	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
  		nr_cpumask_bits, n + 1);
2d3854a37   Rusty Russell   cpumask: introduc...
43
44
45
46
47
48
49
50
51
52
53
54
55
56
  }
  EXPORT_SYMBOL(cpumask_next_and);
  
  /**
   * cpumask_any_but - return a "random" in a cpumask, but not this one.
   * @mask: the cpumask to search
   * @cpu: the cpu to ignore.
   *
   * Often used to find any cpu but smp_processor_id() in a mask.
   * Returns >= nr_cpu_ids if no cpus set.
   */
  int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
  {
  	unsigned int i;
984f2f377   Rusty Russell   cpumask: introduc...
57
  	cpumask_check(cpu);
2d3854a37   Rusty Russell   cpumask: introduc...
58
59
60
61
62
  	for_each_cpu(i, mask)
  		if (i != cpu)
  			break;
  	return i;
  }
3712bba1a   Thomas Gleixner   cpumask: Export c...
63
  EXPORT_SYMBOL(cpumask_any_but);
2d3854a37   Rusty Russell   cpumask: introduc...
64

c743f0a5c   Peter Zijlstra   sched/fair, cpuma...
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	/* Already wrapped and the search has passed @start again: done. */
	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		/* Ran off the end of the mask: restart once from bit 0. */
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
2d3854a37   Rusty Russell   cpumask: introduc...
96
97
  /* These are not inline because of header tangles. */
  #ifdef CONFIG_CPUMASK_OFFSTACK
ec26b8058   Mike Travis   cpumask: document...
98
99
100
101
102
103
104
105
106
107
108
109
110
111
  /**
   * alloc_cpumask_var_node - allocate a struct cpumask on a given node
   * @mask: pointer to cpumask_var_t where the cpumask is returned
   * @flags: GFP_ flags
   *
   * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
   * a nop returning a constant 1 (in <linux/cpumask.h>)
   * Returns TRUE if memory allocation succeeded, FALSE otherwise.
   *
   * In addition, mask will be NULL if this fails.  Note that gcc is
   * usually smart enough to know that mask can never be NULL if
   * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
   * too.
   */
7b4967c53   Mike Travis   cpumask: Add allo...
112
  bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
2d3854a37   Rusty Russell   cpumask: introduc...
113
  {
38c7fed2f   Yinghai Lu   x86: remove some ...
114
  	*mask = kmalloc_node(cpumask_size(), flags, node);
2d3854a37   Rusty Russell   cpumask: introduc...
115
116
117
118
119
120
121
  #ifdef CONFIG_DEBUG_PER_CPU_MAPS
  	if (!*mask) {
  		printk(KERN_ERR "=> alloc_cpumask_var: failed!
  ");
  		dump_stack();
  	}
  #endif
2a5300803   Rusty Russell   cpumask: zero ext...
122

2d3854a37   Rusty Russell   cpumask: introduc...
123
124
  	return *mask != NULL;
  }
7b4967c53   Mike Travis   cpumask: Add allo...
125
  EXPORT_SYMBOL(alloc_cpumask_var_node);
0281b5dc0   Yinghai Lu   cpumask: introduc...
126
127
128
129
130
/**
 * zalloc_cpumask_var_node - allocate a zero-filled struct cpumask on a node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: NUMA node to allocate the cpumask on
 *
 * Same as alloc_cpumask_var_node(), but the allocation is zeroed
 * (via __GFP_ZERO).  Returns TRUE if allocation succeeded.
 */
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);
ec26b8058   Mike Travis   cpumask: document...
131
132
133
134
135
136
137
138
139
140
  /**
   * alloc_cpumask_var - allocate a struct cpumask
   * @mask: pointer to cpumask_var_t where the cpumask is returned
   * @flags: GFP_ flags
   *
   * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
   * a nop returning a constant 1 (in <linux/cpumask.h>).
   *
   * See alloc_cpumask_var_node.
   */
7b4967c53   Mike Travis   cpumask: Add allo...
141
142
  bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
  {
37e7b5f15   KOSAKI Motohiro   cpumask: alloc_cp...
143
  	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
7b4967c53   Mike Travis   cpumask: Add allo...
144
  }
2d3854a37   Rusty Russell   cpumask: introduc...
145
  EXPORT_SYMBOL(alloc_cpumask_var);
0281b5dc0   Yinghai Lu   cpumask: introduc...
146
147
148
149
150
/**
 * zalloc_cpumask_var - allocate a zero-filled struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Same as alloc_cpumask_var(), but the allocation is zeroed
 * (via __GFP_ZERO).  Returns TRUE if allocation succeeded.
 */
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
ec26b8058   Mike Travis   cpumask: document...
151
152
153
154
155
  /**
   * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
   * @mask: pointer to cpumask_var_t where the cpumask is returned
   *
   * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
e9690a6e4   Li Zefan   cpumask: fix bogu...
156
   * a nop (in <linux/cpumask.h>).
ec26b8058   Mike Travis   cpumask: document...
157
158
159
   * Either returns an allocated (zero-filled) cpumask, or causes the
   * system to panic.
   */
2d3854a37   Rusty Russell   cpumask: introduc...
160
161
  void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
  {
7e1c4e279   Mike Rapoport   memblock: stop us...
162
  	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
8a7f97b90   Mike Rapoport   treewide: add che...
163
164
165
166
  	if (!*mask)
  		panic("%s: Failed to allocate %u bytes
  ", __func__,
  		      cpumask_size());
2d3854a37   Rusty Russell   cpumask: introduc...
167
  }
ec26b8058   Mike Travis   cpumask: document...
168
169
170
171
172
173
  /**
   * free_cpumask_var - frees memory allocated for a struct cpumask.
   * @mask: cpumask to free
   *
   * This is safe on a NULL mask.
   */
2d3854a37   Rusty Russell   cpumask: introduc...
174
175
176
177
178
  void free_cpumask_var(cpumask_var_t mask)
  {
  	kfree(mask);
  }
  EXPORT_SYMBOL(free_cpumask_var);
cd83e42c6   Rusty Russell   cpumask: new API, v2
179

ec26b8058   Mike Travis   cpumask: document...
180
181
182
183
  /**
   * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
   * @mask: cpumask to free
   */
984f2f377   Rusty Russell   cpumask: introduc...
184
  void __init free_bootmem_cpumask_var(cpumask_var_t mask)
cd83e42c6   Rusty Russell   cpumask: new API, v2
185
  {
c15295001   Santosh Shilimkar   lib/cpumask.c: us...
186
  	memblock_free_early(__pa(mask), cpumask_size());
cd83e42c6   Rusty Russell   cpumask: new API, v2
187
  }
2d3854a37   Rusty Russell   cpumask: introduc...
188
  #endif
da91309e0   Amir Vadai   cpumask: Utility ...
189
190
  
  /**
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
191
   * cpumask_local_spread - select the i'th cpu with local numa cpu's first
da91309e0   Amir Vadai   cpumask: Utility ...
192
   * @i: index number
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
193
   * @node: local numa_node
da91309e0   Amir Vadai   cpumask: Utility ...
194
   *
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
195
196
197
   * This function selects an online CPU according to a numa aware policy;
   * local cpus are returned first, followed by non-local ones, then it
   * wraps around.
da91309e0   Amir Vadai   cpumask: Utility ...
198
   *
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
199
   * It's not very efficient, but useful for setup.
da91309e0   Amir Vadai   cpumask: Utility ...
200
   */
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
201
  unsigned int cpumask_local_spread(unsigned int i, int node)
da91309e0   Amir Vadai   cpumask: Utility ...
202
  {
1abdfe706   Alex Belits   lib: Restrict cpu...
203
204
  	int cpu, hk_flags;
  	const struct cpumask *mask;
da91309e0   Amir Vadai   cpumask: Utility ...
205

1abdfe706   Alex Belits   lib: Restrict cpu...
206
207
  	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
  	mask = housekeeping_cpumask(hk_flags);
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
208
  	/* Wrap: we always want a cpu. */
1abdfe706   Alex Belits   lib: Restrict cpu...
209
  	i %= cpumask_weight(mask);
da91309e0   Amir Vadai   cpumask: Utility ...
210

98fa15f34   Anshuman Khandual   mm: replace all o...
211
  	if (node == NUMA_NO_NODE) {
1abdfe706   Alex Belits   lib: Restrict cpu...
212
  		for_each_cpu(cpu, mask) {
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
213
214
  			if (i-- == 0)
  				return cpu;
1abdfe706   Alex Belits   lib: Restrict cpu...
215
  		}
da91309e0   Amir Vadai   cpumask: Utility ...
216
  	} else {
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
217
  		/* NUMA first. */
1abdfe706   Alex Belits   lib: Restrict cpu...
218
  		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
219
220
  			if (i-- == 0)
  				return cpu;
1abdfe706   Alex Belits   lib: Restrict cpu...
221
  		}
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
222

1abdfe706   Alex Belits   lib: Restrict cpu...
223
  		for_each_cpu(cpu, mask) {
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
224
225
226
227
228
229
  			/* Skip NUMA nodes, done above. */
  			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
  				continue;
  
  			if (i-- == 0)
  				return cpu;
da91309e0   Amir Vadai   cpumask: Utility ...
230
231
  		}
  	}
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
232
  	BUG();
da91309e0   Amir Vadai   cpumask: Utility ...
233
  }
f36963c9d   Rusty Russell   cpumask_set_cpu_l...
234
  EXPORT_SYMBOL(cpumask_local_spread);
46a87b385   Paul Turner   sched/core: Distr...
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
  
/* Per-cpu cursor: the cpu returned by the previous call on this cpu. */
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - pick an arbitrary cpu within *src1p & *src2p
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same srcp1 and srcp2 will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	/* Advance past the previous pick; wrap to the first cpu if exhausted. */
	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	/* Remember the pick only when the intersection was non-empty. */
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);