Blame view
lib/cpumask.c
5.75 KB
5a0e3ad6a include cleanup: ... |
1 |
#include <linux/slab.h> |
ccb46000f [PATCH] cpumask: ... |
2 3 4 |
#include <linux/kernel.h> #include <linux/bitops.h> #include <linux/cpumask.h> |
8bc3bcc93 lib: reduce the u... |
5 |
#include <linux/export.h> |
2d3854a37 cpumask: introduc... |
6 |
#include <linux/bootmem.h> |
ccb46000f [PATCH] cpumask: ... |
7 8 9 10 11 12 |
int __first_cpu(const cpumask_t *srcp) { return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); } EXPORT_SYMBOL(__first_cpu); |
3d18bd74a [PATCH] cpumask: ... |
13 14 15 16 17 |
int __next_cpu(int n, const cpumask_t *srcp) { return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); } EXPORT_SYMBOL(__next_cpu); |
863028207 [PATCH] cpumask: ... |
18 |
|
41df0d61c x86: Add performa... |
19 20 21 22 23 24 25 26 |
#if NR_CPUS > 64
/* Like __next_cpu(), but bounded by nr_cpu_ids rather than NR_CPUS. */
int __next_cpu_nr(int n, const cpumask_t *srcp)
{
	int nxt = find_next_bit(srcp->bits, nr_cpu_ids, n + 1);

	return min_t(int, nr_cpu_ids, nxt);
}
EXPORT_SYMBOL(__next_cpu_nr);
#endif
2d3854a37 cpumask: introduc... |
27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 |
/** * cpumask_next_and - get the next cpu in *src1p & *src2p * @n: the cpu prior to the place to search (ie. return will be > @n) * @src1p: the first cpumask pointer * @src2p: the second cpumask pointer * * Returns >= nr_cpu_ids if no further cpus set in both. */ int cpumask_next_and(int n, const struct cpumask *src1p, const struct cpumask *src2p) { while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) if (cpumask_test_cpu(n, src2p)) break; return n; } EXPORT_SYMBOL(cpumask_next_and); /** * cpumask_any_but - return a "random" in a cpumask, but not this one. * @mask: the cpumask to search * @cpu: the cpu to ignore. * * Often used to find any cpu but smp_processor_id() in a mask. * Returns >= nr_cpu_ids if no cpus set. */ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) { unsigned int i; |
984f2f377 cpumask: introduc... |
56 |
cpumask_check(cpu); |
2d3854a37 cpumask: introduc... |
57 58 59 60 61 62 63 64 |
for_each_cpu(i, mask) if (i != cpu) break; return i; } /* These are not inline because of header tangles. */ #ifdef CONFIG_CPUMASK_OFFSTACK |
ec26b8058 cpumask: document... |
65 66 67 68 69 70 71 72 73 74 75 76 77 78 |
/** * alloc_cpumask_var_node - allocate a struct cpumask on a given node * @mask: pointer to cpumask_var_t where the cpumask is returned * @flags: GFP_ flags * * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is * a nop returning a constant 1 (in <linux/cpumask.h>) * Returns TRUE if memory allocation succeeded, FALSE otherwise. * * In addition, mask will be NULL if this fails. Note that gcc is * usually smart enough to know that mask can never be NULL if * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case * too. */ |
7b4967c53 cpumask: Add allo... |
79 |
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) |
2d3854a37 cpumask: introduc... |
80 |
{ |
38c7fed2f x86: remove some ... |
81 |
*mask = kmalloc_node(cpumask_size(), flags, node); |
2d3854a37 cpumask: introduc... |
82 83 84 85 86 87 88 |
#ifdef CONFIG_DEBUG_PER_CPU_MAPS if (!*mask) { printk(KERN_ERR "=> alloc_cpumask_var: failed! "); dump_stack(); } #endif |
2a5300803 cpumask: zero ext... |
89 90 |
/* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */ if (*mask) { |
4f032ac41 cpumask: fix slab... |
91 |
unsigned char *ptr = (unsigned char *)cpumask_bits(*mask); |
2a5300803 cpumask: zero ext... |
92 93 |
unsigned int tail; tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); |
4f032ac41 cpumask: fix slab... |
94 |
memset(ptr + cpumask_size() - tail, 0, tail); |
2a5300803 cpumask: zero ext... |
95 |
} |
2d3854a37 cpumask: introduc... |
96 97 |
return *mask != NULL; } |
7b4967c53 cpumask: Add allo... |
98 |
EXPORT_SYMBOL(alloc_cpumask_var_node); |
0281b5dc0 cpumask: introduc... |
99 100 101 102 103 |
/**
 * zalloc_cpumask_var_node - allocate a zero-filled cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate
 *
 * Like alloc_cpumask_var_node(), but the returned mask is zeroed.
 */
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	gfp_t zeroing_flags = flags | __GFP_ZERO;

	return alloc_cpumask_var_node(mask, zeroing_flags, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);
ec26b8058 cpumask: document... |
104 105 106 107 108 109 110 111 112 113 |
/** * alloc_cpumask_var - allocate a struct cpumask * @mask: pointer to cpumask_var_t where the cpumask is returned * @flags: GFP_ flags * * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is * a nop returning a constant 1 (in <linux/cpumask.h>). * * See alloc_cpumask_var_node. */ |
7b4967c53 cpumask: Add allo... |
114 115 |
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) { |
37e7b5f15 cpumask: alloc_cp... |
116 |
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE); |
7b4967c53 cpumask: Add allo... |
117 |
} |
2d3854a37 cpumask: introduc... |
118 |
EXPORT_SYMBOL(alloc_cpumask_var); |
0281b5dc0 cpumask: introduc... |
119 120 121 122 123 |
/**
 * zalloc_cpumask_var - allocate a zero-filled struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Like alloc_cpumask_var(), but the returned mask is zeroed.
 */
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	gfp_t zeroing_flags = flags | __GFP_ZERO;

	return alloc_cpumask_var(mask, zeroing_flags);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
ec26b8058 cpumask: document... |
124 125 126 127 128 |
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	/* No failure check: memblock_virt_alloc() panics on allocation failure. */
	*mask = memblock_virt_alloc(cpumask_size(), 0);
}
ec26b8058 cpumask: document... |
137 138 139 140 141 142 |
/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);	/* kfree(NULL) is a no-op, hence the NULL-safety above. */
}
EXPORT_SYMBOL(free_cpumask_var);
cd83e42c6 cpumask: new API, v2 |
148 |
|
ec26b8058 cpumask: document... |
149 150 151 152 |
/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	/* memblock works on physical addresses, hence the __pa() conversion. */
	memblock_free_early(__pa(mask), cpumask_size());
}
2d3854a37 cpumask: introduc... |
157 |
#endif |
da91309e0 cpumask: Utility ... |
158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 |
/**
 * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
 *
 * @i: index number
 * @numa_node: local numa_node
 * @dstp: cpumask with the relevant cpu bit set according to the policy
 *
 * This function sets the cpumask according to a numa aware policy.
 * cpumask could be used as an affinity hint for the IRQ related to a
 * queue. When the policy is to spread queues across cores - local cores
 * first.
 *
 * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
 * the cpu bit and need to re-call the function.
 */
int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
{
	cpumask_var_t mask;
	int cpu;
	int ret = 0;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Wrap the index so any @i maps onto some online cpu. */
	i %= num_online_cpus();

	if (!cpumask_of_node(numa_node)) {
		/* Use all online cpu's for non numa aware system */
		cpumask_copy(mask, cpu_online_mask);
	} else {
		int n;

		/* Online cpus local to @numa_node come first in the ordering. */
		cpumask_and(mask,
			    cpumask_of_node(numa_node), cpu_online_mask);

		n = cpumask_weight(mask);
		if (i >= n) {
			i -= n;

			/* If index > number of local cpu's, mask out local
			 * cpu's
			 */
			cpumask_andnot(mask, cpu_online_mask, mask);
		}
	}

	/* Walk to the i'th cpu of the chosen mask; cpu holds it on exit. */
	for_each_cpu(cpu, mask) {
		if (--i < 0)
			goto out;
	}

	/*
	 * NOTE(review): reachable when the online set shrank between
	 * num_online_cpus() above and this walk (cpu hotplug) — caller is
	 * expected to retry on -EAGAIN, per the kernel-doc contract.
	 */
	ret = -EAGAIN;

out:
	free_cpumask_var(mask);

	if (!ret)
		cpumask_set_cpu(cpu, dstp);

	return ret;
}
EXPORT_SYMBOL(cpumask_set_cpu_local_first);