/*
 * linux/mm/allocpercpu.c
 *
 * Separated from slab.c August 11, 2006 Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>

/* Fall back to the L1 cache size on architectures that do not define it */
#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
static void percpu_depopulate(void *__pdata, int cpu)
{
	/* Recover the real percpu_data pointer from its disguised handle */
	struct percpu_data *pdata = __percpu_disguise(__pdata);

	kfree(pdata->ptrs[cpu]);
	pdata->ptrs[cpu] = NULL;
}

/**
 * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for cpu's selected through mask bits
 */
static void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
	int cpu;

	for_each_cpu_mask_nr(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}

#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))
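
/*
 * Illustrative sketch only, not part of this file: the kernel-doc above
 * suggests driving populate/depopulate from a CPU hotplug handler. Since
 * these helpers are static here, the sketch merely shows the intended
 * pattern; my_pdata and my_obj_size are hypothetical placeholders.
 *
 *	static int my_cpu_callback(struct notifier_block *nfb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *			if (!percpu_populate(my_pdata, my_obj_size,
 *					     GFP_KERNEL, cpu))
 *				return NOTIFY_BAD;
 *			break;
 *		case CPU_DEAD:
 *			percpu_depopulate(my_pdata, cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */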

/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * Per-cpu object is populated with zeroed buffer.
 */
static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);
	int node = cpu_to_node(cpu);

	/*
	 * We should make sure each CPU gets private memory.
	 */
	size = roundup(size, cache_line_size());

	BUG_ON(pdata->ptrs[cpu]);
	if (node_online(node))
		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
	else
		pdata->ptrs[cpu] = kzalloc(size, gfp);
	return pdata->ptrs[cpu];
}
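
/*
 * Worked example of the rounding above, with an assumed 64-byte cache
 * line: a request for a 4-byte per-cpu counter is rounded up to 64
 * bytes, so no two CPUs' objects can share a cache line and ping-pong
 * it between their caches (false sharing).
 */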

/**
 * percpu_populate_mask - populate per-cpu data for more cpu's
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for cpu's selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask)
{
	cpumask_t populated;
	int cpu;

	cpus_clear(populated);
	for_each_cpu_mask_nr(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			/* Allocation failed: roll back the cpus populated so far */
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}

#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))

/**
 * percpu_alloc_mask - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for cpu's selected through mask bits
 *
 * Populating per-cpu data for all online cpu's would be a typical use case,
 * which is simplified by the percpu_alloc() wrapper.
 * Per-cpu objects are populated with zeroed buffers.
 */
void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
	/*
	 * We allocate whole cache lines to avoid false sharing
	 */
	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
	void *pdata = kzalloc(sz, gfp);
	/* Hand out a disguised pointer so callers cannot dereference it directly */
	void *__pdata = __percpu_disguise(pdata);

	if (unlikely(!pdata))
		return NULL;
	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
		return __pdata;
	kfree(pdata);
	return NULL;
}
EXPORT_SYMBOL_GPL(__percpu_alloc_mask);

/**
 * percpu_free - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bit mask which per-cpu objects are to free.
 */
void percpu_free(void *__pdata)
{
	if (unlikely(!__pdata))
		return;
	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(percpu_free);
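
/*
 * Usage sketch (illustrative assumption, not part of this file): callers
 * normally reach the exported entry points through the percpu_alloc() and
 * percpu_ptr() helpers from <linux/percpu.h>; my_counters is a
 * hypothetical name.
 *
 *	long *my_counters = percpu_alloc(sizeof(long), GFP_KERNEL);
 *	int cpu;
 *
 *	if (!my_counters)
 *		return -ENOMEM;
 *	for_each_online_cpu(cpu)
 *		(*percpu_ptr(my_counters, cpu))++;
 *	...
 *	percpu_free(my_counters);
 */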