Commit fb0f330e62d71f7c535251438068199af320cf73

Authored by Mike Travis
Committed by Ingo Molnar
1 parent 9f0e8d0400

x86: modify show_shared_cpu_map in intel_cacheinfo

* Removed kmalloc (or local array) in show_shared_cpu_map().

* Added show_shared_cpu_list() function.

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 1 changed file with 23 additions and 7 deletions
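
For reference, the new attribute sits next to the existing one under each cache leaf: shared_cpu_map keeps the hex bitmask format of cpumask_scnprintf(), while the new shared_cpu_list prints the same mask as a human-readable range list via cpulist_scnprintf(). A minimal userspace sketch for inspecting both files; the cpu0/index2 path and the sample values in the comments are illustrative only, not guaranteed on any given machine:

/* read_shared.c -- dump one cache leaf's sharing attributes (example paths) */
#include <stdio.h>

static void dump(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);	/* line already ends in '\n' */
	fclose(f);
}

int main(void)
{
	/* hex bitmask, e.g. "00000003" when CPUs 0 and 1 share the cache */
	dump("/sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_map");
	/* range list form of the same mask, e.g. "0-1" */
	dump("/sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_list");
	return 0;
}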

arch/x86/kernel/cpu/intel_cacheinfo.c
/*
 * Routines to indentify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};


enum _cache_type
{
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type type:5;
		unsigned int level:3;
		unsigned int is_self_initializing:1;
		unsigned int is_fully_associative:1;
		unsigned int reserved:4;
		unsigned int num_threads_sharing:12;
		unsigned int num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int coherency_line_size:12;
		unsigned int physical_line_partition:10;
		unsigned int ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
};

unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned res : 2;
		unsigned size_encoded : 14;
	};
	unsigned val;
};

static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff // ??
};

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };

static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
				 union _cpuid4_leaf_ebx *ebx,
				 union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;


	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		amd_cpuid4(index, &eax, &ebx, &ecx);
	else
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
	return 0;
}

static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax cache_eax;
	int i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;

			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch(this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if (regs[j] & (1 << 31)) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
				{
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	if (trace)
		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if ( l1i )
		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info *this_leaf;
	unsigned long j;
	int retval;
	cpumask_t oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed_ptr(current, &oldmask);

out:
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
}

-static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
+					int type, char *buf)
{
+	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;
-	int len = cpumask_scnprintf_len(nr_cpu_ids);
-	char *mask_str = kmalloc(len, GFP_KERNEL);

-	if (mask_str) {
-		cpumask_scnprintf(mask_str, len, this_leaf->shared_cpu_map);
-		n = sprintf(buf, "%s\n", mask_str);
-		kfree(mask_str);
+	if (len > 1) {
+		cpumask_t *mask = &this_leaf->shared_cpu_map;
+
+		n = type?
+		    cpulist_scnprintf(buf, len-2, *mask):
+		    cpumask_scnprintf(buf, len-2, *mask);
+		buf[n++] = '\n';
+		buf[n] = '\0';
	}
	return n;
}

+static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
+{
+	return show_shared_cpu_map_func(leaf, 0, buf);
+}
+
+static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
+{
+	return show_shared_cpu_map_func(leaf, 1, buf);
+}
+
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
	switch(this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
		break;
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
		break;
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
		break;
	default:
		return sprintf(buf, "Unknown\n");
		break;
	}
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
+define_one_ro(shared_cpu_list);

static struct attribute * default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
+	&shared_cpu_list.attr,
	NULL
};

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static cpumask_t cache_dev_map = CPU_MASK_NONE;

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu,i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
			}
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	if (!retval)
		cpu_set(cpu, cache_dev_map);

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return retval;
}

static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpu_isset(cpu, cache_dev_map))
		return;
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);
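
A note on the length computation that replaces the kmalloc(): sysfs calls a show() method with a page-sized, page-aligned buffer, so PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf is the number of bytes between buf and the end of its page, and passing len-2 to the scnprintf helpers reserves the two bytes later used for the '\n' and the terminating '\0'. A standalone sketch of that arithmetic, with the ALIGN()/PTR_ALIGN() macros expanded roughly as include/linux/kernel.h defines them; the aligned static buffer merely stands in for the real sysfs page:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

/* userspace expansion of the kernel's align-up helpers */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN(p, a)  ((char *)ALIGN_UP((uintptr_t)(p), (a)))

int main(void)
{
	/* stand-in for the page sysfs hands to a show() method */
	static char page[PAGE_SIZE] __attribute__((aligned(4096)));
	char *buf = page;

	/* bytes from buf to the end of its page: PAGE_SIZE when buf is
	 * page-aligned, less if it pointed mid-page */
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;

	/* len-2 is the budget handed to cpumask/cpulist_scnprintf(); the
	 * last two bytes are kept for the '\n' and the '\0' */
	printf("len = %td (scnprintf budget = %td)\n", len, len - 2);
	return 0;
}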