Commit 36c46ca4f322a7bf89aad5462a3a1f61713edce7

Authored by Boris Ostrovsky
Committed by H. Peter Anvin
1 parent cb57a2b4cf

x86, microcode, AMD: Add support for family 16h processors

Add valid patch size for family 16h processors.

[ hpa: promoting to urgent/stable since it is hw enabling and trivial ]

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@amd.com>
Acked-by: Andreas Herrmann <herrmann.der.user@googlemail.com>
Link: http://lkml.kernel.org/r/1353004910-2204-1-git-send-email-boris.ostrovsky@amd.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Cc: <stable@vger.kernel.org>

Showing 1 changed file with 4 additions and 0 deletions (inline diff).

arch/x86/kernel/microcode_amd.c
1 /* 1 /*
2 * AMD CPU Microcode Update Driver for Linux 2 * AMD CPU Microcode Update Driver for Linux
3 * Copyright (C) 2008-2011 Advanced Micro Devices Inc. 3 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
4 * 4 *
5 * Author: Peter Oruba <peter.oruba@amd.com> 5 * Author: Peter Oruba <peter.oruba@amd.com>
6 * 6 *
7 * Based on work by: 7 * Based on work by:
8 * Tigran Aivazian <tigran@aivazian.fsnet.co.uk> 8 * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
9 * 9 *
10 * Maintainers: 10 * Maintainers:
11 * Andreas Herrmann <herrmann.der.user@googlemail.com> 11 * Andreas Herrmann <herrmann.der.user@googlemail.com>
12 * Borislav Petkov <bp@alien8.de> 12 * Borislav Petkov <bp@alien8.de>
13 * 13 *
14 * This driver allows to upgrade microcode on F10h AMD 14 * This driver allows to upgrade microcode on F10h AMD
15 * CPUs and later. 15 * CPUs and later.
16 * 16 *
17 * Licensed under the terms of the GNU General Public 17 * Licensed under the terms of the GNU General Public
18 * License version 2. See file COPYING for details. 18 * License version 2. See file COPYING for details.
19 */ 19 */
20 20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 22
23 #include <linux/firmware.h> 23 #include <linux/firmware.h>
24 #include <linux/pci_ids.h> 24 #include <linux/pci_ids.h>
25 #include <linux/uaccess.h> 25 #include <linux/uaccess.h>
26 #include <linux/vmalloc.h> 26 #include <linux/vmalloc.h>
27 #include <linux/kernel.h> 27 #include <linux/kernel.h>
28 #include <linux/module.h> 28 #include <linux/module.h>
29 #include <linux/pci.h> 29 #include <linux/pci.h>
30 30
31 #include <asm/microcode.h> 31 #include <asm/microcode.h>
32 #include <asm/processor.h> 32 #include <asm/processor.h>
33 #include <asm/msr.h> 33 #include <asm/msr.h>
34 34
MODULE_DESCRIPTION("AMD Microcode Update Driver");
MODULE_AUTHOR("Peter Oruba");
MODULE_LICENSE("GPL v2");

/* Container file magic: the u32 0x00414d44 is the bytes "DMA\0" read LE. */
#define UCODE_MAGIC                0x00414d44
/* Section type IDs inside the container file. */
#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000	/* CPU equivalence table */
#define UCODE_UCODE_TYPE           0x00000001	/* microcode patch section */
42 42
/*
 * One row of the equivalence table shipped in the container file; it maps
 * an installed CPU signature (CPUID Fn0000_0001 EAX) to the equivalence
 * ID that microcode patches are keyed on.  Layout matches the file format,
 * hence packed.
 */
struct equiv_cpu_entry {
	u32	installed_cpu;		/* CPUID signature of the CPU */
	u32	fixed_errata_mask;
	u32	fixed_errata_compare;
	u16	equiv_cpu;		/* ID used to look up patches */
	u16	res;			/* reserved/padding in the file format */
} __attribute__((packed));
50 50
/*
 * On-disk header of one AMD microcode patch.  Field layout mirrors the
 * container file format exactly (packed).  Only a few fields are consumed
 * by this driver: patch_id, processor_rev_id, nb_dev_id/sb_dev_id and
 * data_code (whose address is handed to the patch-loader MSR).
 */
struct microcode_header_amd {
	u32	data_code;
	u32	patch_id;		/* patch revision number */
	u16	mc_patch_data_id;
	u8	mc_patch_data_len;
	u8	init_flag;
	u32	mc_patch_data_checksum;
	u32	nb_dev_id;		/* northbridge dev ID (chipset patches) */
	u32	sb_dev_id;		/* southbridge dev ID (chipset patches) */
	u16	processor_rev_id;	/* equivalence ID this patch targets */
	u8	nb_rev_id;
	u8	sb_rev_id;
	u8	bios_api_rev;
	u8	reserved1[3];
	u32	match_reg[8];
} __attribute__((packed));
67 67
68 struct microcode_amd { 68 struct microcode_amd {
69 struct microcode_header_amd hdr; 69 struct microcode_header_amd hdr;
70 unsigned int mpb[0]; 70 unsigned int mpb[0];
71 }; 71 };
72 72
#define SECTION_HDR_SIZE	8	/* u32 type + u32 size */
#define CONTAINER_HDR_SZ	12	/* u32 magic + u32 type + u32 size */

/* Equivalence table parsed from the container; NULL until installed. */
static struct equiv_cpu_entry *equiv_cpu_table;

/* One cached microcode patch, keyed by its equivalence CPU ID. */
struct ucode_patch {
	struct list_head plist;		/* link in the global patch cache */
	void *data;			/* kmalloc'd copy of the patch body */
	u32 patch_id;			/* patch revision */
	u16 equiv_cpu;			/* processor equivalence ID */
};

/* Global cache holding the newest patch seen per equivalence ID. */
static LIST_HEAD(pcache);
86 86
87 static u16 find_equiv_id(unsigned int cpu) 87 static u16 find_equiv_id(unsigned int cpu)
88 { 88 {
89 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 89 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
90 int i = 0; 90 int i = 0;
91 91
92 if (!equiv_cpu_table) 92 if (!equiv_cpu_table)
93 return 0; 93 return 0;
94 94
95 while (equiv_cpu_table[i].installed_cpu != 0) { 95 while (equiv_cpu_table[i].installed_cpu != 0) {
96 if (uci->cpu_sig.sig == equiv_cpu_table[i].installed_cpu) 96 if (uci->cpu_sig.sig == equiv_cpu_table[i].installed_cpu)
97 return equiv_cpu_table[i].equiv_cpu; 97 return equiv_cpu_table[i].equiv_cpu;
98 98
99 i++; 99 i++;
100 } 100 }
101 return 0; 101 return 0;
102 } 102 }
103 103
104 static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu) 104 static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
105 { 105 {
106 int i = 0; 106 int i = 0;
107 107
108 BUG_ON(!equiv_cpu_table); 108 BUG_ON(!equiv_cpu_table);
109 109
110 while (equiv_cpu_table[i].equiv_cpu != 0) { 110 while (equiv_cpu_table[i].equiv_cpu != 0) {
111 if (equiv_cpu == equiv_cpu_table[i].equiv_cpu) 111 if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
112 return equiv_cpu_table[i].installed_cpu; 112 return equiv_cpu_table[i].installed_cpu;
113 i++; 113 i++;
114 } 114 }
115 return 0; 115 return 0;
116 } 116 }
117 117
/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	/* Linear scan: the cache holds at most a handful of entries. */
	list_for_each_entry(p, &pcache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;
	return NULL;
}
130 130
131 static void update_cache(struct ucode_patch *new_patch) 131 static void update_cache(struct ucode_patch *new_patch)
132 { 132 {
133 struct ucode_patch *p; 133 struct ucode_patch *p;
134 134
135 list_for_each_entry(p, &pcache, plist) { 135 list_for_each_entry(p, &pcache, plist) {
136 if (p->equiv_cpu == new_patch->equiv_cpu) { 136 if (p->equiv_cpu == new_patch->equiv_cpu) {
137 if (p->patch_id >= new_patch->patch_id) 137 if (p->patch_id >= new_patch->patch_id)
138 /* we already have the latest patch */ 138 /* we already have the latest patch */
139 return; 139 return;
140 140
141 list_replace(&p->plist, &new_patch->plist); 141 list_replace(&p->plist, &new_patch->plist);
142 kfree(p->data); 142 kfree(p->data);
143 kfree(p); 143 kfree(p);
144 return; 144 return;
145 } 145 }
146 } 146 }
147 /* no patch found, add it */ 147 /* no patch found, add it */
148 list_add_tail(&new_patch->plist, &pcache); 148 list_add_tail(&new_patch->plist, &pcache);
149 } 149 }
150 150
151 static void free_cache(void) 151 static void free_cache(void)
152 { 152 {
153 struct ucode_patch *p, *tmp; 153 struct ucode_patch *p, *tmp;
154 154
155 list_for_each_entry_safe(p, tmp, &pcache, plist) { 155 list_for_each_entry_safe(p, tmp, &pcache, plist) {
156 __list_del(p->plist.prev, p->plist.next); 156 __list_del(p->plist.prev, p->plist.next);
157 kfree(p->data); 157 kfree(p->data);
158 kfree(p); 158 kfree(p);
159 } 159 }
160 } 160 }
161 161
162 static struct ucode_patch *find_patch(unsigned int cpu) 162 static struct ucode_patch *find_patch(unsigned int cpu)
163 { 163 {
164 u16 equiv_id; 164 u16 equiv_id;
165 165
166 equiv_id = find_equiv_id(cpu); 166 equiv_id = find_equiv_id(cpu);
167 if (!equiv_id) 167 if (!equiv_id)
168 return NULL; 168 return NULL;
169 169
170 return cache_find_patch(equiv_id); 170 return cache_find_patch(equiv_id);
171 } 171 }
172 172
/*
 * Fill @csig with @cpu's CPUID signature and currently running microcode
 * revision (cached in cpuinfo).  Always succeeds.
 */
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;
	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}
183 183
/*
 * Sanity-check @patch_size against the per-family maximum patch size and
 * against @size, the bytes remaining in the container.  Returns
 * @patch_size when plausible, 0 on mismatch.
 */
static unsigned int verify_patch_size(int cpu, u32 patch_size,
				      unsigned int size)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	u32 max_size;

/* Per-family upper bounds on the size of one microcode patch body. */
#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	switch (c->x86) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	default:
		max_size = F1XH_MPB_MAX_SIZE;	/* families 0x10-0x13 etc. */
		break;
	}

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}
213 217
/*
 * Apply the cached patch for @cpu if it is newer than the running
 * revision.  Returns 0 on success or when no update is needed, -1 when
 * the CPU did not accept the patch.  Must run on @cpu itself because it
 * reads/writes that CPU's patch-level MSRs.
 */
static int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev, dummy;

	/* The MSR accesses below are only valid on the target CPU. */
	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd = p->data;
	uci->mc = p->data;

	/* Read the currently applied patch level. */
	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
		return 0;
	}

	/* Hand the patch's physical location to the CPU's patch loader. */
	wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);

	/* verify patch application was successful */
	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc_amd->hdr.patch_id) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
		       cpu, mc_amd->hdr.patch_id);
		return -1;
	}

	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	return 0;
}
257 261
258 static int install_equiv_cpu_table(const u8 *buf) 262 static int install_equiv_cpu_table(const u8 *buf)
259 { 263 {
260 unsigned int *ibuf = (unsigned int *)buf; 264 unsigned int *ibuf = (unsigned int *)buf;
261 unsigned int type = ibuf[1]; 265 unsigned int type = ibuf[1];
262 unsigned int size = ibuf[2]; 266 unsigned int size = ibuf[2];
263 267
264 if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { 268 if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
265 pr_err("empty section/" 269 pr_err("empty section/"
266 "invalid type field in container file section header\n"); 270 "invalid type field in container file section header\n");
267 return -EINVAL; 271 return -EINVAL;
268 } 272 }
269 273
270 equiv_cpu_table = vmalloc(size); 274 equiv_cpu_table = vmalloc(size);
271 if (!equiv_cpu_table) { 275 if (!equiv_cpu_table) {
272 pr_err("failed to allocate equivalent CPU table\n"); 276 pr_err("failed to allocate equivalent CPU table\n");
273 return -ENOMEM; 277 return -ENOMEM;
274 } 278 }
275 279
276 memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size); 280 memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);
277 281
278 /* add header length */ 282 /* add header length */
279 return size + CONTAINER_HDR_SZ; 283 return size + CONTAINER_HDR_SZ;
280 } 284 }
281 285
/* Release the equivalence table; vfree(NULL) is a no-op, so always safe. */
static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}
287 291
/* Tear down all driver state: equivalence table and patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}
293 297
294 /* 298 /*
295 * We return the current size even if some of the checks failed so that 299 * We return the current size even if some of the checks failed so that
296 * we can skip over the next patch. If we return a negative value, we 300 * we can skip over the next patch. If we return a negative value, we
297 * signal a grave error like a memory allocation has failed and the 301 * signal a grave error like a memory allocation has failed and the
298 * driver cannot continue functioning normally. In such cases, we tear 302 * driver cannot continue functioning normally. In such cases, we tear
299 * down everything we've used up so far and exit. 303 * down everything we've used up so far and exit.
300 */ 304 */
301 static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) 305 static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
302 { 306 {
303 struct cpuinfo_x86 *c = &cpu_data(cpu); 307 struct cpuinfo_x86 *c = &cpu_data(cpu);
304 struct microcode_header_amd *mc_hdr; 308 struct microcode_header_amd *mc_hdr;
305 struct ucode_patch *patch; 309 struct ucode_patch *patch;
306 unsigned int patch_size, crnt_size, ret; 310 unsigned int patch_size, crnt_size, ret;
307 u32 proc_fam; 311 u32 proc_fam;
308 u16 proc_id; 312 u16 proc_id;
309 313
310 patch_size = *(u32 *)(fw + 4); 314 patch_size = *(u32 *)(fw + 4);
311 crnt_size = patch_size + SECTION_HDR_SIZE; 315 crnt_size = patch_size + SECTION_HDR_SIZE;
312 mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE); 316 mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
313 proc_id = mc_hdr->processor_rev_id; 317 proc_id = mc_hdr->processor_rev_id;
314 318
315 proc_fam = find_cpu_family_by_equiv_cpu(proc_id); 319 proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
316 if (!proc_fam) { 320 if (!proc_fam) {
317 pr_err("No patch family for equiv ID: 0x%04x\n", proc_id); 321 pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
318 return crnt_size; 322 return crnt_size;
319 } 323 }
320 324
321 /* check if patch is for the current family */ 325 /* check if patch is for the current family */
322 proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); 326 proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
323 if (proc_fam != c->x86) 327 if (proc_fam != c->x86)
324 return crnt_size; 328 return crnt_size;
325 329
326 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { 330 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
327 pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", 331 pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
328 mc_hdr->patch_id); 332 mc_hdr->patch_id);
329 return crnt_size; 333 return crnt_size;
330 } 334 }
331 335
332 ret = verify_patch_size(cpu, patch_size, leftover); 336 ret = verify_patch_size(cpu, patch_size, leftover);
333 if (!ret) { 337 if (!ret) {
334 pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); 338 pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
335 return crnt_size; 339 return crnt_size;
336 } 340 }
337 341
338 patch = kzalloc(sizeof(*patch), GFP_KERNEL); 342 patch = kzalloc(sizeof(*patch), GFP_KERNEL);
339 if (!patch) { 343 if (!patch) {
340 pr_err("Patch allocation failure.\n"); 344 pr_err("Patch allocation failure.\n");
341 return -EINVAL; 345 return -EINVAL;
342 } 346 }
343 347
344 patch->data = kzalloc(patch_size, GFP_KERNEL); 348 patch->data = kzalloc(patch_size, GFP_KERNEL);
345 if (!patch->data) { 349 if (!patch->data) {
346 pr_err("Patch data allocation failure.\n"); 350 pr_err("Patch data allocation failure.\n");
347 kfree(patch); 351 kfree(patch);
348 return -EINVAL; 352 return -EINVAL;
349 } 353 }
350 354
351 /* All looks ok, copy patch... */ 355 /* All looks ok, copy patch... */
352 memcpy(patch->data, fw + SECTION_HDR_SIZE, patch_size); 356 memcpy(patch->data, fw + SECTION_HDR_SIZE, patch_size);
353 INIT_LIST_HEAD(&patch->plist); 357 INIT_LIST_HEAD(&patch->plist);
354 patch->patch_id = mc_hdr->patch_id; 358 patch->patch_id = mc_hdr->patch_id;
355 patch->equiv_cpu = proc_id; 359 patch->equiv_cpu = proc_id;
356 360
357 /* ... and add to cache. */ 361 /* ... and add to cache. */
358 update_cache(patch); 362 update_cache(patch);
359 363
360 return crnt_size; 364 return crnt_size;
361 } 365 }
362 366
/*
 * Parse a whole container image: install the equivalence table, then walk
 * the remaining sections and add every valid patch to the cache.
 * Returns UCODE_OK on success, UCODE_ERROR on any parse/alloc failure.
 */
static enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
{
	enum ucode_state ret = UCODE_ERROR;
	unsigned int leftover;
	u8 *fw = (u8 *)data;
	int crnt_size = 0;
	int offset;

	offset = install_equiv_cpu_table(data);
	if (offset < 0) {
		pr_err("failed to create equivalent cpu table\n");
		return ret;
	}
	/* Skip past the equivalence-table section. */
	fw += offset;
	leftover = size - offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return ret;
	}

	/* Each iteration consumes one section-header + patch. */
	while (leftover) {
		crnt_size = verify_and_add_patch(cpu, fw, leftover);
		if (crnt_size < 0)
			return ret;

		fw += crnt_size;
		leftover -= crnt_size;
	}

	return UCODE_OK;
}
396 400
/*
 * AMD microcode firmware naming convention, up to family 15h they are in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
		return UCODE_OK;

	/* Family 15h and later use a family-specific firmware file name. */
	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware(&fw, (const char *)fw_name, device)) {
		pr_err("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
		goto fw_release;
	}

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = load_microcode_amd(cpu, fw->data, fw->size);
	if (ret != UCODE_OK)
		cleanup();

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}
452 456
/* Loading microcode from a userspace buffer is not supported on AMD. */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}
458 462
/*
 * Per-CPU teardown: drop the CPU's reference to its patch.  The patch
 * memory itself is owned by the global cache and freed in free_cache().
 */
static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}
465 469
/* Hooks this driver exports to the generic microcode core. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};
473 477
474 struct microcode_ops * __init init_amd_microcode(void) 478 struct microcode_ops * __init init_amd_microcode(void)
475 { 479 {
476 struct cpuinfo_x86 *c = &cpu_data(0); 480 struct cpuinfo_x86 *c = &cpu_data(0);
477 481
478 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { 482 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
479 pr_warning("AMD CPU family 0x%x not supported\n", c->x86); 483 pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
480 return NULL; 484 return NULL;
481 } 485 }
482 486
483 return &microcode_amd_ops; 487 return &microcode_amd_ops;
484 } 488 }
485 489
/* Module exit: free the equivalence table and the patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}
490 494