Commit e913b8cd4548cd0a407b7debd6ec9104e84d050d
Exists in
ti-lsk-linux-4.1.y
and in
10 other branches
Merge tag 'microcode_fixes_for_3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp into x86/microcode
Pull x86 microcode fixes from Borislav Petkov: "Reload microcode when resuming and the case when only the early loader has been utilized. Also, do not load the driver on paravirt guests, from Boris Ostrovsky." Signed-off-by: Ingo Molnar <mingo@kernel.org>
Showing 7 changed files Side-by-side Diff
arch/x86/include/asm/microcode.h
... | ... | @@ -78,6 +78,7 @@ |
78 | 78 | extern void __init load_ucode_bsp(void); |
79 | 79 | extern void load_ucode_ap(void); |
80 | 80 | extern int __init save_microcode_in_initrd(void); |
81 | +void reload_early_microcode(void); | |
81 | 82 | #else |
82 | 83 | static inline void __init load_ucode_bsp(void) {} |
83 | 84 | static inline void load_ucode_ap(void) {} |
... | ... | @@ -85,6 +86,7 @@ |
85 | 86 | { |
86 | 87 | return 0; |
87 | 88 | } |
89 | +static inline void reload_early_microcode(void) {} | |
88 | 90 | #endif |
89 | 91 | |
90 | 92 | #endif /* _ASM_X86_MICROCODE_H */ |
arch/x86/include/asm/microcode_amd.h
... | ... | @@ -68,10 +68,12 @@ |
68 | 68 | extern void __init load_ucode_amd_bsp(void); |
69 | 69 | extern void load_ucode_amd_ap(void); |
70 | 70 | extern int __init save_microcode_in_initrd_amd(void); |
71 | +void reload_ucode_amd(void); | |
71 | 72 | #else |
72 | 73 | static inline void __init load_ucode_amd_bsp(void) {} |
73 | 74 | static inline void load_ucode_amd_ap(void) {} |
74 | 75 | static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; } |
76 | +void reload_ucode_amd(void) {} | |
75 | 77 | #endif |
76 | 78 | |
77 | 79 | #endif /* _ASM_X86_MICROCODE_AMD_H */ |
arch/x86/include/asm/microcode_intel.h
... | ... | @@ -68,11 +68,13 @@ |
68 | 68 | extern void load_ucode_intel_ap(void); |
69 | 69 | extern void show_ucode_info_early(void); |
70 | 70 | extern int __init save_microcode_in_initrd_intel(void); |
71 | +void reload_ucode_intel(void); | |
71 | 72 | #else |
72 | 73 | static inline __init void load_ucode_intel_bsp(void) {} |
73 | 74 | static inline void load_ucode_intel_ap(void) {} |
74 | 75 | static inline void show_ucode_info_early(void) {} |
75 | 76 | static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; } |
77 | +static inline void reload_ucode_intel(void) {} | |
76 | 78 | #endif |
77 | 79 | |
78 | 80 | #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) |
arch/x86/kernel/cpu/microcode/amd_early.c
... | ... | @@ -402,4 +402,22 @@ |
402 | 402 | |
403 | 403 | return retval; |
404 | 404 | } |
405 | + | |
406 | +void reload_ucode_amd(void) | |
407 | +{ | |
408 | + struct microcode_amd *mc; | |
409 | + u32 rev, eax; | |
410 | + | |
411 | + rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); | |
412 | + | |
413 | + mc = (struct microcode_amd *)amd_ucode_patch; | |
414 | + | |
415 | + if (mc && rev < mc->hdr.patch_id) { | |
416 | + if (!__apply_microcode_amd(mc)) { | |
417 | + ucode_new_rev = mc->hdr.patch_id; | |
418 | + pr_info("microcode: reload patch_level=0x%08x\n", | |
419 | + ucode_new_rev); | |
420 | + } | |
421 | + } | |
422 | +} |
arch/x86/kernel/cpu/microcode/core.c
... | ... | @@ -466,13 +466,7 @@ |
466 | 466 | if (uci->valid && uci->mc) |
467 | 467 | microcode_ops->apply_microcode(cpu); |
468 | 468 | else if (!uci->mc) |
469 | - /* | |
470 | - * We might resume and not have applied late microcode but still | |
471 | - * have a newer patch stashed from the early loader. We don't | |
472 | - * have it in uci->mc so we have to load it the same way we're | |
473 | - * applying patches early on the APs. | |
474 | - */ | |
475 | - load_ucode_ap(); | |
469 | + reload_early_microcode(); | |
476 | 470 | } |
477 | 471 | |
478 | 472 | static struct syscore_ops mc_syscore_ops = { |
... | ... | @@ -557,7 +551,7 @@ |
557 | 551 | struct cpuinfo_x86 *c = &cpu_data(0); |
558 | 552 | int error; |
559 | 553 | |
560 | - if (dis_ucode_ldr) | |
554 | + if (paravirt_enabled() || dis_ucode_ldr) | |
561 | 555 | return 0; |
562 | 556 | |
563 | 557 | if (c->x86_vendor == X86_VENDOR_INTEL) |
arch/x86/kernel/cpu/microcode/core_early.c
... | ... | @@ -176,4 +176,25 @@ |
176 | 176 | |
177 | 177 | return 0; |
178 | 178 | } |
179 | + | |
180 | +void reload_early_microcode(void) | |
181 | +{ | |
182 | + int vendor, x86; | |
183 | + | |
184 | + vendor = x86_vendor(); | |
185 | + x86 = x86_family(); | |
186 | + | |
187 | + switch (vendor) { | |
188 | + case X86_VENDOR_INTEL: | |
189 | + if (x86 >= 6) | |
190 | + reload_ucode_intel(); | |
191 | + break; | |
192 | + case X86_VENDOR_AMD: | |
193 | + if (x86 >= 0x10) | |
194 | + reload_ucode_amd(); | |
195 | + break; | |
196 | + default: | |
197 | + break; | |
198 | + } | |
199 | +} |
arch/x86/kernel/cpu/microcode/intel_early.c
... | ... | @@ -34,6 +34,8 @@ |
34 | 34 | struct microcode_intel **mc_saved; |
35 | 35 | } mc_saved_data; |
36 | 36 | |
37 | +static struct microcode_intel bsp_patch; | |
38 | + | |
37 | 39 | static enum ucode_state |
38 | 40 | generic_load_microcode_early(struct microcode_intel **mc_saved_p, |
39 | 41 | unsigned int mc_saved_count, |
... | ... | @@ -650,8 +652,7 @@ |
650 | 652 | } |
651 | 653 | #endif |
652 | 654 | |
653 | -static int apply_microcode_early(struct mc_saved_data *mc_saved_data, | |
654 | - struct ucode_cpu_info *uci) | |
655 | +static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) | |
655 | 656 | { |
656 | 657 | struct microcode_intel *mc_intel; |
657 | 658 | unsigned int val[2]; |
... | ... | @@ -680,7 +681,10 @@ |
680 | 681 | #endif |
681 | 682 | uci->cpu_sig.rev = val[1]; |
682 | 683 | |
683 | - print_ucode(uci); | |
684 | + if (early) | |
685 | + print_ucode(uci); | |
686 | + else | |
687 | + print_ucode_info(uci, mc_intel->hdr.date); | |
684 | 688 | |
685 | 689 | return 0; |
686 | 690 | } |
687 | 691 | |
688 | 692 | |
... | ... | @@ -713,14 +717,22 @@ |
713 | 717 | unsigned long *mc_saved_in_initrd, |
714 | 718 | unsigned long initrd_start_early, |
715 | 719 | unsigned long initrd_end_early, |
716 | - struct ucode_cpu_info *uci) | |
720 | + struct ucode_cpu_info *uci, | |
721 | + struct microcode_intel *bsp) | |
717 | 722 | { |
723 | + enum ucode_state ret; | |
724 | + | |
718 | 725 | collect_cpu_info_early(uci); |
719 | 726 | scan_microcode(initrd_start_early, initrd_end_early, mc_saved_data, |
720 | 727 | mc_saved_in_initrd, uci); |
721 | - load_microcode(mc_saved_data, mc_saved_in_initrd, | |
722 | - initrd_start_early, uci); | |
723 | - apply_microcode_early(mc_saved_data, uci); | |
728 | + | |
729 | + ret = load_microcode(mc_saved_data, mc_saved_in_initrd, | |
730 | + initrd_start_early, uci); | |
731 | + | |
732 | + if (ret == UCODE_OK) { | |
733 | + apply_microcode_early(uci, true); | |
734 | + memcpy(bsp, uci->mc, sizeof(*bsp)); | |
735 | + } | |
724 | 736 | } |
725 | 737 | |
726 | 738 | void __init |
727 | 739 | |
... | ... | @@ -729,10 +741,12 @@ |
729 | 741 | u64 ramdisk_image, ramdisk_size; |
730 | 742 | unsigned long initrd_start_early, initrd_end_early; |
731 | 743 | struct ucode_cpu_info uci; |
744 | + struct microcode_intel *bsp_p; | |
732 | 745 | #ifdef CONFIG_X86_32 |
733 | 746 | struct boot_params *boot_params_p; |
734 | 747 | |
735 | 748 | boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params); |
749 | + bsp_p = (struct microcode_intel *)__pa_nodebug(&bsp_patch); | |
736 | 750 | ramdisk_image = boot_params_p->hdr.ramdisk_image; |
737 | 751 | ramdisk_size = boot_params_p->hdr.ramdisk_size; |
738 | 752 | initrd_start_early = ramdisk_image; |
739 | 753 | |
740 | 754 | |
... | ... | @@ -741,15 +755,17 @@ |
741 | 755 | _load_ucode_intel_bsp( |
742 | 756 | (struct mc_saved_data *)__pa_nodebug(&mc_saved_data), |
743 | 757 | (unsigned long *)__pa_nodebug(&mc_saved_in_initrd), |
744 | - initrd_start_early, initrd_end_early, &uci); | |
758 | + initrd_start_early, initrd_end_early, &uci, bsp_p); | |
745 | 759 | #else |
760 | + bsp_p = &bsp_patch; | |
746 | 761 | ramdisk_image = boot_params.hdr.ramdisk_image; |
747 | 762 | ramdisk_size = boot_params.hdr.ramdisk_size; |
748 | 763 | initrd_start_early = ramdisk_image + PAGE_OFFSET; |
749 | 764 | initrd_end_early = initrd_start_early + ramdisk_size; |
750 | 765 | |
751 | 766 | _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, |
752 | - initrd_start_early, initrd_end_early, &uci); | |
767 | + initrd_start_early, initrd_end_early, | |
768 | + &uci, bsp_p); | |
753 | 769 | #endif |
754 | 770 | } |
755 | 771 | |
... | ... | @@ -783,6 +799,18 @@ |
783 | 799 | collect_cpu_info_early(&uci); |
784 | 800 | load_microcode(mc_saved_data_p, mc_saved_in_initrd_p, |
785 | 801 | initrd_start_addr, &uci); |
786 | - apply_microcode_early(mc_saved_data_p, &uci); | |
802 | + apply_microcode_early(&uci, true); | |
803 | +} | |
804 | + | |
805 | +void reload_ucode_intel(void) | |
806 | +{ | |
807 | + struct ucode_cpu_info uci; | |
808 | + | |
809 | + if (!bsp_patch.hdr.rev) | |
810 | + return; | |
811 | + | |
812 | + uci.mc = &bsp_patch; | |
813 | + | |
814 | + apply_microcode_early(&uci, false); | |
787 | 815 | } |