Commit d239198442d300710a2389449a583edbcfb68581

Authored by Borislav Petkov
Committed by Greg Kroah-Hartman
1 parent 44c4db3ab0

x86, microcode: Reload microcode on resume

commit fbae4ba8c4a387e306adc9c710e5c225cece7678 upstream.

Normally, we do reapply microcode on resume. However, in the cases where
that microcode comes from the early loader and the late loader hasn't
been utilized yet, there's no easy way for us to go and apply the patch
applied during boot by the early loader.

Thus, reuse the patch stashed by the early loader for the BSP.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
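
For orientation, here is a condensed sketch of the resume path this patch creates, stitched together from the core.c and core_early.c hunks below. It is a sketch rather than buildable code: the smp_processor_id()/ucode_cpu_info setup and the microcode_ops, x86_vendor() and x86_family() helpers come from the surrounding kernel sources, not from this diff.

/* core.c: syscore resume callback run on the boot CPU. */
static void mc_bp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid && uci->mc)
		microcode_ops->apply_microcode(cpu);
	else if (!uci->mc)
		/* No late-loader patch cached: reuse the early loader's copy. */
		reload_early_microcode();
}

/* core_early.c: dispatch to the vendor-specific early-patch reload. */
void reload_early_microcode(void)
{
	switch (x86_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family() >= 6)
			reload_ucode_intel();	/* replays the stashed bsp_patch */
		break;
	case X86_VENDOR_AMD:
		if (x86_family() >= 0x10)
			reload_ucode_amd();	/* replays amd_ucode_patch */
		break;
	default:
		break;
	}
}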

Showing 7 changed files with 84 additions and 18 deletions

arch/x86/include/asm/microcode.h
... ... @@ -78,6 +78,7 @@
78 78 extern void __init load_ucode_bsp(void);
79 79 extern void load_ucode_ap(void);
80 80 extern int __init save_microcode_in_initrd(void);
  81 +void reload_early_microcode(void);
81 82 #else
82 83 static inline void __init load_ucode_bsp(void) {}
83 84 static inline void load_ucode_ap(void) {}
... ... @@ -85,6 +86,7 @@
85 86 {
86 87 return 0;
87 88 }
  89 +static inline void reload_early_microcode(void) {}
88 90 #endif
89 91  
90 92 #endif /* _ASM_X86_MICROCODE_H */
arch/x86/include/asm/microcode_amd.h
... ... @@ -68,10 +68,12 @@
68 68 extern void __init load_ucode_amd_bsp(void);
69 69 extern void load_ucode_amd_ap(void);
70 70 extern int __init save_microcode_in_initrd_amd(void);
  71 +void reload_ucode_amd(void);
71 72 #else
72 73 static inline void __init load_ucode_amd_bsp(void) {}
73 74 static inline void load_ucode_amd_ap(void) {}
74 75 static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
  76 +static inline void reload_ucode_amd(void) {}
75 77 #endif
76 78  
77 79 #endif /* _ASM_X86_MICROCODE_AMD_H */
arch/x86/include/asm/microcode_intel.h
... ... @@ -68,11 +68,13 @@
68 68 extern void load_ucode_intel_ap(void);
69 69 extern void show_ucode_info_early(void);
70 70 extern int __init save_microcode_in_initrd_intel(void);
  71 +void reload_ucode_intel(void);
71 72 #else
72 73 static inline __init void load_ucode_intel_bsp(void) {}
73 74 static inline void load_ucode_intel_ap(void) {}
74 75 static inline void show_ucode_info_early(void) {}
75 76 static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
  77 +static inline void reload_ucode_intel(void) {}
76 78 #endif
77 79  
78 80 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
arch/x86/kernel/cpu/microcode/amd_early.c
... ... @@ -402,4 +402,22 @@
402 402  
403 403 return retval;
404 404 }
  405 +
  406 +void reload_ucode_amd(void)
  407 +{
  408 + struct microcode_amd *mc;
  409 + u32 rev, eax;
  410 +
  411 + rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
  412 +
  413 + mc = (struct microcode_amd *)amd_ucode_patch;
  414 +
  415 + if (mc && rev < mc->hdr.patch_id) {
  416 + if (!__apply_microcode_amd(mc)) {
  417 + ucode_new_rev = mc->hdr.patch_id;
  418 + pr_info("microcode: reload patch_level=0x%08x\n",
  419 + ucode_new_rev);
  420 + }
  421 + }
  422 +}
arch/x86/kernel/cpu/microcode/core.c
... ... @@ -465,16 +465,8 @@
465 465  
466 466 if (uci->valid && uci->mc)
467 467 microcode_ops->apply_microcode(cpu);
468   -#ifdef CONFIG_X86_64
469 468 else if (!uci->mc)
470   - /*
471   - * We might resume and not have applied late microcode but still
472   - * have a newer patch stashed from the early loader. We don't
473   - * have it in uci->mc so we have to load it the same way we're
474   - * applying patches early on the APs.
475   - */
476   - load_ucode_ap();
477   -#endif
  469 + reload_early_microcode();
478 470 }
479 471  
480 472 static struct syscore_ops mc_syscore_ops = {
arch/x86/kernel/cpu/microcode/core_early.c
... ... @@ -176,4 +176,25 @@
176 176  
177 177 return 0;
178 178 }
  179 +
  180 +void reload_early_microcode(void)
  181 +{
  182 + int vendor, x86;
  183 +
  184 + vendor = x86_vendor();
  185 + x86 = x86_family();
  186 +
  187 + switch (vendor) {
  188 + case X86_VENDOR_INTEL:
  189 + if (x86 >= 6)
  190 + reload_ucode_intel();
  191 + break;
  192 + case X86_VENDOR_AMD:
  193 + if (x86 >= 0x10)
  194 + reload_ucode_amd();
  195 + break;
  196 + default:
  197 + break;
  198 + }
  199 +}
arch/x86/kernel/cpu/microcode/intel_early.c
... ... @@ -34,6 +34,8 @@
34 34 struct microcode_intel **mc_saved;
35 35 } mc_saved_data;
36 36  
  37 +static struct microcode_intel bsp_patch;
  38 +
37 39 static enum ucode_state
38 40 generic_load_microcode_early(struct microcode_intel **mc_saved_p,
39 41 unsigned int mc_saved_count,
... ... @@ -650,7 +652,7 @@
650 652 }
651 653 #endif
652 654  
653   -static int apply_microcode_early(struct ucode_cpu_info *uci)
  655 +static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
654 656 {
655 657 struct microcode_intel *mc_intel;
656 658 unsigned int val[2];
... ... @@ -679,7 +681,10 @@
679 681 #endif
680 682 uci->cpu_sig.rev = val[1];
681 683  
682   - print_ucode(uci);
  684 + if (early)
  685 + print_ucode(uci);
  686 + else
  687 + print_ucode_info(uci, mc_intel->hdr.date);
683 688  
684 689 return 0;
685 690 }
686 691  
687 692  
... ... @@ -712,14 +717,22 @@
712 717 unsigned long *mc_saved_in_initrd,
713 718 unsigned long initrd_start_early,
714 719 unsigned long initrd_end_early,
715   - struct ucode_cpu_info *uci)
  720 + struct ucode_cpu_info *uci,
  721 + struct microcode_intel *bsp)
716 722 {
  723 + enum ucode_state ret;
  724 +
717 725 collect_cpu_info_early(uci);
718 726 scan_microcode(initrd_start_early, initrd_end_early, mc_saved_data,
719 727 mc_saved_in_initrd, uci);
720   - load_microcode(mc_saved_data, mc_saved_in_initrd,
721   - initrd_start_early, uci);
722   - apply_microcode_early(uci);
  728 +
  729 + ret = load_microcode(mc_saved_data, mc_saved_in_initrd,
  730 + initrd_start_early, uci);
  731 +
  732 + if (ret == UCODE_OK) {
  733 + apply_microcode_early(uci, true);
  734 + memcpy(bsp, uci->mc, sizeof(*bsp));
  735 + }
723 736 }
724 737  
725 738 void __init
726 739  
... ... @@ -728,10 +741,12 @@
728 741 u64 ramdisk_image, ramdisk_size;
729 742 unsigned long initrd_start_early, initrd_end_early;
730 743 struct ucode_cpu_info uci;
  744 + struct microcode_intel *bsp_p;
731 745 #ifdef CONFIG_X86_32
732 746 struct boot_params *boot_params_p;
733 747  
734 748 boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
  749 + bsp_p = (struct microcode_intel *)__pa_nodebug(&bsp_patch);
735 750 ramdisk_image = boot_params_p->hdr.ramdisk_image;
736 751 ramdisk_size = boot_params_p->hdr.ramdisk_size;
737 752 initrd_start_early = ramdisk_image;
738 753  
739 754  
... ... @@ -740,15 +755,17 @@
740 755 _load_ucode_intel_bsp(
741 756 (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
742 757 (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
743   - initrd_start_early, initrd_end_early, &uci);
  758 + initrd_start_early, initrd_end_early, &uci, bsp_p);
744 759 #else
  760 + bsp_p = &bsp_patch;
745 761 ramdisk_image = boot_params.hdr.ramdisk_image;
746 762 ramdisk_size = boot_params.hdr.ramdisk_size;
747 763 initrd_start_early = ramdisk_image + PAGE_OFFSET;
748 764 initrd_end_early = initrd_start_early + ramdisk_size;
749 765  
750 766 _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd,
751   - initrd_start_early, initrd_end_early, &uci);
  767 + initrd_start_early, initrd_end_early,
  768 + &uci, bsp_p);
752 769 #endif
753 770 }
754 771  
... ... @@ -782,6 +799,18 @@
782 799 collect_cpu_info_early(&uci);
783 800 load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
784 801 initrd_start_addr, &uci);
785   - apply_microcode_early(&uci);
  802 + apply_microcode_early(&uci, true);
  803 +}
  804 +
  805 +void reload_ucode_intel(void)
  806 +{
  807 + struct ucode_cpu_info uci;
  808 +
  809 + if (!bsp_patch.hdr.rev)
  810 + return;
  811 +
  812 + uci.mc = &bsp_patch;
  813 +
  814 + apply_microcode_early(&uci, false);
786 815 }
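
On the Intel side the pattern is stash-then-replay: a successful early apply on the BSP is copied into the static bsp_patch, and reload_ucode_intel() replays that copy on resume, printing via print_ucode_info() instead of the boot-time path. Pulled together from the intel_early.c hunks above as a condensed sketch, not a compilable unit:

/* Boot (BSP), inside _load_ucode_intel_bsp(): stash the applied patch. */
	if (ret == UCODE_OK) {
		apply_microcode_early(uci, true);	/* early == true: boot-time print_ucode() */
		memcpy(bsp, uci->mc, sizeof(*bsp));	/* bsp points at the static bsp_patch */
	}

/* Resume: replay the stashed copy, if one was saved during boot. */
void reload_ucode_intel(void)
{
	struct ucode_cpu_info uci;

	if (!bsp_patch.hdr.rev)			/* nothing was stashed at boot */
		return;

	uci.mc = &bsp_patch;
	apply_microcode_early(&uci, false);	/* early == false: print_ucode_info() */
}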