Commit 95ffa2438d0e9c48779f0106b1c0eb36165e759c
Committed by: Thomas Gleixner
Parent: 0dbfafa5fc
Exists in: master and 4 other branches
x86: mtrr cleanup for converting continuous to discrete layout, v8
Some BIOSes like to use a continuous MTRR layout, and the X driver then cannot add WB entries for graphics cards when 4 GB or more RAM is installed. This patch changes the MTRR layout to discrete. mtrr_chunk_size= can be used to get a smaller continuous block to hold holes; the default is 256 MB, and it can be set according to the size of the graphics card memory. mtrr_gran_size= can be used to set the smallest MTRR block size, to avoid running out of MTRRs.
v2: fix -1 for UC checking
v3: default to disabled; enable_mtrr_cleanup is needed to enable this feature; skip the var state change warning; remove next_basek in range_to_mtrr()
v4: correct warning mask
v5: CONFIG_MTRR_SANITIZER
v6: fix 1g, 2g, 512m alignment with extra hole
v7: gran_sizek to prevent running out of MTRRs
v8: fix hole_basek calculation broken by removing next_basek; use gran_sizek when basek is 0
"[PATCH] x86: fix trimming e820 with MTRR holes" needs to be applied right after this one.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Showing 5 changed files with 528 additions and 14 deletions
Documentation/kernel-parameters.txt
... | ... | @@ -599,6 +599,20 @@ |
599 | 599 | See drivers/char/README.epca and |
600 | 600 | Documentation/digiepca.txt. |
601 | 601 | |
602 | + disable_mtrr_cleanup [X86] | |
603 | + enable_mtrr_cleanup [X86] | |
604 | + The kernel tries to adjust the MTRR layout from continuous | 
605 | + to discrete, so that the X server driver can add WB | 
606 | + entries later. This parameter enables/disables that. | 
607 | + | |
608 | + mtrr_chunk_size=nn[KMG] [X86] | |
609 | + Used for MTRR cleanup. It is the largest continuous chunk | 
610 | + that can hold holes, i.e. UC entries. | 
611 | + | |
612 | + mtrr_gran_size=nn[KMG] [X86] | |
613 | + Used for MTRR cleanup. It is the granularity of an MTRR block. | 
614 | + A big value can prevent small, unaligned regions using up MTRRs. | 
615 | + | |
602 | 616 | disable_mtrr_trim [X86, Intel and AMD only] |
603 | 617 | By default the kernel will trim any uncacheable |
604 | 618 | memory out of your available memory pool based on |
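As an illustrative usage example (not part of the patch; the values are assumptions for a machine with a 256 MB graphics aperture), the three parameters documented above could be combined on the boot command line like this:

    enable_mtrr_cleanup mtrr_chunk_size=256M mtrr_gran_size=64M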
arch/x86/Kconfig
... | ... | @@ -1092,6 +1092,32 @@ |
1092 | 1092 | |
1093 | 1093 | See <file:Documentation/mtrr.txt> for more information. |
1094 | 1094 | |
1095 | +config MTRR_SANITIZER | |
1096 | + def_bool y | |
1097 | + prompt "MTRR cleanup support" | |
1098 | + depends on MTRR | |
1099 | + help | |
1100 | + Convert the MTRR layout from continuous to discrete, so the | 
1101 | + X driver can add WB entries. | 
1102 | + | |
1103 | + Say N here if you see bootup problems (boot crash, boot hang, | |
1104 | + spontaneous reboots). | |
1105 | + | |
1106 | + This can be disabled with disable_mtrr_cleanup. Also, | 
1107 | + mtrr_chunk_size can be used to set the largest MTRR entry | 
1108 | + size for a continuous block to hold holes (aka UC entries). | 
1109 | + | |
1110 | + If unsure, say Y. | |
1111 | + | |
1112 | +config MTRR_SANITIZER_ENABLE_DEFAULT | |
1113 | + def_bool y | |
1114 | + prompt "Enable MTRR cleanup by default" | |
1115 | + depends on MTRR_SANITIZER | |
1116 | + help | |
1117 | + Enable mtrr cleanup by default | |
1118 | + | |
1119 | + If unsure, say Y. | |
1120 | + | |
1095 | 1121 | config X86_PAT |
1096 | 1122 | bool |
1097 | 1123 | prompt "x86 PAT support" |
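For reference, accepting both new options at their defaults yields this .config fragment (a sketch derived from the def_bool settings above):

    CONFIG_MTRR_SANITIZER=y
    CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=y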
arch/x86/kernel/cpu/mtrr/generic.c
... | ... | @@ -37,7 +37,7 @@ |
37 | 37 | static unsigned long smp_changes_mask; |
38 | 38 | static struct mtrr_state mtrr_state = {}; |
39 | 39 | static int mtrr_state_set; |
40 | -static u64 tom2; | |
40 | +u64 mtrr_tom2; | |
41 | 41 | |
42 | 42 | #undef MODULE_PARAM_PREFIX |
43 | 43 | #define MODULE_PARAM_PREFIX "mtrr." |
... | ... | @@ -139,8 +139,8 @@ |
139 | 139 | } |
140 | 140 | } |
141 | 141 | |
142 | - if (tom2) { | |
143 | - if (start >= (1ULL<<32) && (end < tom2)) | |
142 | + if (mtrr_tom2) { | |
143 | + if (start >= (1ULL<<32) && (end < mtrr_tom2)) | |
144 | 144 | return MTRR_TYPE_WRBACK; |
145 | 145 | } |
146 | 146 | |
... | ... | @@ -158,6 +158,20 @@ |
158 | 158 | rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); |
159 | 159 | } |
160 | 160 | |
161 | +/* fill the MSR pair relating to a var range */ | |
162 | +void fill_mtrr_var_range(unsigned int index, | |
163 | + u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) | |
164 | +{ | |
165 | + struct mtrr_var_range *vr; | |
166 | + | |
167 | + vr = mtrr_state.var_ranges; | |
168 | + | |
169 | + vr[index].base_lo = base_lo; | |
170 | + vr[index].base_hi = base_hi; | |
171 | + vr[index].mask_lo = mask_lo; | |
172 | + vr[index].mask_hi = mask_hi; | |
173 | +} | |
174 | + | |
161 | 175 | static void |
162 | 176 | get_fixed_ranges(mtrr_type * frs) |
163 | 177 | { |
... | ... | @@ -216,10 +230,10 @@ |
216 | 230 | unsigned low, high; |
217 | 231 | /* TOP_MEM2 */ |
218 | 232 | rdmsr(MSR_K8_TOP_MEM2, low, high); |
219 | - tom2 = high; | |
220 | - tom2 <<= 32; | |
221 | - tom2 |= low; | |
222 | - tom2 &= 0xffffff8000000ULL; | |
233 | + mtrr_tom2 = high; | |
234 | + mtrr_tom2 <<= 32; | |
235 | + mtrr_tom2 |= low; | |
236 | + mtrr_tom2 &= 0xffffff8000000ULL; | |
223 | 237 | } |
224 | 238 | if (mtrr_show) { |
225 | 239 | int high_width; |
226 | 240 | |
... | ... | @@ -251,9 +265,9 @@ |
251 | 265 | else |
252 | 266 | printk(KERN_INFO "MTRR %u disabled\n", i); |
253 | 267 | } |
254 | - if (tom2) { | |
268 | + if (mtrr_tom2) { | |
255 | 269 | printk(KERN_INFO "TOM2: %016llx aka %lldM\n", |
256 | - tom2, tom2>>20); | |
270 | + mtrr_tom2, mtrr_tom2>>20); | |
257 | 271 | } |
258 | 272 | } |
259 | 273 | mtrr_state_set = 1; |
arch/x86/kernel/cpu/mtrr/main.c
... | ... | @@ -37,6 +37,7 @@ |
37 | 37 | #include <linux/smp.h> |
38 | 38 | #include <linux/cpu.h> |
39 | 39 | #include <linux/mutex.h> |
40 | +#include <linux/sort.h> | |
40 | 41 | |
41 | 42 | #include <asm/e820.h> |
42 | 43 | #include <asm/mtrr.h> |
... | ... | @@ -609,6 +610,452 @@ |
609 | 610 | .resume = mtrr_restore, |
610 | 611 | }; |
611 | 612 | |
613 | +#ifdef CONFIG_MTRR_SANITIZER | |
614 | + | |
615 | +#ifdef CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT | |
616 | +static int enable_mtrr_cleanup __initdata = 1; | |
617 | +#else | |
618 | +static int enable_mtrr_cleanup __initdata; | |
619 | +#endif | |
620 | + | |
621 | +#else | |
622 | + | |
623 | +static int enable_mtrr_cleanup __initdata = -1; | |
624 | + | |
625 | +#endif | |
626 | + | |
627 | +static int __init disable_mtrr_cleanup_setup(char *str) | |
628 | +{ | |
629 | + if (enable_mtrr_cleanup != -1) | |
630 | + enable_mtrr_cleanup = 0; | |
631 | + return 0; | |
632 | +} | |
633 | +early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup); | |
634 | + | |
635 | +static int __init enable_mtrr_cleanup_setup(char *str) | |
636 | +{ | |
637 | + if (enable_mtrr_cleanup != -1) | |
638 | + enable_mtrr_cleanup = 1; | |
639 | + return 0; | |
640 | +} | |
641 | +early_param("enble_mtrr_cleanup", enable_mtrr_cleanup_setup); | |
642 | + | |
643 | +#define RANGE_NUM 256 | |
644 | + | |
645 | +struct res_range { | |
646 | + unsigned long start; | |
647 | + unsigned long end; | |
648 | +}; | |
649 | + | |
650 | +static int __init add_range(struct res_range *range, int nr_range, unsigned long start, | |
651 | + unsigned long end, int merge) | |
652 | +{ | |
653 | + int i; | |
654 | + | |
655 | + if (!merge) | |
656 | + goto addit; | |
657 | + | |
658 | + /* try to merge it with old one */ | |
659 | + for (i = 0; i < nr_range; i++) { | |
660 | + unsigned long final_start, final_end; | |
661 | + unsigned long common_start, common_end; | |
662 | + | |
663 | + if (!range[i].end) | |
664 | + continue; | |
665 | + | |
666 | + common_start = max(range[i].start, start); | |
667 | + common_end = min(range[i].end, end); | |
668 | + if (common_start > common_end + 1) | |
669 | + continue; | |
670 | + | |
671 | + final_start = min(range[i].start, start); | |
672 | + final_end = max(range[i].end, end); | |
673 | + | |
674 | + range[i].start = final_start; | |
675 | + range[i].end = final_end; | |
676 | + return nr_range; | |
677 | + } | |
678 | + | |
679 | +addit: | |
680 | + /* need to add that */ | |
681 | + if (nr_range >= RANGE_NUM) | |
682 | + return nr_range; | |
683 | + | |
684 | + range[nr_range].start = start; | |
685 | + range[nr_range].end = end; | |
686 | + | |
687 | + nr_range++; | |
688 | + | |
689 | + return nr_range; | |
690 | + | |
691 | +} | |
692 | +static void __init subtract_range(struct res_range *range, unsigned long start, | |
693 | + unsigned long end) | |
694 | +{ | |
695 | + int i; | |
696 | + int j; | |
697 | + | |
698 | + for (j = 0; j < RANGE_NUM; j++) { | |
699 | + if (!range[j].end) | |
700 | + continue; | |
701 | + | |
702 | + if (start <= range[j].start && end >= range[j].end) { | |
703 | + range[j].start = 0; | |
704 | + range[j].end = 0; | |
705 | + continue; | |
706 | + } | |
707 | + | |
708 | + if (start <= range[j].start && end < range[j].end && range[j].start < end + 1) { | |
709 | + range[j].start = end + 1; | |
710 | + continue; | |
711 | + } | |
712 | + | |
713 | + | |
714 | + if (start > range[j].start && end >= range[j].end && range[j].end > start - 1) { | |
715 | + range[j].end = start - 1; | |
716 | + continue; | |
717 | + } | |
718 | + | |
719 | + if (start > range[j].start && end < range[j].end) { | |
720 | + /* find the new spare */ | |
721 | + for (i = 0; i < RANGE_NUM; i++) { | |
722 | + if (range[i].end == 0) | |
723 | + break; | |
724 | + } | |
725 | + if (i < RANGE_NUM) { | |
726 | + range[i].end = range[j].end; | |
727 | + range[i].start = end + 1; | |
728 | + } else { | |
729 | + printk(KERN_ERR "run out of slots in ranges\n"); | 
730 | + } | |
731 | + range[j].end = start - 1; | |
732 | + continue; | |
733 | + } | |
734 | + } | |
735 | +} | |
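To illustrate how the two helpers above cooperate (hypothetical PFN values): x86_get_mtrr_mem_range() first merges the WB entries with add_range(), then punches out each UC entry with subtract_range(), which in this example hits the split case and leaves two ranges:

    /* after add_range():  [0x000000, 0x11ffff]                  */
    /* subtract_range(range, 0x080000, 0x0bffff) splits it into: */
    /*   [0x000000, 0x07ffff]  and  [0x0c0000, 0x11ffff]         */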
736 | + | |
737 | +static int __init cmp_range(const void *x1, const void *x2) | |
738 | +{ | |
739 | + const struct res_range *r1 = x1; | |
740 | + const struct res_range *r2 = x2; | |
741 | + long start1, start2; | |
742 | + | |
743 | + start1 = r1->start; | |
744 | + start2 = r2->start; | |
745 | + | |
746 | + return start1 - start2; | |
747 | +} | |
748 | + | |
749 | +struct var_mtrr_state { | |
750 | + unsigned long range_startk, range_sizek; | |
751 | + unsigned long chunk_sizek; | |
752 | + unsigned long gran_sizek; | |
753 | + unsigned int reg; | |
754 | + unsigned address_bits; | |
755 | +}; | |
756 | + | |
757 | +static void __init set_var_mtrr( | |
758 | + unsigned int reg, unsigned long basek, unsigned long sizek, | |
759 | + unsigned char type, unsigned address_bits) | |
760 | +{ | |
761 | + u32 base_lo, base_hi, mask_lo, mask_hi; | |
762 | + unsigned address_mask_high; | |
763 | + | |
764 | + if (!sizek) { | |
765 | + fill_mtrr_var_range(reg, 0, 0, 0, 0); | |
766 | + return; | |
767 | + } | |
768 | + | |
769 | + address_mask_high = ((1u << (address_bits - 32u)) - 1u); | |
770 | + | |
771 | + base_hi = basek >> 22; | |
772 | + base_lo = basek << 10; | |
773 | + | |
774 | + if (sizek < 4*1024*1024) { | |
775 | + mask_hi = address_mask_high; | |
776 | + mask_lo = ~((sizek << 10) - 1); | |
777 | + } else { | |
778 | + mask_hi = address_mask_high & (~((sizek >> 22) - 1)); | |
779 | + mask_lo = 0; | |
780 | + } | |
781 | + | |
782 | + base_lo |= type; | |
783 | + mask_lo |= 0x800; | |
784 | + fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi); | |
785 | +} | |
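A user-space sketch of the base/mask arithmetic in set_var_mtrr() above (not kernel code; the 2 GB/512 MB WB region and the 36-bit address width are assumed values for the example):

    #include <stdio.h>
    typedef unsigned int u32;

    int main(void)
    {
            unsigned long basek = 2UL << 20;     /* 2 GB expressed in KB   */
            unsigned long sizek = 512UL << 10;   /* 512 MB expressed in KB */
            unsigned address_bits = 36;
            u32 address_mask_high = (1u << (address_bits - 32u)) - 1u;

            u32 base_hi = basek >> 22;           /* bits 32+ of byte address */
            u32 base_lo = basek << 10;           /* low 32 bits              */
            u32 mask_hi = address_mask_high;     /* sizek < 4 GB case        */
            u32 mask_lo = ~((sizek << 10) - 1);

            base_lo |= 6;                        /* MTRR_TYPE_WRBACK */
            mask_lo |= 0x800;                    /* valid bit        */
            /* prints: base 0x80000006/0 mask 0xe0000800/0xf */
            printf("base %#x/%#x mask %#x/%#x\n",
                   base_lo, base_hi, mask_lo, mask_hi);
            return 0;
    }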
786 | + | |
787 | +static unsigned int __init range_to_mtrr(unsigned int reg, | |
788 | + unsigned long range_startk, unsigned long range_sizek, | |
789 | + unsigned char type, unsigned address_bits) | |
790 | +{ | |
791 | + if (!range_sizek || (reg >= num_var_ranges)) | |
792 | + return reg; | |
793 | + | |
794 | + while (range_sizek) { | |
795 | + unsigned long max_align, align; | |
796 | + unsigned long sizek; | |
797 | + /* Compute the maximum size I can make a range */ | |
798 | + if (range_startk) | |
799 | + max_align = ffs(range_startk) - 1; | |
800 | + else | |
801 | + max_align = 32; | |
802 | + align = fls(range_sizek) - 1; | |
803 | + if (align > max_align) | |
804 | + align = max_align; | |
805 | + | |
806 | + sizek = 1 << align; | |
807 | + printk(KERN_INFO "Setting variable MTRR %d, base: %ldMB, range: %ldMB, type %s\n", | |
808 | + reg, range_startk >> 10, sizek >> 10, | |
809 | + (type == MTRR_TYPE_UNCACHABLE)?"UC": | |
810 | + ((type == MTRR_TYPE_WRBACK)?"WB":"Other") | |
811 | + ); | |
812 | + set_var_mtrr(reg++, range_startk, sizek, type, address_bits); | |
813 | + range_startk += sizek; | |
814 | + range_sizek -= sizek; | |
815 | + if (reg >= num_var_ranges) | |
816 | + break; | |
817 | + } | |
818 | + return reg; | |
819 | +} | |
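The loop above performs a greedy power-of-two split: each iteration emits the largest chunk that is both aligned to the current base and no larger than what remains. A stand-alone sketch of the same walk (kernel ffs()/fls() approximated with GCC builtins; the 3.5 GB example range is an assumption):

    /* For a 3.5 GB range at base 0 this prints three entries:
     * 2048M at 0M, 1024M at 2048M, 512M at 3072M. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long startk = 0;
            unsigned long sizek  = 3584UL << 10;  /* 3.5 GB in KB */

            while (sizek) {
                    /* kernel code: ffs(startk) - 1 and fls(sizek) - 1 */
                    unsigned long max_align = startk ?
                            (unsigned long)__builtin_ctzl(startk) : 32;
                    unsigned long align = 63 - __builtin_clzl(sizek);

                    if (align > max_align)
                            align = max_align;
                    printf("base %luM size %luM\n",
                           startk >> 10, (1UL << align) >> 10);
                    startk += 1UL << align;
                    sizek  -= 1UL << align;
            }
            return 0;
    }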
820 | + | |
821 | +static void __init range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek) | |
822 | +{ | |
823 | + unsigned long hole_basek, hole_sizek; | |
824 | + unsigned long range0_basek, range0_sizek; | |
825 | + unsigned long range_basek, range_sizek; | |
826 | + unsigned long chunk_sizek; | |
827 | + unsigned long gran_sizek; | |
828 | + | |
829 | + hole_basek = 0; | |
830 | + hole_sizek = 0; | |
831 | + chunk_sizek = state->chunk_sizek; | |
832 | + gran_sizek = state->gran_sizek; | |
833 | + | |
834 | + /* align to gran size to prevent small blocks from using up MTRRs */ | 
835 | + range_basek = ALIGN(state->range_startk, gran_sizek); | |
836 | + if ((range_basek > basek) && basek) | |
837 | + return; | |
838 | + range_sizek = ALIGN(state->range_sizek - (range_basek - state->range_startk), gran_sizek); | |
839 | + | |
840 | + while (range_basek + range_sizek > (state->range_startk + state->range_sizek)) { | |
841 | + range_sizek -= gran_sizek; | |
842 | + if (!range_sizek) | |
843 | + return; | |
844 | + } | |
845 | + state->range_startk = range_basek; | |
846 | + state->range_sizek = range_sizek; | |
847 | + | |
848 | + /* try to append some small hole */ | |
849 | + range0_basek = state->range_startk; | |
850 | + range0_sizek = ALIGN(state->range_sizek, chunk_sizek); | |
851 | + if ((range0_sizek == state->range_sizek) || | |
852 | + ((range0_basek + range0_sizek - chunk_sizek > basek) && basek)) { | |
853 | + printk(KERN_INFO "rangeX: %016lx - %016lx\n", range0_basek<<10, (range0_basek + state->range_sizek)<<10); | |
854 | + state->reg = range_to_mtrr(state->reg, range0_basek, | |
855 | + state->range_sizek, MTRR_TYPE_WRBACK, state->address_bits); | |
856 | + return; | |
857 | + } | |
858 | + | |
859 | + | |
860 | + range0_sizek -= chunk_sizek; | |
861 | + printk(KERN_INFO "range0: %016lx - %016lx\n", range0_basek<<10, (range0_basek + range0_sizek)<<10); | |
862 | + state->reg = range_to_mtrr(state->reg, range0_basek, | |
863 | + range0_sizek, MTRR_TYPE_WRBACK, state->address_bits); | |
864 | + | |
865 | + range_basek = range0_basek + range0_sizek; | |
866 | + range_sizek = chunk_sizek; | |
867 | + if (range_sizek - (state->range_sizek - range0_sizek) < (chunk_sizek >> 1)) { | |
868 | + hole_sizek = range_sizek - (state->range_sizek - range0_sizek); | |
869 | + hole_basek = range_basek + range_sizek - hole_sizek; | |
870 | + } else | |
871 | + range_sizek = state->range_sizek - range0_sizek; | |
872 | + | |
873 | + printk(KERN_INFO "range: %016lx - %016lx\n", range_basek<<10, (range_basek + range_sizek)<<10); | |
874 | + state->reg = range_to_mtrr(state->reg, range_basek, | |
875 | + range_sizek, MTRR_TYPE_WRBACK, state->address_bits); | |
876 | + if (hole_sizek) { | |
877 | + printk(KERN_INFO "hole: %016lx - %016lx\n", hole_basek<<10, (hole_basek + hole_sizek)<<10); | |
878 | + state->reg = range_to_mtrr(state->reg, hole_basek, | |
879 | + hole_sizek, MTRR_TYPE_UNCACHABLE, state->address_bits); | |
880 | + } | |
881 | +} | |
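A worked example of the hole path above (illustrative numbers; 256 MB is the default chunk size): for a 3264 MB range at base 0, ALIGN() rounds the size up to 3328 MB, so range0 covers the first 3072 MB, the final chunk covers 3072-3328 MB, and because the 64 MB overshoot is less than half a chunk it is handed back as a UC hole. The resulting log would read roughly:

    range0: 0M - 3072M      (WB)
    range:  3072M - 3328M   (WB)
    hole:   3264M - 3328M   (UC)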
882 | + | |
883 | +static void __init set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn, unsigned long size_pfn) | |
884 | +{ | |
885 | + unsigned long basek, sizek; | |
886 | + | |
887 | + if (state->reg >= num_var_ranges) | |
888 | + return; | |
889 | + | |
890 | + basek = base_pfn << (PAGE_SHIFT - 10); | |
891 | + sizek = size_pfn << (PAGE_SHIFT - 10); | |
892 | + | |
893 | + /* See if I can merge with the last range */ | |
894 | + if ((basek <= 1024) || (state->range_startk + state->range_sizek == basek)) { | |
895 | + unsigned long endk = basek + sizek; | |
896 | + state->range_sizek = endk - state->range_startk; | |
897 | + return; | |
898 | + } | |
899 | + /* Write the range mtrrs */ | |
900 | + if (state->range_sizek != 0) { | |
901 | + range_to_mtrr_with_hole(state, basek); | |
902 | + | |
903 | + state->range_startk = 0; | |
904 | + state->range_sizek = 0; | |
905 | + } | |
906 | + /* Allocate an msr */ | |
907 | + state->range_startk = basek; | |
908 | + state->range_sizek = sizek; | |
909 | +} | |
910 | + | |
911 | +/* minimum size of an mtrr block that can take a hole */ | 
912 | +static u64 mtrr_chunk_size __initdata = (256ULL<<20); | |
913 | + | |
914 | +static int __init parse_mtrr_chunk_size_opt(char *p) | |
915 | +{ | |
916 | + if (!p) | |
917 | + return -EINVAL; | |
918 | + mtrr_chunk_size = memparse(p, &p); | |
919 | + return 0; | |
920 | +} | |
921 | +early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); | |
922 | + | |
923 | +/* granularity of an mtrr block */ | 
924 | +static u64 mtrr_gran_size __initdata = (64ULL<<20); | |
925 | + | |
926 | +static int __init parse_mtrr_gran_size_opt(char *p) | |
927 | +{ | |
928 | + if (!p) | |
929 | + return -EINVAL; | |
930 | + mtrr_gran_size = memparse(p, &p); | |
931 | + return 0; | |
932 | +} | |
933 | +early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); | |
934 | + | |
935 | +static void __init x86_setup_var_mtrrs(struct res_range *range, int nr_range, unsigned address_bits) | |
936 | +{ | |
937 | + struct var_mtrr_state var_state; | |
938 | + int i; | |
939 | + | |
940 | + var_state.range_startk = 0; | |
941 | + var_state.range_sizek = 0; | |
942 | + var_state.reg = 0; | |
943 | + var_state.address_bits = address_bits; | |
944 | + var_state.chunk_sizek = mtrr_chunk_size >> 10; | |
945 | + var_state.gran_sizek = mtrr_gran_size >> 10; | |
946 | + | |
947 | + /* Write the range etc */ | |
948 | + for (i = 0; i < nr_range; i++) | |
949 | + set_var_mtrr_range(&var_state, range[i].start, range[i].end - range[i].start + 1); | |
950 | + | |
951 | + /* Write the last range */ | |
952 | + range_to_mtrr_with_hole(&var_state, 0); | |
953 | + printk(KERN_INFO "DONE variable MTRRs\n"); | |
954 | + /* Clear out the extra MTRR's */ | |
955 | + while (var_state.reg < num_var_ranges) | |
956 | + set_var_mtrr(var_state.reg++, 0, 0, 0, var_state.address_bits); | |
957 | +} | |
958 | + | |
959 | +static int __init x86_get_mtrr_mem_range(struct res_range *range, int nr_range, unsigned long extra_remove_base, unsigned long extra_remove_size) | |
960 | +{ | |
961 | + unsigned long i, base, size; | |
962 | + mtrr_type type; | |
963 | + | |
964 | + for (i = 0; i < num_var_ranges; i++) { | |
965 | + mtrr_if->get(i, &base, &size, &type); | |
966 | + if (type != MTRR_TYPE_WRBACK) | |
967 | + continue; | |
968 | + nr_range = add_range(range, nr_range, base, base + size - 1, 1); | |
969 | + } | |
970 | + printk(KERN_INFO "After WB checking\n"); | |
971 | + for (i = 0; i < nr_range; i++) | |
972 | + printk(KERN_INFO "MTRR MAP PFN: %016lx - %016lx\n", range[i].start, range[i].end + 1); | |
973 | + | |
974 | + /* take out UC ranges */ | |
975 | + for (i = 0; i < num_var_ranges; i++) { | |
976 | + mtrr_if->get(i, &base, &size, &type); | |
977 | + if (type != MTRR_TYPE_UNCACHABLE) | |
978 | + continue; | |
979 | + if (!size) | |
980 | + continue; | |
981 | + subtract_range(range, base, base + size - 1); | |
982 | + } | |
983 | + if (extra_remove_size) | |
984 | + subtract_range(range, extra_remove_base, extra_remove_base + extra_remove_size - 1); | |
985 | + | |
986 | + /* get new range num */ | |
987 | + nr_range = 0; | |
988 | + for (i = 0; i < RANGE_NUM; i++) { | |
989 | + if (!range[i].end) | |
990 | + continue; | |
991 | + nr_range++; | |
992 | + } | |
993 | + printk(KERN_INFO "After UC checking\n"); | |
994 | + for (i = 0; i < nr_range; i++) | |
995 | + printk(KERN_INFO "MTRR MAP PFN: %016lx - %016lx\n", range[i].start, range[i].end + 1); | |
996 | + | |
997 | + /* sort the ranges */ | |
998 | + sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | |
999 | + printk(KERN_INFO "After sorting\n"); | |
1000 | + for (i = 0; i < nr_range; i++) | |
1001 | + printk(KERN_INFO "MTRR MAP PFN: %016lx - %016lx\n", range[i].start, range[i].end + 1); | |
1002 | + | |
1003 | + return nr_range; | |
1004 | +} | |
1005 | + | |
1006 | +static int __init mtrr_cleanup(unsigned address_bits) | |
1007 | +{ | |
1008 | + unsigned long i, base, size, def, dummy; | |
1009 | + mtrr_type type; | |
1010 | + struct res_range range[RANGE_NUM]; | |
1011 | + int nr_range; | |
1012 | + unsigned long extra_remove_base, extra_remove_size; | |
1013 | + | |
1014 | + /* one extra slot for the all-zero (unused) entries */ | 
1015 | + int num[MTRR_NUM_TYPES + 1]; | |
1016 | + | |
1017 | + if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) | |
1018 | + return 0; | |
1019 | + rdmsr(MTRRdefType_MSR, def, dummy); | |
1020 | + def &= 0xff; | |
1021 | + if (def != MTRR_TYPE_UNCACHABLE) | |
1022 | + return 0; | |
1023 | + | |
1024 | + /* check entries number */ | |
1025 | + memset(num, 0, sizeof(num)); | |
1026 | + for (i = 0; i < num_var_ranges; i++) { | |
1027 | + mtrr_if->get(i, &base, &size, &type); | |
1028 | + if (type >= MTRR_NUM_TYPES) | |
1029 | + continue; | |
1030 | + if (!size) | |
1031 | + type = MTRR_NUM_TYPES; | |
1032 | + num[type]++; | |
1033 | + } | |
1034 | + | |
1035 | + /* check if we got UC entries */ | |
1036 | + if (!num[MTRR_TYPE_UNCACHABLE]) | |
1037 | + return 0; | |
1038 | + | |
1039 | + /* check if we only had WB and UC */ | |
1040 | + if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | |
1041 | + num_var_ranges - num[MTRR_NUM_TYPES]) | |
1042 | + return 0; | |
1043 | + | |
1044 | + memset(range, 0, sizeof(range)); | |
1045 | + extra_remove_size = 0; | |
1046 | + if (mtrr_tom2) { | |
1047 | + extra_remove_base = 1 << (32 - PAGE_SHIFT); | |
1048 | + extra_remove_size = (mtrr_tom2>>PAGE_SHIFT) - extra_remove_base; | |
1049 | + } | |
1050 | + nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, extra_remove_size); | |
1051 | + | |
1052 | + /* convert ranges to var ranges state */ | |
1053 | + x86_setup_var_mtrrs(range, nr_range, address_bits); | |
1054 | + | |
1055 | + return 1; | |
1056 | + | |
1057 | +} | |
1058 | + | |
612 | 1059 | static int disable_mtrr_trim; |
613 | 1060 | |
614 | 1061 | static int __init disable_mtrr_trim_setup(char *str) |
615 | 1062 | |
616 | 1063 | |
617 | 1064 | |
... | ... | @@ -729,18 +1176,21 @@ |
729 | 1176 | */ |
730 | 1177 | void __init mtrr_bp_init(void) |
731 | 1178 | { |
1179 | + u32 phys_addr; | |
732 | 1180 | init_ifs(); |
733 | 1181 | |
1182 | + phys_addr = 32; | |
1183 | + | |
734 | 1184 | if (cpu_has_mtrr) { |
735 | 1185 | mtrr_if = &generic_mtrr_ops; |
736 | 1186 | size_or_mask = 0xff000000; /* 36 bits */ |
737 | 1187 | size_and_mask = 0x00f00000; |
1188 | + phys_addr = 36; | |
738 | 1189 | |
739 | 1190 | /* This is an AMD specific MSR, but we assume(hope?) that |
740 | 1191 | Intel will implement it to when they extend the address |
741 | 1192 | bus of the Xeon. */ |
742 | 1193 | if (cpuid_eax(0x80000000) >= 0x80000008) { |
743 | - u32 phys_addr; | |
744 | 1194 | phys_addr = cpuid_eax(0x80000008) & 0xff; |
745 | 1195 | /* CPUID workaround for Intel 0F33/0F34 CPU */ |
746 | 1196 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
... | ... | @@ -758,6 +1208,7 @@ |
758 | 1208 | don't support PAE */ |
759 | 1209 | size_or_mask = 0xfff00000; /* 32 bits */ |
760 | 1210 | size_and_mask = 0; |
1211 | + phys_addr = 32; | |
761 | 1212 | } |
762 | 1213 | } else { |
763 | 1214 | switch (boot_cpu_data.x86_vendor) { |
764 | 1215 | |
... | ... | @@ -791,8 +1242,13 @@ |
791 | 1242 | if (mtrr_if) { |
792 | 1243 | set_num_var_ranges(); |
793 | 1244 | init_table(); |
794 | - if (use_intel()) | |
1245 | + if (use_intel()) { | |
795 | 1246 | get_mtrr_state(); |
1247 | + | |
1248 | + if (mtrr_cleanup(phys_addr)) | |
1249 | + mtrr_if->set_all(); | |
1250 | + | |
1251 | + } | |
796 | 1252 | } |
797 | 1253 | } |
798 | 1254 | |
... | ... | @@ -829,9 +1285,10 @@ |
829 | 1285 | { |
830 | 1286 | if (!mtrr_if) |
831 | 1287 | return 0; |
832 | - if (use_intel()) | |
833 | - mtrr_state_warn(); | |
834 | - else { | |
1288 | + if (use_intel()) { | |
1289 | + if (enable_mtrr_cleanup < 1) | |
1290 | + mtrr_state_warn(); | |
1291 | + } else { | |
835 | 1292 | /* The CPUs haven't MTRR and seem to not support SMP. They have |
836 | 1293 | * specific drivers, we use a tricky method to support |
837 | 1294 | * suspend/resume for them. |
arch/x86/kernel/cpu/mtrr/mtrr.h
... | ... | @@ -81,6 +81,8 @@ |
81 | 81 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt); |
82 | 82 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt); |
83 | 83 | |
84 | +void fill_mtrr_var_range(unsigned int index, | |
85 | + u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); | |
84 | 86 | void get_mtrr_state(void); |
85 | 87 | |
86 | 88 | extern void set_mtrr_ops(struct mtrr_ops * ops); |
... | ... | @@ -92,6 +94,7 @@ |
92 | 94 | #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) |
93 | 95 | |
94 | 96 | extern unsigned int num_var_ranges; |
97 | +extern u64 mtrr_tom2; | |
95 | 98 | |
96 | 99 | void mtrr_state_warn(void); |
97 | 100 | const char *mtrr_attrib_to_str(int x); |