arch/x86/power/cpu.c
/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/xcr.h>
#include <asm/suspend.h>
#include <asm/debugreg.h>

#ifdef CONFIG_X86_32
static struct saved_context saved_context;

unsigned long saved_context_ebx;
unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
#else
/* CONFIG_X86_64 */
struct saved_context saved_context;
#endif
|
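/*
 * On 32-bit the general-purpose registers are spilled to the named globals
 * above because the low-level resume code restores them by symbol name
 * (see, e.g., arch/x86/power/hibernate_asm_32.S).
 */
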
/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt - structure to store the registers contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (ie. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (ie. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and different
 *	kernel B is used for loading the hibernation image into memory, the
 *	kernel A's __save_processor_state() function must save all registers
 *	needed by kernel A, so that it can operate correctly after the resume
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_gdt(&ctxt->gdt);
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

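	/*
	 * On 64-bit the FS/GS segment bases live in MSRs rather than in the
	 * descriptor table, so saving the selectors alone would not be
	 * enough; the rdmsrl() calls above capture the bases explicitly.
	 */
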
	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
#ifdef CONFIG_X86_32
	ctxt->cr4 = read_cr4_safe();
#else
/* CONFIG_X86_64 */
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
#endif
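	/*
	 * rdmsrl_safe() returns 0 on success, so misc_enable_saved ends up
	 * true only when MSR_IA32_MISC_ENABLE is actually readable on this
	 * CPU; the restore path checks the flag before writing it back.
	 */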
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; should not be
				 * necessary. But... This is necessary, because
				 * 386 hardware has concept of busy TSS or some
				 * similar stupidity.
				 */

#ifdef CONFIG_X86_64
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */
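
	/*
	 * Note: the TSS descriptor is forced back to type 9 ("available")
	 * above because ltr sets the busy bit in the GDT entry, and loading
	 * an already-busy TSS raises #GP.
	 */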
}

/**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt - structure to load the registers contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif
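
	/*
	 * On 64-bit, %gs is restored with load_gs_index() rather than a
	 * plain selector move: writing the selector directly would clobber
	 * the GS base the kernel is currently running on. The explicit MSR
	 * writes above then put the FS/GS bases back in place.
	 */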

	/*
	 * restore XCR0 for xsave capable cpu's.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
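	/*
	 * pcntxt_mask is the mask of xstate features enabled at boot;
	 * XCR0 has to be valid again before do_fpu_end() below restores
	 * the FPU/extended state.
	 */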

	fix_processor_context();

	do_fpu_end();
	mtrr_bp_restore();
}

/* Needed by apm.c */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
	restore_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif