Blame view

arch/i386/kernel/alternative.c 10.9 KB
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  #include <linux/module.h>
f6a570333   Al Viro   [PATCH] severing ...
  #include <linux/sched.h>
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  #include <linux/spinlock.h>
  #include <linux/list.h>
19d36ccdc   Andi Kleen   x86: Fix alternat...
  #include <linux/kprobes.h>
  #include <linux/mm.h>
  #include <linux/vmalloc.h>
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  #include <asm/alternative.h>
  #include <asm/sections.h>
19d36ccdc   Andi Kleen   x86: Fix alternat...
  #include <asm/pgtable.h>
8f4e956b3   Andi Kleen   x86: Stop MCEs an...
  #include <asm/mce.h>
  #include <asm/nmi.h>
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...

ab144f5ec   Andi Kleen   i386: Make patchi...
  #define MAX_PATCH_LEN (255-1)
09488165d   Jan Beulich   i386: smp-alt-onc...
  #ifdef CONFIG_HOTPLUG_CPU
  static int smp_alt_once;
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...

d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  static int __init bootonly(char *str)
  {
  	smp_alt_once = 1;
  	return 1;
  }
b7fb4af06   Jeremy Fitzhardinge   [PATCH] i386: All...
  __setup("smp-alt-boot", bootonly);
09488165d   Jan Beulich   i386: smp-alt-onc...
  #else
  #define smp_alt_once 1
  #endif
  
  static int debug_alternative;
b7fb4af06   Jeremy Fitzhardinge   [PATCH] i386: All...

d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  static int __init debug_alt(char *str)
  {
  	debug_alternative = 1;
  	return 1;
  }
d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  __setup("debug-alternative", debug_alt);
09488165d   Jan Beulich   i386: smp-alt-onc...
  static int noreplace_smp;
b7fb4af06   Jeremy Fitzhardinge   [PATCH] i386: All...
  static int __init setup_noreplace_smp(char *str)
  {
  	noreplace_smp = 1;
  	return 1;
  }
  __setup("noreplace-smp", setup_noreplace_smp);
959b4fdfe   Jeremy Fitzhardinge   [PATCH] i386: PAR...
  #ifdef CONFIG_PARAVIRT
  static int noreplace_paravirt = 0;
  
  static int __init setup_noreplace_paravirt(char *str)
  {
  	noreplace_paravirt = 1;
  	return 1;
  }
  __setup("noreplace-paravirt", setup_noreplace_paravirt);
  #endif
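
  /*
   * Illustrative note (a sketch, not text from the original file): each of
   * the toggles above is registered with __setup(), so it is enabled by
   * putting the matching keyword on the kernel command line, e.g.
   *
   *	smp-alt-boot debug-alternative noreplace-smp noreplace-paravirt
   *
   * which would patch SMP lock prefixes only once at boot, enable the
   * DPRINTK output below, and skip the SMP-lock and paravirt replacements
   * respectively. The option names come from the __setup() calls above;
   * combining them on one command line is only an illustration.
   */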
b7fb4af06   Jeremy Fitzhardinge   [PATCH] i386: All...

d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  #define DPRINTK(fmt, args...) if (debug_alternative) \
  	printk(KERN_DEBUG fmt, args)
  
  #ifdef GENERIC_NOP1
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  /* Use inline assembly to define this because the nops are defined
     as inline assembly strings in the include files and we cannot
     get them easily into strings. */
  asm("\t.data
  intelnops: "
  	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
  	GENERIC_NOP7 GENERIC_NOP8);
d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  extern unsigned char intelnops[];
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
  	NULL,
  	intelnops,
  	intelnops + 1,
  	intelnops + 1 + 2,
  	intelnops + 1 + 2 + 3,
  	intelnops + 1 + 2 + 3 + 4,
  	intelnops + 1 + 2 + 3 + 4 + 5,
  	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
  	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
  };
d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  #endif
  
  #ifdef K8_NOP1
  asm("\t.data
  k8nops: "
  	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
  	K8_NOP7 K8_NOP8);
  extern unsigned char k8nops[];
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
  	NULL,
  	k8nops,
  	k8nops + 1,
  	k8nops + 1 + 2,
  	k8nops + 1 + 2 + 3,
  	k8nops + 1 + 2 + 3 + 4,
  	k8nops + 1 + 2 + 3 + 4 + 5,
  	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
  	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
  };
d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  #endif
  
  #ifdef K7_NOP1
  asm("\t.data
  k7nops: "
  	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
  	K7_NOP7 K7_NOP8);
  extern unsigned char k7nops[];
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
  	NULL,
  	k7nops,
  	k7nops + 1,
  	k7nops + 1 + 2,
  	k7nops + 1 + 2 + 3,
  	k7nops + 1 + 2 + 3 + 4,
  	k7nops + 1 + 2 + 3 + 4 + 5,
  	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
  	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
  };
d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  #endif
  
  #ifdef CONFIG_X86_64
  
  extern char __vsyscall_0;
  static inline unsigned char** find_nop_table(void)
  {
  	return k8_nops;
  }
  
  #else /* CONFIG_X86_64 */
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  static struct nop {
  	int cpuid;
  	unsigned char **noptable;
  } noptypes[] = {
  	{ X86_FEATURE_K8, k8_nops },
  	{ X86_FEATURE_K7, k7_nops },
  	{ -1, NULL }
  };
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  static unsigned char** find_nop_table(void)
  {
  	unsigned char **noptable = intel_nops;
  	int i;
  
  	for (i = 0; noptypes[i].cpuid >= 0; i++) {
  		if (boot_cpu_has(noptypes[i].cpuid)) {
  			noptable = noptypes[i].noptable;
  			break;
  		}
  	}
  	return noptable;
  }
d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  #endif /* CONFIG_X86_64 */
ab144f5ec   Andi Kleen   i386: Make patchi...
  /* Use this to add nops to a buffer, then text_poke the whole buffer. */
  static void add_nops(void *insns, unsigned int len)
139ec7c41   Rusty Russell   [PATCH] paravirt:...
  {
  	unsigned char **noptable = find_nop_table();
  
  	while (len > 0) {
  		unsigned int noplen = len;
  		if (noplen > ASM_NOP_MAX)
  			noplen = ASM_NOP_MAX;
ab144f5ec   Andi Kleen   i386: Make patchi...
  		memcpy(insns, noptable[noplen], noplen);
139ec7c41   Rusty Russell   [PATCH] paravirt:...
  		insns += noplen;
  		len -= noplen;
  	}
  }
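
  /*
   * Minimal usage sketch (illustrative only; the variable names here are
   * hypothetical): a caller copies the shorter replacement into a scratch
   * buffer, lets add_nops() pad the tail, and writes the whole buffer in
   * one go:
   *
   *	char buf[MAX_PATCH_LEN];
   *	memcpy(buf, replacement, replacementlen);
   *	add_nops(buf + replacementlen, instrlen - replacementlen);
   *	text_poke(instr, buf, instrlen);
   *
   * apply_alternatives() below follows essentially this pattern per entry.
   */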
d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  extern u8 *__smp_locks[], *__smp_locks_end[];
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  /* Replace instructions with better alternatives for this CPU type.
     This runs before SMP is initialized to avoid SMP problems with
     self-modifying code. This implies that asymmetric systems where
     APs have fewer capabilities than the boot processor are not handled.
     Tough. Make sure you disable such features by hand. */
  
  void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
  {
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  	struct alt_instr *a;
ab144f5ec   Andi Kleen   i386: Make patchi...
  	char insnbuf[MAX_PATCH_LEN];
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  
  	DPRINTK("%s: alt table %p -> %p
  ", __FUNCTION__, start, end);
  	for (a = start; a < end; a++) {
ab144f5ec   Andi Kleen   i386: Make patchi...
  		u8 *instr = a->instr;
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  		BUG_ON(a->replacementlen > a->instrlen);
ab144f5ec   Andi Kleen   i386: Make patchi...
  		BUG_ON(a->instrlen > sizeof(insnbuf));
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  		if (!boot_cpu_has(a->cpuid))
  			continue;
d167a5187   Gerd Hoffmann   [PATCH] x86_64: x...
  #ifdef CONFIG_X86_64
  		/* vsyscall code is not mapped yet. resolve it manually. */
  		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
  			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
  			DPRINTK("%s: vsyscall fixup: %p => %p
  ",
  				__FUNCTION__, a->instr, instr);
  		}
  #endif
ab144f5ec   Andi Kleen   i386: Make patchi...
  		memcpy(insnbuf, a->replacement, a->replacementlen);
  		add_nops(insnbuf + a->replacementlen,
  			 a->instrlen - a->replacementlen);
  		text_poke(instr, insnbuf, a->instrlen);
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  	}
  }
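
  /*
   * Context sketch (an assumption about the companion header, not code from
   * this file): the alt_instr entries walked above are normally emitted by
   * the alternative() macro in <asm/alternative.h>, which places the
   * original instruction inline and records the replacement plus the
   * required CPU feature bit in the .altinstructions section. A memory
   * barrier, for example, can be declared roughly as
   *
   *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
   *
   * so that apply_alternatives() rewrites it to mfence on SSE2-capable CPUs.
   */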
8ec4d41f8   Gerd Hoffmann   [PATCH] SMP alter...
  #ifdef CONFIG_SMP
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
  {
  	u8 **ptr;
  
  	for (ptr = start; ptr < end; ptr++) {
  		if (*ptr < text)
  			continue;
  		if (*ptr > text_end)
  			continue;
19d36ccdc   Andi Kleen   x86: Fix alternat...
  		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  	};
  }
  
  static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
  {
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  	u8 **ptr;
ab144f5ec   Andi Kleen   i386: Make patchi...
  	char insn[1];
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...

b7fb4af06   Jeremy Fitzhardinge   [PATCH] i386: All...
  	if (noreplace_smp)
  		return;
ab144f5ec   Andi Kleen   i386: Make patchi...
  	add_nops(insn, 1);
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  	for (ptr = start; ptr < end; ptr++) {
  		if (*ptr < text)
  			continue;
  		if (*ptr > text_end)
  			continue;
ab144f5ec   Andi Kleen   i386: Make patchi...
  		text_poke(*ptr, insn, 1);
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  	};
  }
  
  struct smp_alt_module {
  	/* what is this ??? */
  	struct module	*mod;
  	char		*name;
  
  	/* ptrs to lock prefixes */
  	u8		**locks;
  	u8		**locks_end;
  
  	/* .text segment, needed to avoid patching init code ;) */
  	u8		*text;
  	u8		*text_end;
  
  	struct list_head next;
  };
  static LIST_HEAD(smp_alt_modules);
  static DEFINE_SPINLOCK(smp_alt);
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  void alternatives_smp_module_add(struct module *mod, char *name,
  				 void *locks, void *locks_end,
  				 void *text,  void *text_end)
  {
  	struct smp_alt_module *smp;
  	unsigned long flags;
b7fb4af06   Jeremy Fitzhardinge   [PATCH] i386: All...
  	if (noreplace_smp)
  		return;
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  	if (smp_alt_once) {
  		if (boot_cpu_has(X86_FEATURE_UP))
  			alternatives_smp_unlock(locks, locks_end,
  						text, text_end);
  		return;
  	}
  
  	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
  	if (NULL == smp)
  		return; /* we'll run the (safe but slow) SMP code then ... */
  
  	smp->mod	= mod;
  	smp->name	= name;
  	smp->locks	= locks;
  	smp->locks_end	= locks_end;
  	smp->text	= text;
  	smp->text_end	= text_end;
  	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s
  ",
  		__FUNCTION__, smp->locks, smp->locks_end,
  		smp->text, smp->text_end, smp->name);
  
  	spin_lock_irqsave(&smp_alt, flags);
  	list_add_tail(&smp->next, &smp_alt_modules);
  	if (boot_cpu_has(X86_FEATURE_UP))
  		alternatives_smp_unlock(smp->locks, smp->locks_end,
  					smp->text, smp->text_end);
  	spin_unlock_irqrestore(&smp_alt, flags);
  }
  
  void alternatives_smp_module_del(struct module *mod)
  {
  	struct smp_alt_module *item;
  	unsigned long flags;
b7fb4af06   Jeremy Fitzhardinge   [PATCH] i386: All...
  	if (smp_alt_once || noreplace_smp)
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  		return;
  
  	spin_lock_irqsave(&smp_alt, flags);
  	list_for_each_entry(item, &smp_alt_modules, next) {
  		if (mod != item->mod)
  			continue;
  		list_del(&item->next);
  		spin_unlock_irqrestore(&smp_alt, flags);
  		DPRINTK("%s: %s
  ", __FUNCTION__, item->name);
  		kfree(item);
  		return;
  	}
  	spin_unlock_irqrestore(&smp_alt, flags);
  }
  
  void alternatives_smp_switch(int smp)
  {
  	struct smp_alt_module *mod;
  	unsigned long flags;
3047e99ed   Ingo Molnar   [PATCH] lockdep: ...
  #ifdef CONFIG_LOCKDEP
  	/*
  	 * A not yet fixed binutils section handling bug prevents
  	 * alternatives-replacement from working reliably, so turn
  	 * it off:
  	 */
  	printk("lockdep: not fixing up alternatives.
  ");
  	return;
  #endif
b7fb4af06   Jeremy Fitzhardinge   [PATCH] i386: All...
  	if (noreplace_smp || smp_alt_once)
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  		return;
  	BUG_ON(!smp && (num_online_cpus() > 1));
  
  	spin_lock_irqsave(&smp_alt, flags);
  	if (smp) {
  		printk(KERN_INFO "SMP alternatives: switching to SMP code
  ");
  		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
  		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  		list_for_each_entry(mod, &smp_alt_modules, next)
  			alternatives_smp_lock(mod->locks, mod->locks_end,
  					      mod->text, mod->text_end);
  	} else {
  		printk(KERN_INFO "SMP alternatives: switching to UP code
  ");
  		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
  		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  		list_for_each_entry(mod, &smp_alt_modules, next)
  			alternatives_smp_unlock(mod->locks, mod->locks_end,
  						mod->text, mod->text_end);
  	}
  	spin_unlock_irqrestore(&smp_alt, flags);
  }
8ec4d41f8   Gerd Hoffmann   [PATCH] SMP alter...
  #endif
139ec7c41   Rusty Russell   [PATCH] paravirt:...
  #ifdef CONFIG_PARAVIRT
98de032b6   Jeremy Fitzhardinge   [PATCH] i386: PAR...
  void apply_paravirt(struct paravirt_patch_site *start,
  		    struct paravirt_patch_site *end)
139ec7c41   Rusty Russell   [PATCH] paravirt:...
  {
98de032b6   Jeremy Fitzhardinge   [PATCH] i386: PAR...
  	struct paravirt_patch_site *p;
ab144f5ec   Andi Kleen   i386: Make patchi...
  	char insnbuf[MAX_PATCH_LEN];
139ec7c41   Rusty Russell   [PATCH] paravirt:...

959b4fdfe   Jeremy Fitzhardinge   [PATCH] i386: PAR...
  	if (noreplace_paravirt)
  		return;
139ec7c41   Rusty Russell   [PATCH] paravirt:...
  	for (p = start; p < end; p++) {
  		unsigned int used;
ab144f5ec   Andi Kleen   i386: Make patchi...
  		BUG_ON(p->len > MAX_PATCH_LEN);
d34fda4a8   Chris Wright   x86: properly ini...
  		/* prep the buffer with the original instructions */
  		memcpy(insnbuf, p->instr, p->len);
ab144f5ec   Andi Kleen   i386: Make patchi...
  		used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf,
  					  (unsigned long)p->instr, p->len);
7f63c41c6   Jeremy Fitzhardinge   [PATCH] i386: PAR...

63f70270c   Jeremy Fitzhardinge   [PATCH] i386: PAR...
  		BUG_ON(used > p->len);
139ec7c41   Rusty Russell   [PATCH] paravirt:...
  		/* Pad the rest with nops */
ab144f5ec   Andi Kleen   i386: Make patchi...
  		add_nops(insnbuf + used, p->len - used);
  		text_poke(p->instr, insnbuf, p->len);
139ec7c41   Rusty Russell   [PATCH] paravirt:...
  	}
139ec7c41   Rusty Russell   [PATCH] paravirt:...
  }
98de032b6   Jeremy Fitzhardinge   [PATCH] i386: PAR...
  extern struct paravirt_patch_site __start_parainstructions[],
139ec7c41   Rusty Russell   [PATCH] paravirt:...
  	__stop_parainstructions[];
  #endif	/* CONFIG_PARAVIRT */
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  void __init alternative_instructions(void)
  {
e51959faa   Zachary Amsden   [PATCH] Fix poten...
  	unsigned long flags;
e51959faa   Zachary Amsden   [PATCH] Fix poten...

8f4e956b3   Andi Kleen   x86: Stop MCEs an...
  	/* The patching is not fully atomic, so try to avoid local interruptions
  	   that might execute the code being patched.
  	   Other CPUs are not running. */
  	stop_nmi();
d2d0251f6   Adrian Bunk   i386: really stop...
  #ifdef CONFIG_X86_MCE
8f4e956b3   Andi Kleen   x86: Stop MCEs an...
  	stop_mce();
  #endif
e51959faa   Zachary Amsden   [PATCH] Fix poten...
  	local_irq_save(flags);
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  	apply_alternatives(__alt_instructions, __alt_instructions_end);
  
  	/* switch to patch-once-at-boottime-only mode and free the
  	 * tables in case we know the number of CPUs will never ever
  	 * change */
  #ifdef CONFIG_HOTPLUG_CPU
  	if (num_possible_cpus() < 2)
  		smp_alt_once = 1;
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  #endif
8ec4d41f8   Gerd Hoffmann   [PATCH] SMP alter...
  #ifdef CONFIG_SMP
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  	if (smp_alt_once) {
  		if (1 == num_possible_cpus()) {
  			printk(KERN_INFO "SMP alternatives: switching to UP code
  ");
  			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
  			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
  						_text, _etext);
  		}
  		free_init_pages("SMP alternatives",
e3ebadd95   Linus Torvalds   Revert "[PATCH] x...
  				(unsigned long)__smp_locks,
  				(unsigned long)__smp_locks_end);
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  	} else {
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  		alternatives_smp_module_add(NULL, "core kernel",
  					    __smp_locks, __smp_locks_end,
  					    _text, _etext);
  		alternatives_smp_switch(0);
  	}
8ec4d41f8   Gerd Hoffmann   [PATCH] SMP alter...
  #endif
441d40dca   Jeremy Fitzhardinge   [PATCH] x86: PARA...
   	apply_paravirt(__parainstructions, __parainstructions_end);
e51959faa   Zachary Amsden   [PATCH] Fix poten...
  	local_irq_restore(flags);
8f4e956b3   Andi Kleen   x86: Stop MCEs an...
  
  	restart_nmi();
d2d0251f6   Adrian Bunk   i386: really stop...
  #ifdef CONFIG_X86_MCE
8f4e956b3   Andi Kleen   x86: Stop MCEs an...
  	restart_mce();
  #endif
9a0b5817a   Gerd Hoffmann   [PATCH] x86: SMP ...
  }
19d36ccdc   Andi Kleen   x86: Fix alternat...
  
  /*
   * Warning:
   * When you use this code to patch more than one byte of an instruction
   * you need to make sure that other CPUs cannot execute this code in parallel.
   * Also, no thread may be preempted in the middle of these instructions.
   * And on the local CPU you need to be protected against NMI or MCE handlers
   * seeing an inconsistent instruction while you patch.
   */
602033ed5   Linus Torvalds   Revert most of "x...
  void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
19d36ccdc   Andi Kleen   x86: Fix alternat...
  {
19d36ccdc   Andi Kleen   x86: Fix alternat...
  	memcpy(addr, opcode, len);
  	sync_core();
a534b6791   Andi Kleen   x86_64: Remove CL...
  	/* Could also do a CLFLUSH here to speed up CPU recovery; but
  	   that causes hangs on some VIA CPUs. */
19d36ccdc   Andi Kleen   x86: Fix alternat...
  }
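
  /*
   * Usage sketch (illustrative; "addr" is a hypothetical target): a
   * single-byte patch is the safe case described in the warning above, and
   * it is the pattern alternatives_smp_lock() uses to reinstate a lock
   * prefix at runtime:
   *
   *	unsigned char lock_prefix = 0xf0;
   *	text_poke(addr, &lock_prefix, 1);
   *
   * Patches longer than one byte must only be applied while no other CPU
   * can execute the affected code, which is why apply_alternatives() runs
   * before SMP bringup.
   */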