Commit f2ab4461249df85b20930a7a57b54f39c5ae291a

Authored by Zachary Amsden
Committed by Linus Torvalds
Parent: 4f0cb8d978

[PATCH] x86: more asm cleanups

Some more assembler cleanups I noticed along the way.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 8 changed files with 35 additions and 50 deletions

arch/i386/kernel/cpu/intel.c
@@ -82,16 +82,13 @@
  */
 static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
 {
-	unsigned int eax;
+	unsigned int eax, ebx, ecx, edx;
 
 	if (c->cpuid_level < 4)
 		return 1;
 
-	__asm__("cpuid"
-		: "=a" (eax)
-		: "0" (4), "c" (0)
-		: "bx", "dx");
-
+	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
+	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 	if (eax & 0x1f)
 		return ((eax >> 26) + 1);
 	else
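For reference, the cpuid_count() helper this switches to already lives in include/asm-i386/processor.h and feeds the sub-leaf through %ecx. A sketch of its likely shape (an approximation, not quoted from the tree):

static inline void cpuid_count(int op, int count, int *eax, int *ebx,
			       int *ecx, int *edx)
{
	/* %ecx selects the sub-leaf; all four outputs are captured,
	 * so no "bx"/"dx" clobber list is needed. */
	__asm__("cpuid"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (op), "c" (count));
}

Because all four registers are declared as outputs, the hand-written clobber list in the old code goes away with it.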
arch/i386/kernel/crash.c
@@ -153,7 +153,7 @@
 	disable_local_APIC();
 	atomic_dec(&waiting_for_crash_ipi);
 	/* Assume hlt works */
-	__asm__("hlt");
+	halt();
 	for(;;);
 
 	return 1;
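halt() is the one-line wrapper from include/asm-i386/system.h that this series standardizes on; its likely definition (an approximation):

/* Probable shape of the helper in include/asm-i386/system.h */
#define halt() __asm__ __volatile__("hlt" : : : "memory")

Note that the helper carries a "memory" clobber the open-coded __asm__("hlt") here lacked; the process.c hunk below shows a call site that had spelled that clobber out by hand.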
arch/i386/kernel/machine_kexec.c
@@ -93,10 +93,7 @@
 	curidt.size = limit;
 	curidt.address = (unsigned long)newidt;
 
-	__asm__ __volatile__ (
-		"lidtl %0\n"
-		: : "m" (curidt)
-		);
+	load_idt(&curidt);
 };
 
 
@@ -108,10 +105,7 @@
 	curgdt.size = limit;
 	curgdt.address = (unsigned long)newgdt;
 
-	__asm__ __volatile__ (
-		"lgdtl %0\n"
-		: : "m" (curgdt)
-		);
+	load_gdt(&curgdt);
 };
 
 static void load_segments(void)
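load_idt() and load_gdt() are the descriptor-table wrappers from include/asm-i386/desc.h; their likely shape (an approximation; the callers above pass a pointer, so the macros dereference their argument):

/* Approximate helpers from include/asm-i386/desc.h */
#define load_gdt(dtr) __asm__ __volatile__("lgdt %0" : : "m" (*(dtr)))
#define load_idt(dtr) __asm__ __volatile__("lidt %0" : : "m" (*(dtr)))

The old code spelled the instructions lidtl/lgdtl; with a memory operand the l suffix is only an AT&T-syntax disambiguator, so the plain mnemonics assemble to the same instructions in 32-bit code.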
arch/i386/kernel/msr.c
@@ -46,23 +46,13 @@
 
 static struct class *msr_class;
 
-/* Note: "err" is handled in a funny way below. Otherwise one version
-   of gcc or another breaks. */
-
 static inline int wrmsr_eio(u32 reg, u32 eax, u32 edx)
 {
 	int err;
 
-	asm volatile ("1: wrmsr\n"
-		      "2:\n"
-		      ".section .fixup,\"ax\"\n"
-		      "3: movl %4,%0\n"
-		      " jmp 2b\n"
-		      ".previous\n"
-		      ".section __ex_table,\"a\"\n"
-		      " .align 4\n" " .long 1b,3b\n" ".previous":"=&bDS" (err)
-		      :"a"(eax), "d"(edx), "c"(reg), "i"(-EIO), "0"(0));
-
+	err = wrmsr_safe(reg, eax, edx);
+	if (err)
+		err = -EIO;
 	return err;
 }
 
@@ -70,18 +60,9 @@
 {
 	int err;
 
-	asm volatile ("1: rdmsr\n"
-		      "2:\n"
-		      ".section .fixup,\"ax\"\n"
-		      "3: movl %4,%0\n"
-		      " jmp 2b\n"
-		      ".previous\n"
-		      ".section __ex_table,\"a\"\n"
-		      " .align 4\n"
-		      " .long 1b,3b\n"
-		      ".previous":"=&bDS" (err), "=a"(*eax), "=d"(*edx)
-		      :"c"(reg), "i"(-EIO), "0"(0));
-
+	err = rdmsr_safe(reg, eax, edx);
+	if (err)
+		err = -EIO;
 	return err;
 }
 
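wrmsr_safe() already exists in include/asm-i386/msr.h (its tail is visible as context in the msr.h hunk below); by symmetry with the new rdmsr_safe() it should look roughly like:

/* Approximate existing counterpart in include/asm-i386/msr.h */
#define wrmsr_safe(msr,a,b) ({ int ret__;			\
	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
		     "1:\n\t"					\
		     ".section .fixup,\"ax\"\n\t"		\
		     "3:  movl %4,%0 ; jmp 1b\n\t"		\
		     ".previous\n\t"				\
		     ".section __ex_table,\"a\"\n"		\
		     " .align 4\n\t"				\
		     " .long 2b,3b\n\t"				\
		     ".previous"				\
		     : "=a" (ret__)				\
		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
	ret__; })

Both _safe variants return -EFAULT when the MSR access faults, which is why the wrappers above remap any nonzero result to the -EIO that the /dev/cpu/*/msr interface has always reported.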
arch/i386/kernel/process.c
@@ -164,7 +164,7 @@
 	 */
 	local_irq_disable();
 	while (1)
-		__asm__ __volatile__("hlt":::"memory");
+		halt();
 }
 #else
 static inline void play_dead(void)
arch/i386/mach-voyager/voyager_basic.c
@@ -234,10 +234,9 @@
 #endif
 	}
 	/* and wait for it to happen */
-	for(;;) {
-		__asm("cli");
-		__asm("hlt");
-	}
+	local_irq_disable();
+	for(;;)
+		halt();
 }
 
 /* copied from process.c */
@@ -278,10 +277,9 @@
 		outb(basebd | 0x08, VOYAGER_MC_SETUP);
 		outb(0x02, catbase + 0x21);
 	}
-	for(;;) {
-		asm("cli");
-		asm("hlt");
-	}
+	local_irq_disable();
+	for(;;)
+		halt();
 }
 
 void
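local_irq_disable() replaces the open-coded cli; on native i386 it reduces to the same instruction plus a compiler barrier, roughly:

/* Approximate definition from include/asm-i386/system.h */
#define local_irq_disable() __asm__ __volatile__("cli" : : : "memory")

Hoisting it out of the loop is also a small correctness cleanup: interrupts only need to be disabled once before spinning on halt().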
arch/i386/mach-voyager/voyager_smp.c
@@ -1015,7 +1015,7 @@
 	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_disable();
 	for(;;)
-		__asm__("hlt");
+		halt();
 }
 
 static DEFINE_SPINLOCK(call_lock);
include/asm-i386/msr.h
@@ -47,6 +47,21 @@
 		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
 	ret__; })
 
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({ int ret__;			\
+	asm volatile("2: rdmsr ; xorl %0,%0\n"			\
+		     "1:\n\t"					\
+		     ".section .fixup,\"ax\"\n\t"		\
+		     "3:  movl %4,%0 ; jmp 1b\n\t"		\
+		     ".previous\n\t"				\
+		     ".section __ex_table,\"a\"\n"		\
+		     " .align 4\n\t"				\
+		     " .long 2b,3b\n\t"				\
+		     ".previous"				\
+		     : "=r" (ret__), "=a" (*(a)), "=d" (*(b))	\
+		     : "c" (msr), "i" (-EFAULT));\
+	ret__; })
+
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
 
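A minimal usage sketch for the new macro (a hypothetical call site, mirroring the rdmsr_eio() wrapper above):

/* Read IA32_APIC_BASE (MSR 0x1b) without oopsing if it faults;
 * rdmsr_safe() evaluates to 0 on success or -EFAULT on a trap. */
u32 lo, hi;
if (rdmsr_safe(0x1b, &lo, &hi))
	printk(KERN_WARNING "rdmsr(0x1b) faulted\n");

The xorl %0,%0 zeroes ret__ on the success path, and the fixup at label 3: overwrites it with -EFAULT when the rdmsr traps; the "=r" output also lets gcc pick any register, replacing the hand-tuned "=&bDS" constraint the msr.c copies carried.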