Commit 33215652e4a75dfa8adb20f4d741517457b0da2b
Committed by Linus Torvalds
1 parent ade31f38f2
Exists in master and in 7 other branches
[PATCH] qualifiers in return types - easy cases
a bunch of functions switched from volatile to __attribute__((noreturn)) and from const to __attribute_pure__

Signed-off-by: Al Viro <viro@parcelfarce.linux.theplanet.co.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 6 changed files with 8 additions and 8 deletions (inline diff)
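For context: older GCC accepted a `volatile` qualifier on a function's return type as a hint that the function never returns, and a `const` qualifier as a hint that it has no side effects; newer compilers warn that such qualifiers on return types are ignored, so this patch states the same intent with explicit attributes (`__attribute_pure__` being the kernel's wrapper macro for `__attribute__((pure))`). A minimal before/after sketch of both conversions — `die_here` and `name_hash` are made-up names for illustration, not functions touched by this commit; the `__bug()` hunk in traps.c below is the concrete instance of the noreturn half:

/* old style: qualifiers on the return type carried the meaning */
volatile void die_here(const char *msg);        /* "volatile" meant: never returns  */
const unsigned long name_hash(const char *s);   /* "const" meant: no side effects   */

/* new style, as in this patch: explicit GCC attributes */
void __attribute__((noreturn)) die_here(const char *msg);
unsigned long __attribute_pure__ name_hash(const char *s);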
arch/arm/kernel/traps.c
1 | /* | 1 | /* |
2 | * linux/arch/arm/kernel/traps.c | 2 | * linux/arch/arm/kernel/traps.c |
3 | * | 3 | * |
4 | * Copyright (C) 1995-2002 Russell King | 4 | * Copyright (C) 1995-2002 Russell King |
5 | * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds | 5 | * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * 'traps.c' handles hardware exceptions after we have saved some state in | 11 | * 'traps.c' handles hardware exceptions after we have saved some state in |
12 | * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably | 12 | * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably |
13 | * kill the offending process. | 13 | * kill the offending process. |
14 | */ | 14 | */ |
15 | #include <linux/config.h> | 15 | #include <linux/config.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/signal.h> | 17 | #include <linux/signal.h> |
18 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
19 | #include <linux/personality.h> | 19 | #include <linux/personality.h> |
20 | #include <linux/ptrace.h> | 20 | #include <linux/ptrace.h> |
21 | #include <linux/kallsyms.h> | 21 | #include <linux/kallsyms.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | 23 | ||
24 | #include <asm/atomic.h> | 24 | #include <asm/atomic.h> |
25 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | #include <asm/system.h> | 27 | #include <asm/system.h> |
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/unistd.h> | 29 | #include <asm/unistd.h> |
30 | #include <asm/traps.h> | 30 | #include <asm/traps.h> |
31 | 31 | ||
32 | #include "ptrace.h" | 32 | #include "ptrace.h" |
33 | #include "signal.h" | 33 | #include "signal.h" |
34 | 34 | ||
35 | const char *processor_modes[]= | 35 | const char *processor_modes[]= |
36 | { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , | 36 | { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , |
37 | "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", | 37 | "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", |
38 | "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" , | 38 | "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" , |
39 | "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" | 39 | "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" |
40 | }; | 40 | }; |
41 | 41 | ||
42 | static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; | 42 | static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; |
43 | 43 | ||
44 | #ifdef CONFIG_DEBUG_USER | 44 | #ifdef CONFIG_DEBUG_USER |
45 | unsigned int user_debug; | 45 | unsigned int user_debug; |
46 | 46 | ||
47 | static int __init user_debug_setup(char *str) | 47 | static int __init user_debug_setup(char *str) |
48 | { | 48 | { |
49 | get_option(&str, &user_debug); | 49 | get_option(&str, &user_debug); |
50 | return 1; | 50 | return 1; |
51 | } | 51 | } |
52 | __setup("user_debug=", user_debug_setup); | 52 | __setup("user_debug=", user_debug_setup); |
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | void dump_backtrace_entry(unsigned long where, unsigned long from) | 55 | void dump_backtrace_entry(unsigned long where, unsigned long from) |
56 | { | 56 | { |
57 | #ifdef CONFIG_KALLSYMS | 57 | #ifdef CONFIG_KALLSYMS |
58 | printk("[<%08lx>] ", where); | 58 | printk("[<%08lx>] ", where); |
59 | print_symbol("(%s) ", where); | 59 | print_symbol("(%s) ", where); |
60 | printk("from [<%08lx>] ", from); | 60 | printk("from [<%08lx>] ", from); |
61 | print_symbol("(%s)\n", from); | 61 | print_symbol("(%s)\n", from); |
62 | #else | 62 | #else |
63 | printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); | 63 | printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); |
64 | #endif | 64 | #endif |
65 | } | 65 | } |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * Stack pointers should always be within the kernel's view of | 68 | * Stack pointers should always be within the kernel's view of |
69 | * physical memory. If it is not there, then we can't dump | 69 | * physical memory. If it is not there, then we can't dump |
70 | * out any information relating to the stack. | 70 | * out any information relating to the stack. |
71 | */ | 71 | */ |
72 | static int verify_stack(unsigned long sp) | 72 | static int verify_stack(unsigned long sp) |
73 | { | 73 | { |
74 | if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0)) | 74 | if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0)) |
75 | return -EFAULT; | 75 | return -EFAULT; |
76 | 76 | ||
77 | return 0; | 77 | return 0; |
78 | } | 78 | } |
79 | 79 | ||
80 | /* | 80 | /* |
81 | * Dump out the contents of some memory nicely... | 81 | * Dump out the contents of some memory nicely... |
82 | */ | 82 | */ |
83 | static void dump_mem(const char *str, unsigned long bottom, unsigned long top) | 83 | static void dump_mem(const char *str, unsigned long bottom, unsigned long top) |
84 | { | 84 | { |
85 | unsigned long p = bottom & ~31; | 85 | unsigned long p = bottom & ~31; |
86 | mm_segment_t fs; | 86 | mm_segment_t fs; |
87 | int i; | 87 | int i; |
88 | 88 | ||
89 | /* | 89 | /* |
90 | * We need to switch to kernel mode so that we can use __get_user | 90 | * We need to switch to kernel mode so that we can use __get_user |
91 | * to safely read from kernel space. Note that we now dump the | 91 | * to safely read from kernel space. Note that we now dump the |
92 | * code first, just in case the backtrace kills us. | 92 | * code first, just in case the backtrace kills us. |
93 | */ | 93 | */ |
94 | fs = get_fs(); | 94 | fs = get_fs(); |
95 | set_fs(KERNEL_DS); | 95 | set_fs(KERNEL_DS); |
96 | 96 | ||
97 | printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); | 97 | printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); |
98 | 98 | ||
99 | for (p = bottom & ~31; p < top;) { | 99 | for (p = bottom & ~31; p < top;) { |
100 | printk("%04lx: ", p & 0xffff); | 100 | printk("%04lx: ", p & 0xffff); |
101 | 101 | ||
102 | for (i = 0; i < 8; i++, p += 4) { | 102 | for (i = 0; i < 8; i++, p += 4) { |
103 | unsigned int val; | 103 | unsigned int val; |
104 | 104 | ||
105 | if (p < bottom || p >= top) | 105 | if (p < bottom || p >= top) |
106 | printk(" "); | 106 | printk(" "); |
107 | else { | 107 | else { |
108 | __get_user(val, (unsigned long *)p); | 108 | __get_user(val, (unsigned long *)p); |
109 | printk("%08x ", val); | 109 | printk("%08x ", val); |
110 | } | 110 | } |
111 | } | 111 | } |
112 | printk ("\n"); | 112 | printk ("\n"); |
113 | } | 113 | } |
114 | 114 | ||
115 | set_fs(fs); | 115 | set_fs(fs); |
116 | } | 116 | } |
117 | 117 | ||
118 | static void dump_instr(struct pt_regs *regs) | 118 | static void dump_instr(struct pt_regs *regs) |
119 | { | 119 | { |
120 | unsigned long addr = instruction_pointer(regs); | 120 | unsigned long addr = instruction_pointer(regs); |
121 | const int thumb = thumb_mode(regs); | 121 | const int thumb = thumb_mode(regs); |
122 | const int width = thumb ? 4 : 8; | 122 | const int width = thumb ? 4 : 8; |
123 | mm_segment_t fs; | 123 | mm_segment_t fs; |
124 | int i; | 124 | int i; |
125 | 125 | ||
126 | /* | 126 | /* |
127 | * We need to switch to kernel mode so that we can use __get_user | 127 | * We need to switch to kernel mode so that we can use __get_user |
128 | * to safely read from kernel space. Note that we now dump the | 128 | * to safely read from kernel space. Note that we now dump the |
129 | * code first, just in case the backtrace kills us. | 129 | * code first, just in case the backtrace kills us. |
130 | */ | 130 | */ |
131 | fs = get_fs(); | 131 | fs = get_fs(); |
132 | set_fs(KERNEL_DS); | 132 | set_fs(KERNEL_DS); |
133 | 133 | ||
134 | printk("Code: "); | 134 | printk("Code: "); |
135 | for (i = -4; i < 1; i++) { | 135 | for (i = -4; i < 1; i++) { |
136 | unsigned int val, bad; | 136 | unsigned int val, bad; |
137 | 137 | ||
138 | if (thumb) | 138 | if (thumb) |
139 | bad = __get_user(val, &((u16 *)addr)[i]); | 139 | bad = __get_user(val, &((u16 *)addr)[i]); |
140 | else | 140 | else |
141 | bad = __get_user(val, &((u32 *)addr)[i]); | 141 | bad = __get_user(val, &((u32 *)addr)[i]); |
142 | 142 | ||
143 | if (!bad) | 143 | if (!bad) |
144 | printk(i == 0 ? "(%0*x) " : "%0*x ", width, val); | 144 | printk(i == 0 ? "(%0*x) " : "%0*x ", width, val); |
145 | else { | 145 | else { |
146 | printk("bad PC value."); | 146 | printk("bad PC value."); |
147 | break; | 147 | break; |
148 | } | 148 | } |
149 | } | 149 | } |
150 | printk("\n"); | 150 | printk("\n"); |
151 | 151 | ||
152 | set_fs(fs); | 152 | set_fs(fs); |
153 | } | 153 | } |
154 | 154 | ||
155 | static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) | 155 | static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) |
156 | { | 156 | { |
157 | unsigned int fp; | 157 | unsigned int fp; |
158 | int ok = 1; | 158 | int ok = 1; |
159 | 159 | ||
160 | printk("Backtrace: "); | 160 | printk("Backtrace: "); |
161 | fp = regs->ARM_fp; | 161 | fp = regs->ARM_fp; |
162 | if (!fp) { | 162 | if (!fp) { |
163 | printk("no frame pointer"); | 163 | printk("no frame pointer"); |
164 | ok = 0; | 164 | ok = 0; |
165 | } else if (verify_stack(fp)) { | 165 | } else if (verify_stack(fp)) { |
166 | printk("invalid frame pointer 0x%08x", fp); | 166 | printk("invalid frame pointer 0x%08x", fp); |
167 | ok = 0; | 167 | ok = 0; |
168 | } else if (fp < (unsigned long)(tsk->thread_info + 1)) | 168 | } else if (fp < (unsigned long)(tsk->thread_info + 1)) |
169 | printk("frame pointer underflow"); | 169 | printk("frame pointer underflow"); |
170 | printk("\n"); | 170 | printk("\n"); |
171 | 171 | ||
172 | if (ok) | 172 | if (ok) |
173 | c_backtrace(fp, processor_mode(regs)); | 173 | c_backtrace(fp, processor_mode(regs)); |
174 | } | 174 | } |
175 | 175 | ||
176 | void dump_stack(void) | 176 | void dump_stack(void) |
177 | { | 177 | { |
178 | #ifdef CONFIG_DEBUG_ERRORS | 178 | #ifdef CONFIG_DEBUG_ERRORS |
179 | __backtrace(); | 179 | __backtrace(); |
180 | #endif | 180 | #endif |
181 | } | 181 | } |
182 | 182 | ||
183 | EXPORT_SYMBOL(dump_stack); | 183 | EXPORT_SYMBOL(dump_stack); |
184 | 184 | ||
185 | void show_stack(struct task_struct *tsk, unsigned long *sp) | 185 | void show_stack(struct task_struct *tsk, unsigned long *sp) |
186 | { | 186 | { |
187 | unsigned long fp; | 187 | unsigned long fp; |
188 | 188 | ||
189 | if (!tsk) | 189 | if (!tsk) |
190 | tsk = current; | 190 | tsk = current; |
191 | 191 | ||
192 | if (tsk != current) | 192 | if (tsk != current) |
193 | fp = thread_saved_fp(tsk); | 193 | fp = thread_saved_fp(tsk); |
194 | else | 194 | else |
195 | asm("mov%? %0, fp" : "=r" (fp)); | 195 | asm("mov%? %0, fp" : "=r" (fp)); |
196 | 196 | ||
197 | c_backtrace(fp, 0x10); | 197 | c_backtrace(fp, 0x10); |
198 | barrier(); | 198 | barrier(); |
199 | } | 199 | } |
200 | 200 | ||
201 | DEFINE_SPINLOCK(die_lock); | 201 | DEFINE_SPINLOCK(die_lock); |
202 | 202 | ||
203 | /* | 203 | /* |
204 | * This function is protected against re-entrancy. | 204 | * This function is protected against re-entrancy. |
205 | */ | 205 | */ |
206 | NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) | 206 | NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) |
207 | { | 207 | { |
208 | struct task_struct *tsk = current; | 208 | struct task_struct *tsk = current; |
209 | static int die_counter; | 209 | static int die_counter; |
210 | 210 | ||
211 | console_verbose(); | 211 | console_verbose(); |
212 | spin_lock_irq(&die_lock); | 212 | spin_lock_irq(&die_lock); |
213 | bust_spinlocks(1); | 213 | bust_spinlocks(1); |
214 | 214 | ||
215 | printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter); | 215 | printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter); |
216 | print_modules(); | 216 | print_modules(); |
217 | __show_regs(regs); | 217 | __show_regs(regs); |
218 | printk("Process %s (pid: %d, stack limit = 0x%p)\n", | 218 | printk("Process %s (pid: %d, stack limit = 0x%p)\n", |
219 | tsk->comm, tsk->pid, tsk->thread_info + 1); | 219 | tsk->comm, tsk->pid, tsk->thread_info + 1); |
220 | 220 | ||
221 | if (!user_mode(regs) || in_interrupt()) { | 221 | if (!user_mode(regs) || in_interrupt()) { |
222 | dump_mem("Stack: ", regs->ARM_sp, | 222 | dump_mem("Stack: ", regs->ARM_sp, |
223 | THREAD_SIZE + (unsigned long)tsk->thread_info); | 223 | THREAD_SIZE + (unsigned long)tsk->thread_info); |
224 | dump_backtrace(regs, tsk); | 224 | dump_backtrace(regs, tsk); |
225 | dump_instr(regs); | 225 | dump_instr(regs); |
226 | } | 226 | } |
227 | 227 | ||
228 | bust_spinlocks(0); | 228 | bust_spinlocks(0); |
229 | spin_unlock_irq(&die_lock); | 229 | spin_unlock_irq(&die_lock); |
230 | do_exit(SIGSEGV); | 230 | do_exit(SIGSEGV); |
231 | } | 231 | } |
232 | 232 | ||
233 | void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, | 233 | void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, |
234 | unsigned long err, unsigned long trap) | 234 | unsigned long err, unsigned long trap) |
235 | { | 235 | { |
236 | if (user_mode(regs)) { | 236 | if (user_mode(regs)) { |
237 | current->thread.error_code = err; | 237 | current->thread.error_code = err; |
238 | current->thread.trap_no = trap; | 238 | current->thread.trap_no = trap; |
239 | 239 | ||
240 | force_sig_info(info->si_signo, info, current); | 240 | force_sig_info(info->si_signo, info, current); |
241 | } else { | 241 | } else { |
242 | die(str, regs, err); | 242 | die(str, regs, err); |
243 | } | 243 | } |
244 | } | 244 | } |
245 | 245 | ||
246 | static LIST_HEAD(undef_hook); | 246 | static LIST_HEAD(undef_hook); |
247 | static DEFINE_SPINLOCK(undef_lock); | 247 | static DEFINE_SPINLOCK(undef_lock); |
248 | 248 | ||
249 | void register_undef_hook(struct undef_hook *hook) | 249 | void register_undef_hook(struct undef_hook *hook) |
250 | { | 250 | { |
251 | unsigned long flags; | 251 | unsigned long flags; |
252 | 252 | ||
253 | spin_lock_irqsave(&undef_lock, flags); | 253 | spin_lock_irqsave(&undef_lock, flags); |
254 | list_add(&hook->node, &undef_hook); | 254 | list_add(&hook->node, &undef_hook); |
255 | spin_unlock_irqrestore(&undef_lock, flags); | 255 | spin_unlock_irqrestore(&undef_lock, flags); |
256 | } | 256 | } |
257 | 257 | ||
258 | void unregister_undef_hook(struct undef_hook *hook) | 258 | void unregister_undef_hook(struct undef_hook *hook) |
259 | { | 259 | { |
260 | unsigned long flags; | 260 | unsigned long flags; |
261 | 261 | ||
262 | spin_lock_irqsave(&undef_lock, flags); | 262 | spin_lock_irqsave(&undef_lock, flags); |
263 | list_del(&hook->node); | 263 | list_del(&hook->node); |
264 | spin_unlock_irqrestore(&undef_lock, flags); | 264 | spin_unlock_irqrestore(&undef_lock, flags); |
265 | } | 265 | } |
266 | 266 | ||
267 | asmlinkage void do_undefinstr(struct pt_regs *regs) | 267 | asmlinkage void do_undefinstr(struct pt_regs *regs) |
268 | { | 268 | { |
269 | unsigned int correction = thumb_mode(regs) ? 2 : 4; | 269 | unsigned int correction = thumb_mode(regs) ? 2 : 4; |
270 | unsigned int instr; | 270 | unsigned int instr; |
271 | struct undef_hook *hook; | 271 | struct undef_hook *hook; |
272 | siginfo_t info; | 272 | siginfo_t info; |
273 | void __user *pc; | 273 | void __user *pc; |
274 | 274 | ||
275 | /* | 275 | /* |
276 | * According to the ARM ARM, PC is 2 or 4 bytes ahead, | 276 | * According to the ARM ARM, PC is 2 or 4 bytes ahead, |
277 | * depending whether we're in Thumb mode or not. | 277 | * depending whether we're in Thumb mode or not. |
278 | * Correct this offset. | 278 | * Correct this offset. |
279 | */ | 279 | */ |
280 | regs->ARM_pc -= correction; | 280 | regs->ARM_pc -= correction; |
281 | 281 | ||
282 | pc = (void __user *)instruction_pointer(regs); | 282 | pc = (void __user *)instruction_pointer(regs); |
283 | if (thumb_mode(regs)) { | 283 | if (thumb_mode(regs)) { |
284 | get_user(instr, (u16 __user *)pc); | 284 | get_user(instr, (u16 __user *)pc); |
285 | } else { | 285 | } else { |
286 | get_user(instr, (u32 __user *)pc); | 286 | get_user(instr, (u32 __user *)pc); |
287 | } | 287 | } |
288 | 288 | ||
289 | spin_lock_irq(&undef_lock); | 289 | spin_lock_irq(&undef_lock); |
290 | list_for_each_entry(hook, &undef_hook, node) { | 290 | list_for_each_entry(hook, &undef_hook, node) { |
291 | if ((instr & hook->instr_mask) == hook->instr_val && | 291 | if ((instr & hook->instr_mask) == hook->instr_val && |
292 | (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) { | 292 | (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) { |
293 | if (hook->fn(regs, instr) == 0) { | 293 | if (hook->fn(regs, instr) == 0) { |
294 | spin_unlock_irq(&undef_lock); | 294 | spin_unlock_irq(&undef_lock); |
295 | return; | 295 | return; |
296 | } | 296 | } |
297 | } | 297 | } |
298 | } | 298 | } |
299 | spin_unlock_irq(&undef_lock); | 299 | spin_unlock_irq(&undef_lock); |
300 | 300 | ||
301 | #ifdef CONFIG_DEBUG_USER | 301 | #ifdef CONFIG_DEBUG_USER |
302 | if (user_debug & UDBG_UNDEFINED) { | 302 | if (user_debug & UDBG_UNDEFINED) { |
303 | printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", | 303 | printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", |
304 | current->comm, current->pid, pc); | 304 | current->comm, current->pid, pc); |
305 | dump_instr(regs); | 305 | dump_instr(regs); |
306 | } | 306 | } |
307 | #endif | 307 | #endif |
308 | 308 | ||
309 | info.si_signo = SIGILL; | 309 | info.si_signo = SIGILL; |
310 | info.si_errno = 0; | 310 | info.si_errno = 0; |
311 | info.si_code = ILL_ILLOPC; | 311 | info.si_code = ILL_ILLOPC; |
312 | info.si_addr = pc; | 312 | info.si_addr = pc; |
313 | 313 | ||
314 | notify_die("Oops - undefined instruction", regs, &info, 0, 6); | 314 | notify_die("Oops - undefined instruction", regs, &info, 0, 6); |
315 | } | 315 | } |
316 | 316 | ||
317 | asmlinkage void do_unexp_fiq (struct pt_regs *regs) | 317 | asmlinkage void do_unexp_fiq (struct pt_regs *regs) |
318 | { | 318 | { |
319 | #ifndef CONFIG_IGNORE_FIQ | 319 | #ifndef CONFIG_IGNORE_FIQ |
320 | printk("Hmm. Unexpected FIQ received, but trying to continue\n"); | 320 | printk("Hmm. Unexpected FIQ received, but trying to continue\n"); |
321 | printk("You may have a hardware problem...\n"); | 321 | printk("You may have a hardware problem...\n"); |
322 | #endif | 322 | #endif |
323 | } | 323 | } |
324 | 324 | ||
325 | /* | 325 | /* |
326 | * bad_mode handles the impossible case in the vectors. If you see one of | 326 | * bad_mode handles the impossible case in the vectors. If you see one of |
327 | * these, then it's extremely serious, and could mean you have buggy hardware. | 327 | * these, then it's extremely serious, and could mean you have buggy hardware. |
328 | * It never returns, and never tries to sync. We hope that we can at least | 328 | * It never returns, and never tries to sync. We hope that we can at least |
329 | * dump out some state information... | 329 | * dump out some state information... |
330 | */ | 330 | */ |
331 | asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode) | 331 | asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode) |
332 | { | 332 | { |
333 | console_verbose(); | 333 | console_verbose(); |
334 | 334 | ||
335 | printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n", | 335 | printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n", |
336 | handler[reason], processor_modes[proc_mode]); | 336 | handler[reason], processor_modes[proc_mode]); |
337 | 337 | ||
338 | die("Oops - bad mode", regs, 0); | 338 | die("Oops - bad mode", regs, 0); |
339 | local_irq_disable(); | 339 | local_irq_disable(); |
340 | panic("bad mode"); | 340 | panic("bad mode"); |
341 | } | 341 | } |
342 | 342 | ||
343 | static int bad_syscall(int n, struct pt_regs *regs) | 343 | static int bad_syscall(int n, struct pt_regs *regs) |
344 | { | 344 | { |
345 | struct thread_info *thread = current_thread_info(); | 345 | struct thread_info *thread = current_thread_info(); |
346 | siginfo_t info; | 346 | siginfo_t info; |
347 | 347 | ||
348 | if (current->personality != PER_LINUX && thread->exec_domain->handler) { | 348 | if (current->personality != PER_LINUX && thread->exec_domain->handler) { |
349 | thread->exec_domain->handler(n, regs); | 349 | thread->exec_domain->handler(n, regs); |
350 | return regs->ARM_r0; | 350 | return regs->ARM_r0; |
351 | } | 351 | } |
352 | 352 | ||
353 | #ifdef CONFIG_DEBUG_USER | 353 | #ifdef CONFIG_DEBUG_USER |
354 | if (user_debug & UDBG_SYSCALL) { | 354 | if (user_debug & UDBG_SYSCALL) { |
355 | printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", | 355 | printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", |
356 | current->pid, current->comm, n); | 356 | current->pid, current->comm, n); |
357 | dump_instr(regs); | 357 | dump_instr(regs); |
358 | } | 358 | } |
359 | #endif | 359 | #endif |
360 | 360 | ||
361 | info.si_signo = SIGILL; | 361 | info.si_signo = SIGILL; |
362 | info.si_errno = 0; | 362 | info.si_errno = 0; |
363 | info.si_code = ILL_ILLTRP; | 363 | info.si_code = ILL_ILLTRP; |
364 | info.si_addr = (void __user *)instruction_pointer(regs) - | 364 | info.si_addr = (void __user *)instruction_pointer(regs) - |
365 | (thumb_mode(regs) ? 2 : 4); | 365 | (thumb_mode(regs) ? 2 : 4); |
366 | 366 | ||
367 | notify_die("Oops - bad syscall", regs, &info, n, 0); | 367 | notify_die("Oops - bad syscall", regs, &info, n, 0); |
368 | 368 | ||
369 | return regs->ARM_r0; | 369 | return regs->ARM_r0; |
370 | } | 370 | } |
371 | 371 | ||
372 | static inline void | 372 | static inline void |
373 | do_cache_op(unsigned long start, unsigned long end, int flags) | 373 | do_cache_op(unsigned long start, unsigned long end, int flags) |
374 | { | 374 | { |
375 | struct vm_area_struct *vma; | 375 | struct vm_area_struct *vma; |
376 | 376 | ||
377 | if (end < start || flags) | 377 | if (end < start || flags) |
378 | return; | 378 | return; |
379 | 379 | ||
380 | vma = find_vma(current->active_mm, start); | 380 | vma = find_vma(current->active_mm, start); |
381 | if (vma && vma->vm_start < end) { | 381 | if (vma && vma->vm_start < end) { |
382 | if (start < vma->vm_start) | 382 | if (start < vma->vm_start) |
383 | start = vma->vm_start; | 383 | start = vma->vm_start; |
384 | if (end > vma->vm_end) | 384 | if (end > vma->vm_end) |
385 | end = vma->vm_end; | 385 | end = vma->vm_end; |
386 | 386 | ||
387 | flush_cache_user_range(vma, start, end); | 387 | flush_cache_user_range(vma, start, end); |
388 | } | 388 | } |
389 | } | 389 | } |
390 | 390 | ||
391 | /* | 391 | /* |
392 | * Handle all unrecognised system calls. | 392 | * Handle all unrecognised system calls. |
393 | * 0x9f0000 - 0x9fffff are some more esoteric system calls | 393 | * 0x9f0000 - 0x9fffff are some more esoteric system calls |
394 | */ | 394 | */ |
395 | #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) | 395 | #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) |
396 | asmlinkage int arm_syscall(int no, struct pt_regs *regs) | 396 | asmlinkage int arm_syscall(int no, struct pt_regs *regs) |
397 | { | 397 | { |
398 | struct thread_info *thread = current_thread_info(); | 398 | struct thread_info *thread = current_thread_info(); |
399 | siginfo_t info; | 399 | siginfo_t info; |
400 | 400 | ||
401 | if ((no >> 16) != 0x9f) | 401 | if ((no >> 16) != 0x9f) |
402 | return bad_syscall(no, regs); | 402 | return bad_syscall(no, regs); |
403 | 403 | ||
404 | switch (no & 0xffff) { | 404 | switch (no & 0xffff) { |
405 | case 0: /* branch through 0 */ | 405 | case 0: /* branch through 0 */ |
406 | info.si_signo = SIGSEGV; | 406 | info.si_signo = SIGSEGV; |
407 | info.si_errno = 0; | 407 | info.si_errno = 0; |
408 | info.si_code = SEGV_MAPERR; | 408 | info.si_code = SEGV_MAPERR; |
409 | info.si_addr = NULL; | 409 | info.si_addr = NULL; |
410 | 410 | ||
411 | notify_die("branch through zero", regs, &info, 0, 0); | 411 | notify_die("branch through zero", regs, &info, 0, 0); |
412 | return 0; | 412 | return 0; |
413 | 413 | ||
414 | case NR(breakpoint): /* SWI BREAK_POINT */ | 414 | case NR(breakpoint): /* SWI BREAK_POINT */ |
415 | regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; | 415 | regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; |
416 | ptrace_break(current, regs); | 416 | ptrace_break(current, regs); |
417 | return regs->ARM_r0; | 417 | return regs->ARM_r0; |
418 | 418 | ||
419 | /* | 419 | /* |
420 | * Flush a region from virtual address 'r0' to virtual address 'r1' | 420 | * Flush a region from virtual address 'r0' to virtual address 'r1' |
421 | * _exclusive_. There is no alignment requirement on either address; | 421 | * _exclusive_. There is no alignment requirement on either address; |
422 | * user space does not need to know the hardware cache layout. | 422 | * user space does not need to know the hardware cache layout. |
423 | * | 423 | * |
424 | * r2 contains flags. It should ALWAYS be passed as ZERO until it | 424 | * r2 contains flags. It should ALWAYS be passed as ZERO until it |
425 | * is defined to be something else. For now we ignore it, but may | 425 | * is defined to be something else. For now we ignore it, but may |
426 | * the fires of hell burn in your belly if you break this rule. ;) | 426 | * the fires of hell burn in your belly if you break this rule. ;) |
427 | * | 427 | * |
428 | * (at a later date, we may want to allow this call to not flush | 428 | * (at a later date, we may want to allow this call to not flush |
429 | * various aspects of the cache. Passing '0' will guarantee that | 429 | * various aspects of the cache. Passing '0' will guarantee that |
430 | * everything necessary gets flushed to maintain consistency in | 430 | * everything necessary gets flushed to maintain consistency in |
431 | * the specified region). | 431 | * the specified region). |
432 | */ | 432 | */ |
433 | case NR(cacheflush): | 433 | case NR(cacheflush): |
434 | do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2); | 434 | do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2); |
435 | return 0; | 435 | return 0; |
436 | 436 | ||
437 | case NR(usr26): | 437 | case NR(usr26): |
438 | if (!(elf_hwcap & HWCAP_26BIT)) | 438 | if (!(elf_hwcap & HWCAP_26BIT)) |
439 | break; | 439 | break; |
440 | regs->ARM_cpsr &= ~MODE32_BIT; | 440 | regs->ARM_cpsr &= ~MODE32_BIT; |
441 | return regs->ARM_r0; | 441 | return regs->ARM_r0; |
442 | 442 | ||
443 | case NR(usr32): | 443 | case NR(usr32): |
444 | if (!(elf_hwcap & HWCAP_26BIT)) | 444 | if (!(elf_hwcap & HWCAP_26BIT)) |
445 | break; | 445 | break; |
446 | regs->ARM_cpsr |= MODE32_BIT; | 446 | regs->ARM_cpsr |= MODE32_BIT; |
447 | return regs->ARM_r0; | 447 | return regs->ARM_r0; |
448 | 448 | ||
449 | case NR(set_tls): | 449 | case NR(set_tls): |
450 | thread->tp_value = regs->ARM_r0; | 450 | thread->tp_value = regs->ARM_r0; |
451 | #if defined(CONFIG_HAS_TLS_REG) | 451 | #if defined(CONFIG_HAS_TLS_REG) |
452 | asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) ); | 452 | asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) ); |
453 | #elif !defined(CONFIG_TLS_REG_EMUL) | 453 | #elif !defined(CONFIG_TLS_REG_EMUL) |
454 | /* | 454 | /* |
455 | * User space must never try to access this directly. | 455 | * User space must never try to access this directly. |
456 | * Expect your app to break eventually if you do so. | 456 | * Expect your app to break eventually if you do so. |
457 | * The user helper at 0xffff0fe0 must be used instead. | 457 | * The user helper at 0xffff0fe0 must be used instead. |
458 | * (see entry-armv.S for details) | 458 | * (see entry-armv.S for details) |
459 | */ | 459 | */ |
460 | *((unsigned int *)0xffff0ff0) = regs->ARM_r0; | 460 | *((unsigned int *)0xffff0ff0) = regs->ARM_r0; |
461 | #endif | 461 | #endif |
462 | return 0; | 462 | return 0; |
463 | 463 | ||
464 | #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG | 464 | #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG |
465 | /* | 465 | /* |
466 | * Atomically store r1 in *r2 if *r2 is equal to r0 for user space. | 466 | * Atomically store r1 in *r2 if *r2 is equal to r0 for user space. |
467 | * Return zero in r0 if *MEM was changed or non-zero if no exchange | 467 | * Return zero in r0 if *MEM was changed or non-zero if no exchange |
468 | * happened. Also set the user C flag accordingly. | 468 | * happened. Also set the user C flag accordingly. |
469 | * If access permissions have to be fixed up then non-zero is | 469 | * If access permissions have to be fixed up then non-zero is |
470 | * returned and the operation has to be re-attempted. | 470 | * returned and the operation has to be re-attempted. |
471 | * | 471 | * |
472 | * *NOTE*: This is a ghost syscall private to the kernel. Only the | 472 | * *NOTE*: This is a ghost syscall private to the kernel. Only the |
473 | * __kuser_cmpxchg code in entry-armv.S should be aware of its | 473 | * __kuser_cmpxchg code in entry-armv.S should be aware of its |
474 | * existence. Don't ever use this from user code. | 474 | * existence. Don't ever use this from user code. |
475 | */ | 475 | */ |
476 | case 0xfff0: | 476 | case 0xfff0: |
477 | { | 477 | { |
478 | extern void do_DataAbort(unsigned long addr, unsigned int fsr, | 478 | extern void do_DataAbort(unsigned long addr, unsigned int fsr, |
479 | struct pt_regs *regs); | 479 | struct pt_regs *regs); |
480 | unsigned long val; | 480 | unsigned long val; |
481 | unsigned long addr = regs->ARM_r2; | 481 | unsigned long addr = regs->ARM_r2; |
482 | struct mm_struct *mm = current->mm; | 482 | struct mm_struct *mm = current->mm; |
483 | pgd_t *pgd; pmd_t *pmd; pte_t *pte; | 483 | pgd_t *pgd; pmd_t *pmd; pte_t *pte; |
484 | 484 | ||
485 | regs->ARM_cpsr &= ~PSR_C_BIT; | 485 | regs->ARM_cpsr &= ~PSR_C_BIT; |
486 | spin_lock(&mm->page_table_lock); | 486 | spin_lock(&mm->page_table_lock); |
487 | pgd = pgd_offset(mm, addr); | 487 | pgd = pgd_offset(mm, addr); |
488 | if (!pgd_present(*pgd)) | 488 | if (!pgd_present(*pgd)) |
489 | goto bad_access; | 489 | goto bad_access; |
490 | pmd = pmd_offset(pgd, addr); | 490 | pmd = pmd_offset(pgd, addr); |
491 | if (!pmd_present(*pmd)) | 491 | if (!pmd_present(*pmd)) |
492 | goto bad_access; | 492 | goto bad_access; |
493 | pte = pte_offset_map(pmd, addr); | 493 | pte = pte_offset_map(pmd, addr); |
494 | if (!pte_present(*pte) || !pte_write(*pte)) | 494 | if (!pte_present(*pte) || !pte_write(*pte)) |
495 | goto bad_access; | 495 | goto bad_access; |
496 | val = *(unsigned long *)addr; | 496 | val = *(unsigned long *)addr; |
497 | val -= regs->ARM_r0; | 497 | val -= regs->ARM_r0; |
498 | if (val == 0) { | 498 | if (val == 0) { |
499 | *(unsigned long *)addr = regs->ARM_r1; | 499 | *(unsigned long *)addr = regs->ARM_r1; |
500 | regs->ARM_cpsr |= PSR_C_BIT; | 500 | regs->ARM_cpsr |= PSR_C_BIT; |
501 | } | 501 | } |
502 | spin_unlock(&mm->page_table_lock); | 502 | spin_unlock(&mm->page_table_lock); |
503 | return val; | 503 | return val; |
504 | 504 | ||
505 | bad_access: | 505 | bad_access: |
506 | spin_unlock(&mm->page_table_lock); | 506 | spin_unlock(&mm->page_table_lock); |
507 | /* simulate a read access fault */ | 507 | /* simulate a read access fault */ |
508 | do_DataAbort(addr, 15 + (1 << 11), regs); | 508 | do_DataAbort(addr, 15 + (1 << 11), regs); |
509 | return -1; | 509 | return -1; |
510 | } | 510 | } |
511 | #endif | 511 | #endif |
512 | 512 | ||
513 | default: | 513 | default: |
514 | /* Calls 9f00xx..9f07ff are defined to return -ENOSYS | 514 | /* Calls 9f00xx..9f07ff are defined to return -ENOSYS |
515 | if not implemented, rather than raising SIGILL. This | 515 | if not implemented, rather than raising SIGILL. This |
516 | way the calling program can gracefully determine whether | 516 | way the calling program can gracefully determine whether |
517 | a feature is supported. */ | 517 | a feature is supported. */ |
518 | if (no <= 0x7ff) | 518 | if (no <= 0x7ff) |
519 | return -ENOSYS; | 519 | return -ENOSYS; |
520 | break; | 520 | break; |
521 | } | 521 | } |
522 | #ifdef CONFIG_DEBUG_USER | 522 | #ifdef CONFIG_DEBUG_USER |
523 | /* | 523 | /* |
524 | * experience shows that these seem to indicate that | 524 | * experience shows that these seem to indicate that |
525 | * something catastrophic has happened | 525 | * something catastrophic has happened |
526 | */ | 526 | */ |
527 | if (user_debug & UDBG_SYSCALL) { | 527 | if (user_debug & UDBG_SYSCALL) { |
528 | printk("[%d] %s: arm syscall %d\n", | 528 | printk("[%d] %s: arm syscall %d\n", |
529 | current->pid, current->comm, no); | 529 | current->pid, current->comm, no); |
530 | dump_instr(regs); | 530 | dump_instr(regs); |
531 | if (user_mode(regs)) { | 531 | if (user_mode(regs)) { |
532 | __show_regs(regs); | 532 | __show_regs(regs); |
533 | c_backtrace(regs->ARM_fp, processor_mode(regs)); | 533 | c_backtrace(regs->ARM_fp, processor_mode(regs)); |
534 | } | 534 | } |
535 | } | 535 | } |
536 | #endif | 536 | #endif |
537 | info.si_signo = SIGILL; | 537 | info.si_signo = SIGILL; |
538 | info.si_errno = 0; | 538 | info.si_errno = 0; |
539 | info.si_code = ILL_ILLTRP; | 539 | info.si_code = ILL_ILLTRP; |
540 | info.si_addr = (void __user *)instruction_pointer(regs) - | 540 | info.si_addr = (void __user *)instruction_pointer(regs) - |
541 | (thumb_mode(regs) ? 2 : 4); | 541 | (thumb_mode(regs) ? 2 : 4); |
542 | 542 | ||
543 | notify_die("Oops - bad syscall(2)", regs, &info, no, 0); | 543 | notify_die("Oops - bad syscall(2)", regs, &info, no, 0); |
544 | return 0; | 544 | return 0; |
545 | } | 545 | } |
546 | 546 | ||
547 | #ifdef CONFIG_TLS_REG_EMUL | 547 | #ifdef CONFIG_TLS_REG_EMUL |
548 | 548 | ||
549 | /* | 549 | /* |
550 | * We might be running on an ARMv6+ processor which should have the TLS | 550 | * We might be running on an ARMv6+ processor which should have the TLS |
551 | * register but for some reason we can't use it, or maybe an SMP system | 551 | * register but for some reason we can't use it, or maybe an SMP system |
552 | * using a pre-ARMv6 processor (there are apparently a few prototypes like | 552 | * using a pre-ARMv6 processor (there are apparently a few prototypes like |
553 | * that in existence) and therefore access to that register must be | 553 | * that in existence) and therefore access to that register must be |
554 | * emulated. | 554 | * emulated. |
555 | */ | 555 | */ |
556 | 556 | ||
557 | static int get_tp_trap(struct pt_regs *regs, unsigned int instr) | 557 | static int get_tp_trap(struct pt_regs *regs, unsigned int instr) |
558 | { | 558 | { |
559 | int reg = (instr >> 12) & 15; | 559 | int reg = (instr >> 12) & 15; |
560 | if (reg == 15) | 560 | if (reg == 15) |
561 | return 1; | 561 | return 1; |
562 | regs->uregs[reg] = current_thread_info()->tp_value; | 562 | regs->uregs[reg] = current_thread_info()->tp_value; |
563 | regs->ARM_pc += 4; | 563 | regs->ARM_pc += 4; |
564 | return 0; | 564 | return 0; |
565 | } | 565 | } |
566 | 566 | ||
567 | static struct undef_hook arm_mrc_hook = { | 567 | static struct undef_hook arm_mrc_hook = { |
568 | .instr_mask = 0x0fff0fff, | 568 | .instr_mask = 0x0fff0fff, |
569 | .instr_val = 0x0e1d0f70, | 569 | .instr_val = 0x0e1d0f70, |
570 | .cpsr_mask = PSR_T_BIT, | 570 | .cpsr_mask = PSR_T_BIT, |
571 | .cpsr_val = 0, | 571 | .cpsr_val = 0, |
572 | .fn = get_tp_trap, | 572 | .fn = get_tp_trap, |
573 | }; | 573 | }; |
574 | 574 | ||
575 | static int __init arm_mrc_hook_init(void) | 575 | static int __init arm_mrc_hook_init(void) |
576 | { | 576 | { |
577 | register_undef_hook(&arm_mrc_hook); | 577 | register_undef_hook(&arm_mrc_hook); |
578 | return 0; | 578 | return 0; |
579 | } | 579 | } |
580 | 580 | ||
581 | late_initcall(arm_mrc_hook_init); | 581 | late_initcall(arm_mrc_hook_init); |
582 | 582 | ||
583 | #endif | 583 | #endif |
584 | 584 | ||
585 | void __bad_xchg(volatile void *ptr, int size) | 585 | void __bad_xchg(volatile void *ptr, int size) |
586 | { | 586 | { |
587 | printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", | 587 | printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", |
588 | __builtin_return_address(0), ptr, size); | 588 | __builtin_return_address(0), ptr, size); |
589 | BUG(); | 589 | BUG(); |
590 | } | 590 | } |
591 | EXPORT_SYMBOL(__bad_xchg); | 591 | EXPORT_SYMBOL(__bad_xchg); |
592 | 592 | ||
593 | /* | 593 | /* |
594 | * A data abort trap was taken, but we did not handle the instruction. | 594 | * A data abort trap was taken, but we did not handle the instruction. |
595 | * Try to abort the user program, or panic if it was the kernel. | 595 | * Try to abort the user program, or panic if it was the kernel. |
596 | */ | 596 | */ |
597 | asmlinkage void | 597 | asmlinkage void |
598 | baddataabort(int code, unsigned long instr, struct pt_regs *regs) | 598 | baddataabort(int code, unsigned long instr, struct pt_regs *regs) |
599 | { | 599 | { |
600 | unsigned long addr = instruction_pointer(regs); | 600 | unsigned long addr = instruction_pointer(regs); |
601 | siginfo_t info; | 601 | siginfo_t info; |
602 | 602 | ||
603 | #ifdef CONFIG_DEBUG_USER | 603 | #ifdef CONFIG_DEBUG_USER |
604 | if (user_debug & UDBG_BADABORT) { | 604 | if (user_debug & UDBG_BADABORT) { |
605 | printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n", | 605 | printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n", |
606 | current->pid, current->comm, code, instr); | 606 | current->pid, current->comm, code, instr); |
607 | dump_instr(regs); | 607 | dump_instr(regs); |
608 | show_pte(current->mm, addr); | 608 | show_pte(current->mm, addr); |
609 | } | 609 | } |
610 | #endif | 610 | #endif |
611 | 611 | ||
612 | info.si_signo = SIGILL; | 612 | info.si_signo = SIGILL; |
613 | info.si_errno = 0; | 613 | info.si_errno = 0; |
614 | info.si_code = ILL_ILLOPC; | 614 | info.si_code = ILL_ILLOPC; |
615 | info.si_addr = (void __user *)addr; | 615 | info.si_addr = (void __user *)addr; |
616 | 616 | ||
617 | notify_die("unknown data abort code", regs, &info, instr, 0); | 617 | notify_die("unknown data abort code", regs, &info, instr, 0); |
618 | } | 618 | } |
619 | 619 | ||
620 | volatile void __bug(const char *file, int line, void *data) | 620 | void __attribute__((noreturn)) __bug(const char *file, int line, void *data) |
621 | { | 621 | { |
622 | printk(KERN_CRIT"kernel BUG at %s:%d!", file, line); | 622 | printk(KERN_CRIT"kernel BUG at %s:%d!", file, line); |
623 | if (data) | 623 | if (data) |
624 | printk(" - extra data = %p", data); | 624 | printk(" - extra data = %p", data); |
625 | printk("\n"); | 625 | printk("\n"); |
626 | *(int *)0 = 0; | 626 | *(int *)0 = 0; |
627 | } | 627 | } |
628 | EXPORT_SYMBOL(__bug); | 628 | EXPORT_SYMBOL(__bug); |
629 | 629 | ||
630 | void __readwrite_bug(const char *fn) | 630 | void __readwrite_bug(const char *fn) |
631 | { | 631 | { |
632 | printk("%s called, but not implemented\n", fn); | 632 | printk("%s called, but not implemented\n", fn); |
633 | BUG(); | 633 | BUG(); |
634 | } | 634 | } |
635 | EXPORT_SYMBOL(__readwrite_bug); | 635 | EXPORT_SYMBOL(__readwrite_bug); |
636 | 636 | ||
637 | void __pte_error(const char *file, int line, unsigned long val) | 637 | void __pte_error(const char *file, int line, unsigned long val) |
638 | { | 638 | { |
639 | printk("%s:%d: bad pte %08lx.\n", file, line, val); | 639 | printk("%s:%d: bad pte %08lx.\n", file, line, val); |
640 | } | 640 | } |
641 | 641 | ||
642 | void __pmd_error(const char *file, int line, unsigned long val) | 642 | void __pmd_error(const char *file, int line, unsigned long val) |
643 | { | 643 | { |
644 | printk("%s:%d: bad pmd %08lx.\n", file, line, val); | 644 | printk("%s:%d: bad pmd %08lx.\n", file, line, val); |
645 | } | 645 | } |
646 | 646 | ||
647 | void __pgd_error(const char *file, int line, unsigned long val) | 647 | void __pgd_error(const char *file, int line, unsigned long val) |
648 | { | 648 | { |
649 | printk("%s:%d: bad pgd %08lx.\n", file, line, val); | 649 | printk("%s:%d: bad pgd %08lx.\n", file, line, val); |
650 | } | 650 | } |
651 | 651 | ||
652 | asmlinkage void __div0(void) | 652 | asmlinkage void __div0(void) |
653 | { | 653 | { |
654 | printk("Division by zero in kernel.\n"); | 654 | printk("Division by zero in kernel.\n"); |
655 | dump_stack(); | 655 | dump_stack(); |
656 | } | 656 | } |
657 | EXPORT_SYMBOL(__div0); | 657 | EXPORT_SYMBOL(__div0); |
658 | 658 | ||
659 | void abort(void) | 659 | void abort(void) |
660 | { | 660 | { |
661 | BUG(); | 661 | BUG(); |
662 | 662 | ||
663 | /* if that doesn't kill us, halt */ | 663 | /* if that doesn't kill us, halt */ |
664 | panic("Oops failed to kill thread"); | 664 | panic("Oops failed to kill thread"); |
665 | } | 665 | } |
666 | EXPORT_SYMBOL(abort); | 666 | EXPORT_SYMBOL(abort); |
667 | 667 | ||
668 | void __init trap_init(void) | 668 | void __init trap_init(void) |
669 | { | 669 | { |
670 | extern char __stubs_start[], __stubs_end[]; | 670 | extern char __stubs_start[], __stubs_end[]; |
671 | extern char __vectors_start[], __vectors_end[]; | 671 | extern char __vectors_start[], __vectors_end[]; |
672 | extern char __kuser_helper_start[], __kuser_helper_end[]; | 672 | extern char __kuser_helper_start[], __kuser_helper_end[]; |
673 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | 673 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; |
674 | 674 | ||
675 | /* | 675 | /* |
676 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) | 676 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) |
677 | * into the vector page, mapped at 0xffff0000, and ensure these | 677 | * into the vector page, mapped at 0xffff0000, and ensure these |
678 | * are visible to the instruction stream. | 678 | * are visible to the instruction stream. |
679 | */ | 679 | */ |
680 | memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start); | 680 | memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start); |
681 | memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start); | 681 | memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start); |
682 | memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz); | 682 | memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz); |
683 | 683 | ||
684 | /* | 684 | /* |
685 | * Copy signal return handlers into the vector page, and | 685 | * Copy signal return handlers into the vector page, and |
686 | * set sigreturn to be a pointer to these. | 686 | * set sigreturn to be a pointer to these. |
687 | */ | 687 | */ |
688 | memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes, | 688 | memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes, |
689 | sizeof(sigreturn_codes)); | 689 | sizeof(sigreturn_codes)); |
690 | 690 | ||
691 | flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE); | 691 | flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE); |
692 | modify_domain(DOMAIN_USER, DOMAIN_CLIENT); | 692 | modify_domain(DOMAIN_USER, DOMAIN_CLIENT); |
693 | } | 693 | } |
694 | 694 |
arch/arm/nwfpe/fpopcode.h
1 | /* | 1 | /* |
2 | NetWinder Floating Point Emulator | 2 | NetWinder Floating Point Emulator |
3 | (c) Rebel.COM, 1998,1999 | 3 | (c) Rebel.COM, 1998,1999 |
4 | (c) Philip Blundell, 2001 | 4 | (c) Philip Blundell, 2001 |
5 | 5 | ||
6 | Direct questions, comments to Scott Bambrough <scottb@netwinder.org> | 6 | Direct questions, comments to Scott Bambrough <scottb@netwinder.org> |
7 | 7 | ||
8 | This program is free software; you can redistribute it and/or modify | 8 | This program is free software; you can redistribute it and/or modify |
9 | it under the terms of the GNU General Public License as published by | 9 | it under the terms of the GNU General Public License as published by |
10 | the Free Software Foundation; either version 2 of the License, or | 10 | the Free Software Foundation; either version 2 of the License, or |
11 | (at your option) any later version. | 11 | (at your option) any later version. |
12 | 12 | ||
13 | This program is distributed in the hope that it will be useful, | 13 | This program is distributed in the hope that it will be useful, |
14 | but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | GNU General Public License for more details. | 16 | GNU General Public License for more details. |
17 | 17 | ||
18 | You should have received a copy of the GNU General Public License | 18 | You should have received a copy of the GNU General Public License |
19 | along with this program; if not, write to the Free Software | 19 | along with this program; if not, write to the Free Software |
20 | Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 20 | Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #ifndef __FPOPCODE_H__ | 23 | #ifndef __FPOPCODE_H__ |
24 | #define __FPOPCODE_H__ | 24 | #define __FPOPCODE_H__ |
25 | 25 | ||
26 | #include <linux/config.h> | 26 | #include <linux/config.h> |
27 | 27 | ||
28 | /* | 28 | /* |
29 | ARM Floating Point Instruction Classes | 29 | ARM Floating Point Instruction Classes |
30 | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | 30 | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
31 | |c o n d|1 1 0 P|U|u|W|L| Rn |v| Fd |0|0|0|1| o f f s e t | CPDT | 31 | |c o n d|1 1 0 P|U|u|W|L| Rn |v| Fd |0|0|0|1| o f f s e t | CPDT |
32 | |c o n d|1 1 0 P|U|w|W|L| Rn |x| Fd |0|0|1|0| o f f s e t | CPDT (copro 2) | 32 | |c o n d|1 1 0 P|U|w|W|L| Rn |x| Fd |0|0|1|0| o f f s e t | CPDT (copro 2) |
33 | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | 33 | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
34 | |c o n d|1 1 1 0|a|b|c|d|e| Fn |j| Fd |0|0|0|1|f|g|h|0|i| Fm | CPDO | 34 | |c o n d|1 1 1 0|a|b|c|d|e| Fn |j| Fd |0|0|0|1|f|g|h|0|i| Fm | CPDO |
35 | |c o n d|1 1 1 0|a|b|c|L|e| Fn | Rd |0|0|0|1|f|g|h|1|i| Fm | CPRT | 35 | |c o n d|1 1 1 0|a|b|c|L|e| Fn | Rd |0|0|0|1|f|g|h|1|i| Fm | CPRT |
36 | |c o n d|1 1 1 0|a|b|c|1|e| Fn |1|1|1|1|0|0|0|1|f|g|h|1|i| Fm | comparisons | 36 | |c o n d|1 1 1 0|a|b|c|1|e| Fn |1|1|1|1|0|0|0|1|f|g|h|1|i| Fm | comparisons |
37 | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | 37 | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
38 | 38 | ||
39 | CPDT data transfer instructions | 39 | CPDT data transfer instructions |
40 | LDF, STF, LFM (copro 2), SFM (copro 2) | 40 | LDF, STF, LFM (copro 2), SFM (copro 2) |
41 | 41 | ||
42 | CPDO dyadic arithmetic instructions | 42 | CPDO dyadic arithmetic instructions |
43 | ADF, MUF, SUF, RSF, DVF, RDF, | 43 | ADF, MUF, SUF, RSF, DVF, RDF, |
44 | POW, RPW, RMF, FML, FDV, FRD, POL | 44 | POW, RPW, RMF, FML, FDV, FRD, POL |
45 | 45 | ||
46 | CPDO monadic arithmetic instructions | 46 | CPDO monadic arithmetic instructions |
47 | MVF, MNF, ABS, RND, SQT, LOG, LGN, EXP, | 47 | MVF, MNF, ABS, RND, SQT, LOG, LGN, EXP, |
48 | SIN, COS, TAN, ASN, ACS, ATN, URD, NRM | 48 | SIN, COS, TAN, ASN, ACS, ATN, URD, NRM |
49 | 49 | ||
50 | CPRT joint arithmetic/data transfer instructions | 50 | CPRT joint arithmetic/data transfer instructions |
51 | FIX (arithmetic followed by load/store) | 51 | FIX (arithmetic followed by load/store) |
52 | FLT (load/store followed by arithmetic) | 52 | FLT (load/store followed by arithmetic) |
53 | CMF, CNF CMFE, CNFE (comparisons) | 53 | CMF, CNF CMFE, CNFE (comparisons) |
54 | WFS, RFS (write/read floating point status register) | 54 | WFS, RFS (write/read floating point status register) |
55 | WFC, RFC (write/read floating point control register) | 55 | WFC, RFC (write/read floating point control register) |
56 | 56 | ||
57 | cond condition codes | 57 | cond condition codes |
58 | P pre/post index bit: 0 = postindex, 1 = preindex | 58 | P pre/post index bit: 0 = postindex, 1 = preindex |
59 | U up/down bit: 0 = stack grows down, 1 = stack grows up | 59 | U up/down bit: 0 = stack grows down, 1 = stack grows up |
60 | W write back bit: 1 = update base register (Rn) | 60 | W write back bit: 1 = update base register (Rn) |
61 | L load/store bit: 0 = store, 1 = load | 61 | L load/store bit: 0 = store, 1 = load |
62 | Rn base register | 62 | Rn base register |
63 | Rd destination/source register | 63 | Rd destination/source register |
64 | Fd floating point destination register | 64 | Fd floating point destination register |
65 | Fn floating point source register | 65 | Fn floating point source register |
66 | Fm floating point source register or floating point constant | 66 | Fm floating point source register or floating point constant |
67 | 67 | ||
68 | uv transfer length (TABLE 1) | 68 | uv transfer length (TABLE 1) |
69 | wx register count (TABLE 2) | 69 | wx register count (TABLE 2) |
70 | abcd arithmetic opcode (TABLES 3 & 4) | 70 | abcd arithmetic opcode (TABLES 3 & 4) |
71 | ef destination size (rounding precision) (TABLE 5) | 71 | ef destination size (rounding precision) (TABLE 5) |
72 | gh rounding mode (TABLE 6) | 72 | gh rounding mode (TABLE 6) |
73 | j dyadic/monadic bit: 0 = dyadic, 1 = monadic | 73 | j dyadic/monadic bit: 0 = dyadic, 1 = monadic |
74 | i constant bit: 1 = constant (TABLE 6) | 74 | i constant bit: 1 = constant (TABLE 6) |
75 | */ | 75 | */ |
76 | 76 | ||
77 | /* | 77 | /* |
78 | TABLE 1 | 78 | TABLE 1 |
79 | +-------------------------+---+---+---------+---------+ | 79 | +-------------------------+---+---+---------+---------+ |
80 | | Precision | u | v | FPSR.EP | length | | 80 | | Precision | u | v | FPSR.EP | length | |
81 | +-------------------------+---+---+---------+---------+ | 81 | +-------------------------+---+---+---------+---------+ |
82 | | Single | 0 | 0 | x | 1 words | | 82 | | Single | 0 | 0 | x | 1 words | |
83 | | Double | 1 | 1 | x | 2 words | | 83 | | Double | 1 | 1 | x | 2 words | |
84 | | Extended | 1 | 1 | x | 3 words | | 84 | | Extended | 1 | 1 | x | 3 words | |
85 | | Packed decimal | 1 | 1 | 0 | 3 words | | 85 | | Packed decimal | 1 | 1 | 0 | 3 words | |
86 | | Expanded packed decimal | 1 | 1 | 1 | 4 words | | 86 | | Expanded packed decimal | 1 | 1 | 1 | 4 words | |
87 | +-------------------------+---+---+---------+---------+ | 87 | +-------------------------+---+---+---------+---------+ |
88 | Note: x = don't care | 88 | Note: x = don't care |
89 | */ | 89 | */ |
90 | 90 | ||
91 | /* | 91 | /* |
92 | TABLE 2 | 92 | TABLE 2 |
93 | +---+---+---------------------------------+ | 93 | +---+---+---------------------------------+ |
94 | | w | x | Number of registers to transfer | | 94 | | w | x | Number of registers to transfer | |
95 | +---+---+---------------------------------+ | 95 | +---+---+---------------------------------+ |
96 | | 0 | 1 | 1 | | 96 | | 0 | 1 | 1 | |
97 | | 1 | 0 | 2 | | 97 | | 1 | 0 | 2 | |
98 | | 1 | 1 | 3 | | 98 | | 1 | 1 | 3 | |
99 | | 0 | 0 | 4 | | 99 | | 0 | 0 | 4 | |
100 | +---+---+---------------------------------+ | 100 | +---+---+---------------------------------+ |
101 | */ | 101 | */ |
102 | 102 | ||
103 | /* | 103 | /* |
104 | TABLE 3: Dyadic Floating Point Opcodes | 104 | TABLE 3: Dyadic Floating Point Opcodes |
105 | +---+---+---+---+----------+-----------------------+-----------------------+ | 105 | +---+---+---+---+----------+-----------------------+-----------------------+ |
106 | | a | b | c | d | Mnemonic | Description | Operation | | 106 | | a | b | c | d | Mnemonic | Description | Operation | |
107 | +---+---+---+---+----------+-----------------------+-----------------------+ | 107 | +---+---+---+---+----------+-----------------------+-----------------------+ |
108 | | 0 | 0 | 0 | 0 | ADF | Add | Fd := Fn + Fm | | 108 | | 0 | 0 | 0 | 0 | ADF | Add | Fd := Fn + Fm | |
109 | | 0 | 0 | 0 | 1 | MUF | Multiply | Fd := Fn * Fm | | 109 | | 0 | 0 | 0 | 1 | MUF | Multiply | Fd := Fn * Fm | |
110 | | 0 | 0 | 1 | 0 | SUF | Subtract | Fd := Fn - Fm | | 110 | | 0 | 0 | 1 | 0 | SUF | Subtract | Fd := Fn - Fm | |
111 | | 0 | 0 | 1 | 1 | RSF | Reverse subtract | Fd := Fm - Fn | | 111 | | 0 | 0 | 1 | 1 | RSF | Reverse subtract | Fd := Fm - Fn | |
112 | | 0 | 1 | 0 | 0 | DVF | Divide | Fd := Fn / Fm | | 112 | | 0 | 1 | 0 | 0 | DVF | Divide | Fd := Fn / Fm | |
113 | | 0 | 1 | 0 | 1 | RDF | Reverse divide | Fd := Fm / Fn | | 113 | | 0 | 1 | 0 | 1 | RDF | Reverse divide | Fd := Fm / Fn | |
114 | | 0 | 1 | 1 | 0 | POW | Power | Fd := Fn ^ Fm | | 114 | | 0 | 1 | 1 | 0 | POW | Power | Fd := Fn ^ Fm | |
115 | | 0 | 1 | 1 | 1 | RPW | Reverse power | Fd := Fm ^ Fn | | 115 | | 0 | 1 | 1 | 1 | RPW | Reverse power | Fd := Fm ^ Fn | |
116 | | 1 | 0 | 0 | 0 | RMF | Remainder | Fd := IEEE rem(Fn/Fm) | | 116 | | 1 | 0 | 0 | 0 | RMF | Remainder | Fd := IEEE rem(Fn/Fm) | |
117 | | 1 | 0 | 0 | 1 | FML | Fast Multiply | Fd := Fn * Fm | | 117 | | 1 | 0 | 0 | 1 | FML | Fast Multiply | Fd := Fn * Fm | |
118 | | 1 | 0 | 1 | 0 | FDV | Fast Divide | Fd := Fn / Fm | | 118 | | 1 | 0 | 1 | 0 | FDV | Fast Divide | Fd := Fn / Fm | |
119 | | 1 | 0 | 1 | 1 | FRD | Fast reverse divide | Fd := Fm / Fn | | 119 | | 1 | 0 | 1 | 1 | FRD | Fast reverse divide | Fd := Fm / Fn | |
120 | | 1 | 1 | 0 | 0 | POL | Polar angle (ArcTan2) | Fd := arctan2(Fn,Fm) | | 120 | | 1 | 1 | 0 | 0 | POL | Polar angle (ArcTan2) | Fd := arctan2(Fn,Fm) | |
121 | | 1 | 1 | 0 | 1 | | undefined instruction | trap | | 121 | | 1 | 1 | 0 | 1 | | undefined instruction | trap | |
122 | | 1 | 1 | 1 | 0 | | undefined instruction | trap | | 122 | | 1 | 1 | 1 | 0 | | undefined instruction | trap | |
123 | | 1 | 1 | 1 | 1 | | undefined instruction | trap | | 123 | | 1 | 1 | 1 | 1 | | undefined instruction | trap | |
124 | +---+---+---+---+----------+-----------------------+-----------------------+ | 124 | +---+---+---+---+----------+-----------------------+-----------------------+ |
125 | Note: POW, RPW, POL are deprecated, and are available for backwards | 125 | Note: POW, RPW, POL are deprecated, and are available for backwards |
126 | compatibility only. | 126 | compatibility only. |
127 | */ | 127 | */ |
128 | 128 | ||
129 | /* | 129 | /* |
130 | TABLE 4: Monadic Floating Point Opcodes | 130 | TABLE 4: Monadic Floating Point Opcodes |
131 | +---+---+---+---+----------+-----------------------+-----------------------+ | 131 | +---+---+---+---+----------+-----------------------+-----------------------+ |
132 | | a | b | c | d | Mnemonic | Description | Operation | | 132 | | a | b | c | d | Mnemonic | Description | Operation | |
133 | +---+---+---+---+----------+-----------------------+-----------------------+ | 133 | +---+---+---+---+----------+-----------------------+-----------------------+ |
134 | | 0 | 0 | 0 | 0 | MVF | Move | Fd := Fm | | 134 | | 0 | 0 | 0 | 0 | MVF | Move | Fd := Fm | |
135 | | 0 | 0 | 0 | 1 | MNF | Move negated | Fd := - Fm | | 135 | | 0 | 0 | 0 | 1 | MNF | Move negated | Fd := - Fm | |
136 | | 0 | 0 | 1 | 0 | ABS | Absolute value | Fd := abs(Fm) | | 136 | | 0 | 0 | 1 | 0 | ABS | Absolute value | Fd := abs(Fm) | |
137 | | 0 | 0 | 1 | 1 | RND | Round to integer | Fd := int(Fm) | | 137 | | 0 | 0 | 1 | 1 | RND | Round to integer | Fd := int(Fm) | |
138 | | 0 | 1 | 0 | 0 | SQT | Square root | Fd := sqrt(Fm) | | 138 | | 0 | 1 | 0 | 0 | SQT | Square root | Fd := sqrt(Fm) | |
139 | | 0 | 1 | 0 | 1 | LOG | Log base 10 | Fd := log10(Fm) | | 139 | | 0 | 1 | 0 | 1 | LOG | Log base 10 | Fd := log10(Fm) | |
140 | | 0 | 1 | 1 | 0 | LGN | Log base e | Fd := ln(Fm) | | 140 | | 0 | 1 | 1 | 0 | LGN | Log base e | Fd := ln(Fm) | |
141 | | 0 | 1 | 1 | 1 | EXP | Exponent | Fd := e ^ Fm | | 141 | | 0 | 1 | 1 | 1 | EXP | Exponent | Fd := e ^ Fm | |
142 | | 1 | 0 | 0 | 0 | SIN | Sine | Fd := sin(Fm) | | 142 | | 1 | 0 | 0 | 0 | SIN | Sine | Fd := sin(Fm) | |
143 | | 1 | 0 | 0 | 1 | COS | Cosine | Fd := cos(Fm) | | 143 | | 1 | 0 | 0 | 1 | COS | Cosine | Fd := cos(Fm) | |
144 | | 1 | 0 | 1 | 0 | TAN | Tangent | Fd := tan(Fm) | | 144 | | 1 | 0 | 1 | 0 | TAN | Tangent | Fd := tan(Fm) | |
145 | | 1 | 0 | 1 | 1 | ASN | Arc Sine | Fd := arcsin(Fm) | | 145 | | 1 | 0 | 1 | 1 | ASN | Arc Sine | Fd := arcsin(Fm) | |
146 | | 1 | 1 | 0 | 0 | ACS | Arc Cosine | Fd := arccos(Fm) | | 146 | | 1 | 1 | 0 | 0 | ACS | Arc Cosine | Fd := arccos(Fm) | |
147 | | 1 | 1 | 0 | 1 | ATN | Arc Tangent | Fd := arctan(Fm) | | 147 | | 1 | 1 | 0 | 1 | ATN | Arc Tangent | Fd := arctan(Fm) | |
148 | | 1 | 1 | 1 | 0 | URD | Unnormalized round | Fd := int(Fm) | | 148 | | 1 | 1 | 1 | 0 | URD | Unnormalized round | Fd := int(Fm) | |
149 | | 1 | 1 | 1 | 1 | NRM | Normalize | Fd := norm(Fm) | | 149 | | 1 | 1 | 1 | 1 | NRM | Normalize | Fd := norm(Fm) | |
150 | +---+---+---+---+----------+-----------------------+-----------------------+ | 150 | +---+---+---+---+----------+-----------------------+-----------------------+ |
151 | Note: LOG, LGN, EXP, SIN, COS, TAN, ASN, ACS, ATN are deprecated, and are | 151 | Note: LOG, LGN, EXP, SIN, COS, TAN, ASN, ACS, ATN are deprecated, and are |
152 | available for backwards compatibility only. | 152 | available for backwards compatibility only. |
153 | */ | 153 | */ |
154 | 154 | ||
155 | /* | 155 | /* |
156 | TABLE 5 | 156 | TABLE 5 |
157 | +-------------------------+---+---+ | 157 | +-------------------------+---+---+ |
158 | | Rounding Precision | e | f | | 158 | | Rounding Precision | e | f | |
159 | +-------------------------+---+---+ | 159 | +-------------------------+---+---+ |
160 | | IEEE Single precision | 0 | 0 | | 160 | | IEEE Single precision | 0 | 0 |
161 | | IEEE Double precision | 0 | 1 | | 161 | | IEEE Double precision | 0 | 1 |
162 | | IEEE Extended precision | 1 | 0 | | 162 | | IEEE Extended precision | 1 | 0 |
163 | | undefined (trap) | 1 | 1 | | 163 | | undefined (trap) | 1 | 1 |
164 | +-------------------------+---+---+ | 164 | +-------------------------+---+---+ |
165 | */ | 165 | */ |
166 | 166 | ||
167 | /* | 167 | /* |
168 | TABLE 6 | 168 | TABLE 6 |
169 | +---------------------------------+---+---+ | 169 | +---------------------------------+---+---+ |
170 | | Rounding Mode | g | h | | 170 | | Rounding Mode | g | h | |
171 | +---------------------------------+---+---+ | 171 | +---------------------------------+---+---+ |
172 | | Round to nearest (default) | 0 | 0 | | 172 | | Round to nearest (default) | 0 | 0 |
173 | | Round toward plus infinity | 0 | 1 | | 173 | | Round toward plus infinity | 0 | 1 |
174 | | Round toward negative infinity | 1 | 0 | | 174 | | Round toward negative infinity | 1 | 0 |
175 | | Round toward zero | 1 | 1 | | 175 | | Round toward zero | 1 | 1 |
176 | +---------------------------------+---+---+ | 176 | +---------------------------------+---+---+ |
177 | */ | 177 | */ |
178 | 178 | ||
179 | /* | 179 | /* |
180 | === | 180 | === |
181 | === Definitions for load and store instructions | 181 | === Definitions for load and store instructions |
182 | === | 182 | === |
183 | */ | 183 | */ |
184 | 184 | ||
185 | /* bit masks */ | 185 | /* bit masks */ |
186 | #define BIT_PREINDEX 0x01000000 | 186 | #define BIT_PREINDEX 0x01000000 |
187 | #define BIT_UP 0x00800000 | 187 | #define BIT_UP 0x00800000 |
188 | #define BIT_WRITE_BACK 0x00200000 | 188 | #define BIT_WRITE_BACK 0x00200000 |
189 | #define BIT_LOAD 0x00100000 | 189 | #define BIT_LOAD 0x00100000 |
190 | 190 | ||
191 | /* masks for load/store */ | 191 | /* masks for load/store */ |
192 | #define MASK_CPDT 0x0c000000 /* data processing opcode */ | 192 | #define MASK_CPDT 0x0c000000 /* data processing opcode */ |
193 | #define MASK_OFFSET 0x000000ff | 193 | #define MASK_OFFSET 0x000000ff |
194 | #define MASK_TRANSFER_LENGTH 0x00408000 | 194 | #define MASK_TRANSFER_LENGTH 0x00408000 |
195 | #define MASK_REGISTER_COUNT MASK_TRANSFER_LENGTH | 195 | #define MASK_REGISTER_COUNT MASK_TRANSFER_LENGTH |
196 | #define MASK_COPROCESSOR 0x00000f00 | 196 | #define MASK_COPROCESSOR 0x00000f00 |
197 | 197 | ||
198 | /* Tests for transfer length */ | 198 | /* Tests for transfer length */ |
199 | #define TRANSFER_SINGLE 0x00000000 | 199 | #define TRANSFER_SINGLE 0x00000000 |
200 | #define TRANSFER_DOUBLE 0x00008000 | 200 | #define TRANSFER_DOUBLE 0x00008000 |
201 | #define TRANSFER_EXTENDED 0x00400000 | 201 | #define TRANSFER_EXTENDED 0x00400000 |
202 | #define TRANSFER_PACKED MASK_TRANSFER_LENGTH | 202 | #define TRANSFER_PACKED MASK_TRANSFER_LENGTH |
203 | 203 | ||
204 | /* Get the coprocessor number from the opcode. */ | 204 | /* Get the coprocessor number from the opcode. */ |
205 | #define getCoprocessorNumber(opcode) ((opcode & MASK_COPROCESSOR) >> 8) | 205 | #define getCoprocessorNumber(opcode) ((opcode & MASK_COPROCESSOR) >> 8) |
206 | 206 | ||
207 | /* Get the offset from the opcode. */ | 207 | /* Get the offset from the opcode. */ |
208 | #define getOffset(opcode) (opcode & MASK_OFFSET) | 208 | #define getOffset(opcode) (opcode & MASK_OFFSET) |
209 | 209 | ||
210 | /* Tests for specific data transfer load/store opcodes. */ | 210 | /* Tests for specific data transfer load/store opcodes. */ |
211 | #define TEST_OPCODE(opcode,mask) (((opcode) & (mask)) == (mask)) | 211 | #define TEST_OPCODE(opcode,mask) (((opcode) & (mask)) == (mask)) |
212 | 212 | ||
213 | #define LOAD_OP(opcode) TEST_OPCODE((opcode),MASK_CPDT | BIT_LOAD) | 213 | #define LOAD_OP(opcode) TEST_OPCODE((opcode),MASK_CPDT | BIT_LOAD) |
214 | #define STORE_OP(opcode) ((opcode & (MASK_CPDT | BIT_LOAD)) == MASK_CPDT) | 214 | #define STORE_OP(opcode) ((opcode & (MASK_CPDT | BIT_LOAD)) == MASK_CPDT) |
215 | 215 | ||
216 | #define LDF_OP(opcode) (LOAD_OP(opcode) && (getCoprocessorNumber(opcode) == 1)) | 216 | #define LDF_OP(opcode) (LOAD_OP(opcode) && (getCoprocessorNumber(opcode) == 1)) |
217 | #define LFM_OP(opcode) (LOAD_OP(opcode) && (getCoprocessorNumber(opcode) == 2)) | 217 | #define LFM_OP(opcode) (LOAD_OP(opcode) && (getCoprocessorNumber(opcode) == 2)) |
218 | #define STF_OP(opcode) (STORE_OP(opcode) && (getCoprocessorNumber(opcode) == 1)) | 218 | #define STF_OP(opcode) (STORE_OP(opcode) && (getCoprocessorNumber(opcode) == 1)) |
219 | #define SFM_OP(opcode) (STORE_OP(opcode) && (getCoprocessorNumber(opcode) == 2)) | 219 | #define SFM_OP(opcode) (STORE_OP(opcode) && (getCoprocessorNumber(opcode) == 2)) |
220 | 220 | ||
221 | #define PREINDEXED(opcode) ((opcode & BIT_PREINDEX) != 0) | 221 | #define PREINDEXED(opcode) ((opcode & BIT_PREINDEX) != 0) |
222 | #define POSTINDEXED(opcode) ((opcode & BIT_PREINDEX) == 0) | 222 | #define POSTINDEXED(opcode) ((opcode & BIT_PREINDEX) == 0) |
223 | #define BIT_UP_SET(opcode) ((opcode & BIT_UP) != 0) | 223 | #define BIT_UP_SET(opcode) ((opcode & BIT_UP) != 0) |
224 | #define BIT_UP_CLEAR(opcode) ((opcode & BIT_UP) == 0) | 224 | #define BIT_UP_CLEAR(opcode) ((opcode & BIT_UP) == 0) |
225 | #define WRITE_BACK(opcode) ((opcode & BIT_WRITE_BACK) != 0) | 225 | #define WRITE_BACK(opcode) ((opcode & BIT_WRITE_BACK) != 0) |
226 | #define LOAD(opcode) ((opcode & BIT_LOAD) != 0) | 226 | #define LOAD(opcode) ((opcode & BIT_LOAD) != 0) |
227 | #define STORE(opcode) ((opcode & BIT_LOAD) == 0) | 227 | #define STORE(opcode) ((opcode & BIT_LOAD) == 0) |
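The classification macros above compose the mask tests into per-instruction predicates (LDF/STF select coprocessor 1, LFM/SFM coprocessor 2). A minimal, hedged sketch of how they are meant to be used follows; the opcode word is synthesized from the masks themselves purely for illustration, it is not a captured FPA instruction, and the snippet assumes the definitions above are in scope.

    #include <stdio.h>

    /* Illustrative only: build a word that satisfies LDF_OP() from the
     * masks above, then classify it. */
    int main(void)
    {
        unsigned int opcode = MASK_CPDT        /* CPDT instruction class   */
                            | BIT_LOAD         /* load rather than store   */
                            | BIT_PREINDEX     /* pre-indexed addressing   */
                            | (1 << 8);        /* coprocessor number 1     */

        printf("LDF_OP:      %d\n", LDF_OP(opcode));      /* expect 1 */
        printf("STF_OP:      %d\n", STF_OP(opcode));      /* expect 0 */
        printf("pre-indexed: %d\n", PREINDEXED(opcode));  /* expect 1 */
        return 0;
    }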
228 | 228 | ||
229 | /* | 229 | /* |
230 | === | 230 | === |
231 | === Definitions for arithmetic instructions | 231 | === Definitions for arithmetic instructions |
232 | === | 232 | === |
233 | */ | 233 | */ |
234 | /* bit masks */ | 234 | /* bit masks */ |
235 | #define BIT_MONADIC 0x00008000 | 235 | #define BIT_MONADIC 0x00008000 |
236 | #define BIT_CONSTANT 0x00000008 | 236 | #define BIT_CONSTANT 0x00000008 |
237 | 237 | ||
238 | #define CONSTANT_FM(opcode) ((opcode & BIT_CONSTANT) != 0) | 238 | #define CONSTANT_FM(opcode) ((opcode & BIT_CONSTANT) != 0) |
239 | #define MONADIC_INSTRUCTION(opcode) ((opcode & BIT_MONADIC) != 0) | 239 | #define MONADIC_INSTRUCTION(opcode) ((opcode & BIT_MONADIC) != 0) |
240 | 240 | ||
241 | /* instruction identification masks */ | 241 | /* instruction identification masks */ |
242 | #define MASK_CPDO 0x0e000000 /* arithmetic opcode */ | 242 | #define MASK_CPDO 0x0e000000 /* arithmetic opcode */ |
243 | #define MASK_ARITHMETIC_OPCODE 0x00f08000 | 243 | #define MASK_ARITHMETIC_OPCODE 0x00f08000 |
244 | #define MASK_DESTINATION_SIZE 0x00080080 | 244 | #define MASK_DESTINATION_SIZE 0x00080080 |
245 | 245 | ||
246 | /* dyadic arithmetic opcodes. */ | 246 | /* dyadic arithmetic opcodes. */ |
247 | #define ADF_CODE 0x00000000 | 247 | #define ADF_CODE 0x00000000 |
248 | #define MUF_CODE 0x00100000 | 248 | #define MUF_CODE 0x00100000 |
249 | #define SUF_CODE 0x00200000 | 249 | #define SUF_CODE 0x00200000 |
250 | #define RSF_CODE 0x00300000 | 250 | #define RSF_CODE 0x00300000 |
251 | #define DVF_CODE 0x00400000 | 251 | #define DVF_CODE 0x00400000 |
252 | #define RDF_CODE 0x00500000 | 252 | #define RDF_CODE 0x00500000 |
253 | #define POW_CODE 0x00600000 | 253 | #define POW_CODE 0x00600000 |
254 | #define RPW_CODE 0x00700000 | 254 | #define RPW_CODE 0x00700000 |
255 | #define RMF_CODE 0x00800000 | 255 | #define RMF_CODE 0x00800000 |
256 | #define FML_CODE 0x00900000 | 256 | #define FML_CODE 0x00900000 |
257 | #define FDV_CODE 0x00a00000 | 257 | #define FDV_CODE 0x00a00000 |
258 | #define FRD_CODE 0x00b00000 | 258 | #define FRD_CODE 0x00b00000 |
259 | #define POL_CODE 0x00c00000 | 259 | #define POL_CODE 0x00c00000 |
260 | /* 0x00d00000 is an invalid dyadic arithmetic opcode */ | 260 | /* 0x00d00000 is an invalid dyadic arithmetic opcode */ |
261 | /* 0x00e00000 is an invalid dyadic arithmetic opcode */ | 261 | /* 0x00e00000 is an invalid dyadic arithmetic opcode */ |
262 | /* 0x00f00000 is an invalid dyadic arithmetic opcode */ | 262 | /* 0x00f00000 is an invalid dyadic arithmetic opcode */ |
263 | 263 | ||
264 | /* monadic arithmetic opcodes. */ | 264 | /* monadic arithmetic opcodes. */ |
265 | #define MVF_CODE 0x00008000 | 265 | #define MVF_CODE 0x00008000 |
266 | #define MNF_CODE 0x00108000 | 266 | #define MNF_CODE 0x00108000 |
267 | #define ABS_CODE 0x00208000 | 267 | #define ABS_CODE 0x00208000 |
268 | #define RND_CODE 0x00308000 | 268 | #define RND_CODE 0x00308000 |
269 | #define SQT_CODE 0x00408000 | 269 | #define SQT_CODE 0x00408000 |
270 | #define LOG_CODE 0x00508000 | 270 | #define LOG_CODE 0x00508000 |
271 | #define LGN_CODE 0x00608000 | 271 | #define LGN_CODE 0x00608000 |
272 | #define EXP_CODE 0x00708000 | 272 | #define EXP_CODE 0x00708000 |
273 | #define SIN_CODE 0x00808000 | 273 | #define SIN_CODE 0x00808000 |
274 | #define COS_CODE 0x00908000 | 274 | #define COS_CODE 0x00908000 |
275 | #define TAN_CODE 0x00a08000 | 275 | #define TAN_CODE 0x00a08000 |
276 | #define ASN_CODE 0x00b08000 | 276 | #define ASN_CODE 0x00b08000 |
277 | #define ACS_CODE 0x00c08000 | 277 | #define ACS_CODE 0x00c08000 |
278 | #define ATN_CODE 0x00d08000 | 278 | #define ATN_CODE 0x00d08000 |
279 | #define URD_CODE 0x00e08000 | 279 | #define URD_CODE 0x00e08000 |
280 | #define NRM_CODE 0x00f08000 | 280 | #define NRM_CODE 0x00f08000 |
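The *_CODE values above are Tables 3 and 4 re-expressed as contents of the MASK_ARITHMETIC_OPCODE field, with bit 15 doubling as the monadic flag. A hedged sketch of a dispatch on that field, assuming the definitions above are visible; the function name is ours, not the emulator's.

    /* Maps the arithmetic-opcode field back to a mnemonic; only a few
     * table entries are shown. */
    static const char *fpa_arith_mnemonic(unsigned int opcode)
    {
        switch (opcode & MASK_ARITHMETIC_OPCODE) {
        case ADF_CODE: return "ADF";   /* Table 3: a,b,c,d = 0,0,0,0 */
        case MUF_CODE: return "MUF";   /* Table 3: 0,0,0,1           */
        case MVF_CODE: return "MVF";   /* Table 4: monadic bit set   */
        case ABS_CODE: return "ABS";   /* Table 4: 0,0,1,0           */
        default:       return "undefined";
        }
    }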
281 | 281 | ||
282 | /* | 282 | /* |
283 | === | 283 | === |
284 | === Definitions for register transfer and comparison instructions | 284 | === Definitions for register transfer and comparison instructions |
285 | === | 285 | === |
286 | */ | 286 | */ |
287 | 287 | ||
288 | #define MASK_CPRT 0x0e000010 /* register transfer opcode */ | 288 | #define MASK_CPRT 0x0e000010 /* register transfer opcode */ |
289 | #define MASK_CPRT_CODE 0x00f00000 | 289 | #define MASK_CPRT_CODE 0x00f00000 |
290 | #define FLT_CODE 0x00000000 | 290 | #define FLT_CODE 0x00000000 |
291 | #define FIX_CODE 0x00100000 | 291 | #define FIX_CODE 0x00100000 |
292 | #define WFS_CODE 0x00200000 | 292 | #define WFS_CODE 0x00200000 |
293 | #define RFS_CODE 0x00300000 | 293 | #define RFS_CODE 0x00300000 |
294 | #define WFC_CODE 0x00400000 | 294 | #define WFC_CODE 0x00400000 |
295 | #define RFC_CODE 0x00500000 | 295 | #define RFC_CODE 0x00500000 |
296 | #define CMF_CODE 0x00900000 | 296 | #define CMF_CODE 0x00900000 |
297 | #define CNF_CODE 0x00b00000 | 297 | #define CNF_CODE 0x00b00000 |
298 | #define CMFE_CODE 0x00d00000 | 298 | #define CMFE_CODE 0x00d00000 |
299 | #define CNFE_CODE 0x00f00000 | 299 | #define CNFE_CODE 0x00f00000 |
300 | 300 | ||
301 | /* | 301 | /* |
302 | === | 302 | === |
303 | === Common definitions | 303 | === Common definitions |
304 | === | 304 | === |
305 | */ | 305 | */ |
306 | 306 | ||
307 | /* register masks */ | 307 | /* register masks */ |
308 | #define MASK_Rd 0x0000f000 | 308 | #define MASK_Rd 0x0000f000 |
309 | #define MASK_Rn 0x000f0000 | 309 | #define MASK_Rn 0x000f0000 |
310 | #define MASK_Fd 0x00007000 | 310 | #define MASK_Fd 0x00007000 |
311 | #define MASK_Fm 0x00000007 | 311 | #define MASK_Fm 0x00000007 |
312 | #define MASK_Fn 0x00070000 | 312 | #define MASK_Fn 0x00070000 |
313 | 313 | ||
314 | /* condition code masks */ | 314 | /* condition code masks */ |
315 | #define CC_MASK 0xf0000000 | 315 | #define CC_MASK 0xf0000000 |
316 | #define CC_NEGATIVE 0x80000000 | 316 | #define CC_NEGATIVE 0x80000000 |
317 | #define CC_ZERO 0x40000000 | 317 | #define CC_ZERO 0x40000000 |
318 | #define CC_CARRY 0x20000000 | 318 | #define CC_CARRY 0x20000000 |
319 | #define CC_OVERFLOW 0x10000000 | 319 | #define CC_OVERFLOW 0x10000000 |
320 | #define CC_EQ 0x00000000 | 320 | #define CC_EQ 0x00000000 |
321 | #define CC_NE 0x10000000 | 321 | #define CC_NE 0x10000000 |
322 | #define CC_CS 0x20000000 | 322 | #define CC_CS 0x20000000 |
323 | #define CC_HS CC_CS | 323 | #define CC_HS CC_CS |
324 | #define CC_CC 0x30000000 | 324 | #define CC_CC 0x30000000 |
325 | #define CC_LO CC_CC | 325 | #define CC_LO CC_CC |
326 | #define CC_MI 0x40000000 | 326 | #define CC_MI 0x40000000 |
327 | #define CC_PL 0x50000000 | 327 | #define CC_PL 0x50000000 |
328 | #define CC_VS 0x60000000 | 328 | #define CC_VS 0x60000000 |
329 | #define CC_VC 0x70000000 | 329 | #define CC_VC 0x70000000 |
330 | #define CC_HI 0x80000000 | 330 | #define CC_HI 0x80000000 |
331 | #define CC_LS 0x90000000 | 331 | #define CC_LS 0x90000000 |
332 | #define CC_GE 0xa0000000 | 332 | #define CC_GE 0xa0000000 |
333 | #define CC_LT 0xb0000000 | 333 | #define CC_LT 0xb0000000 |
334 | #define CC_GT 0xc0000000 | 334 | #define CC_GT 0xc0000000 |
335 | #define CC_LE 0xd0000000 | 335 | #define CC_LE 0xd0000000 |
336 | #define CC_AL 0xe0000000 | 336 | #define CC_AL 0xe0000000 |
337 | #define CC_NV 0xf0000000 | 337 | #define CC_NV 0xf0000000 |
338 | 338 | ||
339 | /* rounding masks/values */ | 339 | /* rounding masks/values */ |
340 | #define MASK_ROUNDING_MODE 0x00000060 | 340 | #define MASK_ROUNDING_MODE 0x00000060 |
341 | #define ROUND_TO_NEAREST 0x00000000 | 341 | #define ROUND_TO_NEAREST 0x00000000 |
342 | #define ROUND_TO_PLUS_INFINITY 0x00000020 | 342 | #define ROUND_TO_PLUS_INFINITY 0x00000020 |
343 | #define ROUND_TO_MINUS_INFINITY 0x00000040 | 343 | #define ROUND_TO_MINUS_INFINITY 0x00000040 |
344 | #define ROUND_TO_ZERO 0x00000060 | 344 | #define ROUND_TO_ZERO 0x00000060 |
345 | 345 | ||
346 | #define MASK_ROUNDING_PRECISION 0x00080080 | 346 | #define MASK_ROUNDING_PRECISION 0x00080080 |
347 | #define ROUND_SINGLE 0x00000000 | 347 | #define ROUND_SINGLE 0x00000000 |
348 | #define ROUND_DOUBLE 0x00000080 | 348 | #define ROUND_DOUBLE 0x00000080 |
349 | #define ROUND_EXTENDED 0x00080000 | 349 | #define ROUND_EXTENDED 0x00080000 |
350 | 350 | ||
351 | /* Get the condition code from the opcode. */ | 351 | /* Get the condition code from the opcode. */ |
352 | #define getCondition(opcode) (opcode >> 28) | 352 | #define getCondition(opcode) (opcode >> 28) |
353 | 353 | ||
354 | /* Get the source register from the opcode. */ | 354 | /* Get the source register from the opcode. */ |
355 | #define getRn(opcode) ((opcode & MASK_Rn) >> 16) | 355 | #define getRn(opcode) ((opcode & MASK_Rn) >> 16) |
356 | 356 | ||
357 | /* Get the destination floating point register from the opcode. */ | 357 | /* Get the destination floating point register from the opcode. */ |
358 | #define getFd(opcode) ((opcode & MASK_Fd) >> 12) | 358 | #define getFd(opcode) ((opcode & MASK_Fd) >> 12) |
359 | 359 | ||
360 | /* Get the first source floating point register from the opcode. */ | 360 | /* Get the first source floating point register from the opcode. */ |
361 | #define getFn(opcode) ((opcode & MASK_Fn) >> 16) | 361 | #define getFn(opcode) ((opcode & MASK_Fn) >> 16) |
362 | 362 | ||
363 | /* Get the second source floating point register from the opcode. */ | 363 | /* Get the second source floating point register from the opcode. */ |
364 | #define getFm(opcode) (opcode & MASK_Fm) | 364 | #define getFm(opcode) (opcode & MASK_Fm) |
365 | 365 | ||
366 | /* Get the destination register from the opcode. */ | 366 | /* Get the destination register from the opcode. */ |
367 | #define getRd(opcode) ((opcode & MASK_Rd) >> 12) | 367 | #define getRd(opcode) ((opcode & MASK_Rd) >> 12) |
368 | 368 | ||
369 | /* Get the rounding mode from the opcode. */ | 369 | /* Get the rounding mode from the opcode. */ |
370 | #define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5) | 370 | #define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5) |
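Taken together, the masks and accessors above slice one 32-bit word into condition code, register numbers and rounding mode. A hedged example on a synthetic word built from those masks (again, not a real instruction), assuming the macros above are in scope:

    static void decode_example(void)
    {
        /* condition AL, Fn = F2, Fd = F1, Fm = F3, default rounding */
        unsigned int opcode = CC_AL | (2 << 16) | (1 << 12) | 3;

        unsigned int cond = getCondition(opcode);    /* 0xe (AL)            */
        unsigned int fn   = getFn(opcode);           /* 2                   */
        unsigned int fd   = getFd(opcode);           /* 1                   */
        unsigned int fm   = getFm(opcode);           /* 3                   */
        unsigned int rm   = getRoundingMode(opcode); /* 0: round to nearest */

        /* values kept only to show what each accessor extracts */
        (void)cond; (void)fn; (void)fd; (void)fm; (void)rm;
    }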
371 | 371 | ||
372 | #ifdef CONFIG_FPE_NWFPE_XP | 372 | #ifdef CONFIG_FPE_NWFPE_XP |
373 | static inline const floatx80 getExtendedConstant(const unsigned int nIndex) | 373 | static inline __attribute_pure__ floatx80 getExtendedConstant(const unsigned int nIndex) |
374 | { | 374 | { |
375 | extern const floatx80 floatx80Constant[]; | 375 | extern const floatx80 floatx80Constant[]; |
376 | return floatx80Constant[nIndex]; | 376 | return floatx80Constant[nIndex]; |
377 | } | 377 | } |
378 | #endif | 378 | #endif |
379 | 379 | ||
380 | static inline const float64 getDoubleConstant(const unsigned int nIndex) | 380 | static inline __attribute_pure__ float64 getDoubleConstant(const unsigned int nIndex) |
381 | { | 381 | { |
382 | extern const float64 float64Constant[]; | 382 | extern const float64 float64Constant[]; |
383 | return float64Constant[nIndex]; | 383 | return float64Constant[nIndex]; |
384 | } | 384 | } |
385 | 385 | ||
386 | static inline const float32 getSingleConstant(const unsigned int nIndex) | 386 | static inline __attribute_pure__ float32 getSingleConstant(const unsigned int nIndex) |
387 | { | 387 | { |
388 | extern const float32 float32Constant[]; | 388 | extern const float32 float32Constant[]; |
389 | return float32Constant[nIndex]; | 389 | return float32Constant[nIndex]; |
390 | } | 390 | } |
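This hunk is one of the "easy cases" from the changelog: a const qualifier on a returned value carries no meaning, so the three constant accessors are re-declared __attribute_pure__ instead (which, in the kernel's compiler headers, expands to GCC's pure attribute). A free-standing, hedged illustration of what that attribute promises, unrelated to the NWFPE constant tables themselves:

    /* A pure function's result depends only on its arguments and on global
     * memory it reads, and it has no side effects, so the compiler may
     * merge repeated calls with the same argument. */
    static const int lut[4] = { 10, 20, 30, 40 };

    static int __attribute__((pure)) lut_get(unsigned int i)
    {
        return lut[i & 3];
    }

    int lut_twice(unsigned int i)
    {
        /* GCC is free to evaluate lut_get(i) only once here. */
        return lut_get(i) + lut_get(i);
    }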
391 | 391 | ||
392 | static inline unsigned int getTransferLength(const unsigned int opcode) | 392 | static inline unsigned int getTransferLength(const unsigned int opcode) |
393 | { | 393 | { |
394 | unsigned int nRc; | 394 | unsigned int nRc; |
395 | 395 | ||
396 | switch (opcode & MASK_TRANSFER_LENGTH) { | 396 | switch (opcode & MASK_TRANSFER_LENGTH) { |
397 | case 0x00000000: | 397 | case 0x00000000: |
398 | nRc = 1; | 398 | nRc = 1; |
399 | break; /* single precision */ | 399 | break; /* single precision */ |
400 | case 0x00008000: | 400 | case 0x00008000: |
401 | nRc = 2; | 401 | nRc = 2; |
402 | break; /* double precision */ | 402 | break; /* double precision */ |
403 | case 0x00400000: | 403 | case 0x00400000: |
404 | nRc = 3; | 404 | nRc = 3; |
405 | break; /* extended precision */ | 405 | break; /* extended precision */ |
406 | default: | 406 | default: |
407 | nRc = 0; | 407 | nRc = 0; |
408 | } | 408 | } |
409 | 409 | ||
410 | return (nRc); | 410 | return (nRc); |
411 | } | 411 | } |
412 | 412 | ||
413 | static inline unsigned int getRegisterCount(const unsigned int opcode) | 413 | static inline unsigned int getRegisterCount(const unsigned int opcode) |
414 | { | 414 | { |
415 | unsigned int nRc; | 415 | unsigned int nRc; |
416 | 416 | ||
417 | switch (opcode & MASK_REGISTER_COUNT) { | 417 | switch (opcode & MASK_REGISTER_COUNT) { |
418 | case 0x00000000: | 418 | case 0x00000000: |
419 | nRc = 4; | 419 | nRc = 4; |
420 | break; | 420 | break; |
421 | case 0x00008000: | 421 | case 0x00008000: |
422 | nRc = 1; | 422 | nRc = 1; |
423 | break; | 423 | break; |
424 | case 0x00400000: | 424 | case 0x00400000: |
425 | nRc = 2; | 425 | nRc = 2; |
426 | break; | 426 | break; |
427 | case 0x00408000: | 427 | case 0x00408000: |
428 | nRc = 3; | 428 | nRc = 3; |
429 | break; | 429 | break; |
430 | default: | 430 | default: |
431 | nRc = 0; | 431 | nRc = 0; |
432 | } | 432 | } |
433 | 433 | ||
434 | return (nRc); | 434 | return (nRc); |
435 | } | 435 | } |
436 | 436 | ||
437 | static inline unsigned int getRoundingPrecision(const unsigned int opcode) | 437 | static inline unsigned int getRoundingPrecision(const unsigned int opcode) |
438 | { | 438 | { |
439 | unsigned int nRc; | 439 | unsigned int nRc; |
440 | 440 | ||
441 | switch (opcode & MASK_ROUNDING_PRECISION) { | 441 | switch (opcode & MASK_ROUNDING_PRECISION) { |
442 | case 0x00000000: | 442 | case 0x00000000: |
443 | nRc = 1; | 443 | nRc = 1; |
444 | break; | 444 | break; |
445 | case 0x00000080: | 445 | case 0x00000080: |
446 | nRc = 2; | 446 | nRc = 2; |
447 | break; | 447 | break; |
448 | case 0x00080000: | 448 | case 0x00080000: |
449 | nRc = 3; | 449 | nRc = 3; |
450 | break; | 450 | break; |
451 | default: | 451 | default: |
452 | nRc = 0; | 452 | nRc = 0; |
453 | } | 453 | } |
454 | 454 | ||
455 | return (nRc); | 455 | return (nRc); |
456 | } | 456 | } |
457 | 457 | ||
458 | static inline unsigned int getDestinationSize(const unsigned int opcode) | 458 | static inline unsigned int getDestinationSize(const unsigned int opcode) |
459 | { | 459 | { |
460 | unsigned int nRc; | 460 | unsigned int nRc; |
461 | 461 | ||
462 | switch (opcode & MASK_DESTINATION_SIZE) { | 462 | switch (opcode & MASK_DESTINATION_SIZE) { |
463 | case 0x00000000: | 463 | case 0x00000000: |
464 | nRc = typeSingle; | 464 | nRc = typeSingle; |
465 | break; | 465 | break; |
466 | case 0x00000080: | 466 | case 0x00000080: |
467 | nRc = typeDouble; | 467 | nRc = typeDouble; |
468 | break; | 468 | break; |
469 | case 0x00080000: | 469 | case 0x00080000: |
470 | nRc = typeExtended; | 470 | nRc = typeExtended; |
471 | break; | 471 | break; |
472 | default: | 472 | default: |
473 | nRc = typeNone; | 473 | nRc = typeNone; |
474 | } | 474 | } |
475 | 475 | ||
476 | return (nRc); | 476 | return (nRc); |
477 | } | 477 | } |
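A hedged sketch of how the two decoders above would combine when sizing a trapped transfer; the helper and its name are ours, and it assumes the FPA's LFM/SFM format of three words per register.

    /* How many 32-bit words the faulting CPDT instruction moves.
     * Assumes the macros and helpers above are in scope. */
    static unsigned int fp_words_to_transfer(unsigned int opcode)
    {
        if (LFM_OP(opcode) || SFM_OP(opcode))
            /* multiple-register transfer: 1..4 registers, 3 words each */
            return getRegisterCount(opcode) * 3;

        /* single value: 1 (single), 2 (double) or 3 (extended) words;
         * the packed/invalid encoding falls through to 0 above */
        return getTransferLength(opcode);
    }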
478 | 478 | ||
479 | #endif | 479 | #endif |
480 | 480 |
include/asm-arm/bug.h
1 | #ifndef _ASMARM_BUG_H | 1 | #ifndef _ASMARM_BUG_H |
2 | #define _ASMARM_BUG_H | 2 | #define _ASMARM_BUG_H |
3 | 3 | ||
4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
5 | 5 | ||
6 | #ifdef CONFIG_BUG | 6 | #ifdef CONFIG_BUG |
7 | #ifdef CONFIG_DEBUG_BUGVERBOSE | 7 | #ifdef CONFIG_DEBUG_BUGVERBOSE |
8 | extern volatile void __bug(const char *file, int line, void *data); | 8 | extern void __bug(const char *file, int line, void *data) __attribute__((noreturn)); |
9 | 9 | ||
10 | /* give file/line information */ | 10 | /* give file/line information */ |
11 | #define BUG() __bug(__FILE__, __LINE__, NULL) | 11 | #define BUG() __bug(__FILE__, __LINE__, NULL) |
12 | 12 | ||
13 | #else | 13 | #else |
14 | 14 | ||
15 | /* this just causes an oops */ | 15 | /* this just causes an oops */ |
16 | #define BUG() (*(int *)0 = 0) | 16 | #define BUG() (*(int *)0 = 0) |
17 | 17 | ||
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #define HAVE_ARCH_BUG | 20 | #define HAVE_ARCH_BUG |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #include <asm-generic/bug.h> | 23 | #include <asm-generic/bug.h> |
24 | 24 | ||
25 | #endif | 25 | #endif |
26 | 26 |
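The __bug() change above is the pattern this whole commit applies: the old GCC idiom of marking a function "volatile void" to say it never returns is replaced by the explicit noreturn attribute. A small, hedged, user-space example of the modern form (not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    /* The attribute lets the compiler treat everything after a call to
     * die() as unreachable, e.g. it will not warn about the missing
     * return path in must_be_positive() below. */
    static void die(const char *file, int line) __attribute__((noreturn));

    static void die(const char *file, int line)
    {
        fprintf(stderr, "fatal at %s:%d\n", file, line);
        exit(1);
    }

    int must_be_positive(int x)
    {
        if (x > 0)
            return x;
        die(__FILE__, __LINE__);   /* no return needed after this */
    }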
include/asm-arm/cpu-multi32.h
1 | /* | 1 | /* |
2 | * linux/include/asm-arm/cpu-multi32.h | 2 | * linux/include/asm-arm/cpu-multi32.h |
3 | * | 3 | * |
4 | * Copyright (C) 2000 Russell King | 4 | * Copyright (C) 2000 Russell King |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <asm/page.h> | 10 | #include <asm/page.h> |
11 | 11 | ||
12 | struct mm_struct; | 12 | struct mm_struct; |
13 | 13 | ||
14 | /* | 14 | /* |
15 | * Don't change this structure - ASM code | 15 | * Don't change this structure - ASM code |
16 | * relies on it. | 16 | * relies on it. |
17 | */ | 17 | */ |
18 | extern struct processor { | 18 | extern struct processor { |
19 | /* MISC | 19 | /* MISC |
20 | * get data abort address/flags | 20 | * get data abort address/flags |
21 | */ | 21 | */ |
22 | void (*_data_abort)(unsigned long pc); | 22 | void (*_data_abort)(unsigned long pc); |
23 | /* | 23 | /* |
24 | * Set up any processor specifics | 24 | * Set up any processor specifics |
25 | */ | 25 | */ |
26 | void (*_proc_init)(void); | 26 | void (*_proc_init)(void); |
27 | /* | 27 | /* |
28 | * Disable any processor specifics | 28 | * Disable any processor specifics |
29 | */ | 29 | */ |
30 | void (*_proc_fin)(void); | 30 | void (*_proc_fin)(void); |
31 | /* | 31 | /* |
32 | * Special stuff for a reset | 32 | * Special stuff for a reset |
33 | */ | 33 | */ |
34 | volatile void (*reset)(unsigned long addr); | 34 | void (*reset)(unsigned long addr) __attribute__((noreturn)); |
35 | /* | 35 | /* |
36 | * Idle the processor | 36 | * Idle the processor |
37 | */ | 37 | */ |
38 | int (*_do_idle)(void); | 38 | int (*_do_idle)(void); |
39 | /* | 39 | /* |
40 | * Processor architecture specific | 40 | * Processor architecture specific |
41 | */ | 41 | */ |
42 | /* | 42 | /* |
43 | * clean a virtual address range from the | 43 | * clean a virtual address range from the |
44 | * D-cache without flushing the cache. | 44 | * D-cache without flushing the cache. |
45 | */ | 45 | */ |
46 | void (*dcache_clean_area)(void *addr, int size); | 46 | void (*dcache_clean_area)(void *addr, int size); |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * Set the page table | 49 | * Set the page table |
50 | */ | 50 | */ |
51 | void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); | 51 | void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); |
52 | /* | 52 | /* |
53 | * Set a PTE | 53 | * Set a PTE |
54 | */ | 54 | */ |
55 | void (*set_pte)(pte_t *ptep, pte_t pte); | 55 | void (*set_pte)(pte_t *ptep, pte_t pte); |
56 | } processor; | 56 | } processor; |
57 | 57 | ||
58 | #define cpu_proc_init() processor._proc_init() | 58 | #define cpu_proc_init() processor._proc_init() |
59 | #define cpu_proc_fin() processor._proc_fin() | 59 | #define cpu_proc_fin() processor._proc_fin() |
60 | #define cpu_reset(addr) processor.reset(addr) | 60 | #define cpu_reset(addr) processor.reset(addr) |
61 | #define cpu_do_idle() processor._do_idle() | 61 | #define cpu_do_idle() processor._do_idle() |
62 | #define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) | 62 | #define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) |
63 | #define cpu_set_pte(ptep, pte) processor.set_pte(ptep, pte) | 63 | #define cpu_set_pte(ptep, pte) processor.set_pte(ptep, pte) |
64 | #define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) | 64 | #define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) |
65 | 65 |
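The reset hook above gets the same treatment as cpu_reset() in the single-CPU header: the noreturn attribute now lives on the function-pointer member instead of a volatile return type. A hedged sketch of how the cpu_* wrapper macros resolve through this table; the arm926-style names are hypothetical, and in reality the table is populated from the boot-time processor information rather than by hand.

    /* Illustrative only. */
    extern void arm926_proc_init(void);
    extern void arm926_reset(unsigned long addr) __attribute__((noreturn));

    static void example_setup(void)
    {
        processor._proc_init = arm926_proc_init;
        processor.reset      = arm926_reset;

        cpu_proc_init();     /* expands to processor._proc_init() */
        /* cpu_reset(addr) would expand to processor.reset(addr) and,
         * thanks to the attribute, is known never to return. */
    }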
include/asm-arm/cpu-single.h
1 | /* | 1 | /* |
2 | * linux/include/asm-arm/cpu-single.h | 2 | * linux/include/asm-arm/cpu-single.h |
3 | * | 3 | * |
4 | * Copyright (C) 2000 Russell King | 4 | * Copyright (C) 2000 Russell King |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | /* | 10 | /* |
11 | * Single CPU | 11 | * Single CPU |
12 | */ | 12 | */ |
13 | #ifdef __STDC__ | 13 | #ifdef __STDC__ |
14 | #define __catify_fn(name,x) name##x | 14 | #define __catify_fn(name,x) name##x |
15 | #else | 15 | #else |
16 | #define __catify_fn(name,x) name/**/x | 16 | #define __catify_fn(name,x) name/**/x |
17 | #endif | 17 | #endif |
18 | #define __cpu_fn(name,x) __catify_fn(name,x) | 18 | #define __cpu_fn(name,x) __catify_fn(name,x) |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * If we are supporting multiple CPUs, then we must use a table of | 21 | * If we are supporting multiple CPUs, then we must use a table of |
22 | * function pointers for this lot. Otherwise, we can optimise the | 22 | * function pointers for this lot. Otherwise, we can optimise the |
23 | * table away. | 23 | * table away. |
24 | */ | 24 | */ |
25 | #define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init) | 25 | #define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init) |
26 | #define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin) | 26 | #define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin) |
27 | #define cpu_reset __cpu_fn(CPU_NAME,_reset) | 27 | #define cpu_reset __cpu_fn(CPU_NAME,_reset) |
28 | #define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle) | 28 | #define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle) |
29 | #define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area) | 29 | #define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area) |
30 | #define cpu_do_switch_mm __cpu_fn(CPU_NAME,_switch_mm) | 30 | #define cpu_do_switch_mm __cpu_fn(CPU_NAME,_switch_mm) |
31 | #define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte) | 31 | #define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte) |
32 | 32 | ||
33 | #include <asm/page.h> | 33 | #include <asm/page.h> |
34 | 34 | ||
35 | struct mm_struct; | 35 | struct mm_struct; |
36 | 36 | ||
37 | /* declare all the functions as extern */ | 37 | /* declare all the functions as extern */ |
38 | extern void cpu_proc_init(void); | 38 | extern void cpu_proc_init(void); |
39 | extern void cpu_proc_fin(void); | 39 | extern void cpu_proc_fin(void); |
40 | extern int cpu_do_idle(void); | 40 | extern int cpu_do_idle(void); |
41 | extern void cpu_dcache_clean_area(void *, int); | 41 | extern void cpu_dcache_clean_area(void *, int); |
42 | extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); | 42 | extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); |
43 | extern void cpu_set_pte(pte_t *ptep, pte_t pte); | 43 | extern void cpu_set_pte(pte_t *ptep, pte_t pte); |
44 | extern volatile void cpu_reset(unsigned long addr); | 44 | extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); |
45 | 45 |
include/asm-ppc/time.h
1 | /* | 1 | /* |
2 | * Common time prototypes and such for all ppc machines. | 2 | * Common time prototypes and such for all ppc machines. |
3 | * | 3 | * |
4 | * Written by Cort Dougan (cort@fsmlabs.com) to merge | 4 | * Written by Cort Dougan (cort@fsmlabs.com) to merge |
5 | * Paul Mackerras' version and mine for PReP and Pmac. | 5 | * Paul Mackerras' version and mine for PReP and Pmac. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifdef __KERNEL__ | 8 | #ifdef __KERNEL__ |
9 | #ifndef __ASM_TIME_H__ | 9 | #ifndef __ASM_TIME_H__ |
10 | #define __ASM_TIME_H__ | 10 | #define __ASM_TIME_H__ |
11 | 11 | ||
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/rtc.h> | 14 | #include <linux/rtc.h> |
15 | #include <linux/threads.h> | 15 | #include <linux/threads.h> |
16 | 16 | ||
17 | #include <asm/reg.h> | 17 | #include <asm/reg.h> |
18 | 18 | ||
19 | /* time.c */ | 19 | /* time.c */ |
20 | extern unsigned tb_ticks_per_jiffy; | 20 | extern unsigned tb_ticks_per_jiffy; |
21 | extern unsigned tb_to_us; | 21 | extern unsigned tb_to_us; |
22 | extern unsigned tb_last_stamp; | 22 | extern unsigned tb_last_stamp; |
23 | extern unsigned long disarm_decr[NR_CPUS]; | 23 | extern unsigned long disarm_decr[NR_CPUS]; |
24 | 24 | ||
25 | extern void to_tm(int tim, struct rtc_time * tm); | 25 | extern void to_tm(int tim, struct rtc_time * tm); |
26 | extern time_t last_rtc_update; | 26 | extern time_t last_rtc_update; |
27 | 27 | ||
28 | extern void set_dec_cpu6(unsigned int val); | 28 | extern void set_dec_cpu6(unsigned int val); |
29 | 29 | ||
30 | int via_calibrate_decr(void); | 30 | int via_calibrate_decr(void); |
31 | 31 | ||
32 | /* Accessor functions for the decrementer register. | 32 | /* Accessor functions for the decrementer register. |
33 | * The 4xx doesn't even have a decrementer. I tried to use the | 33 | * The 4xx doesn't even have a decrementer. I tried to use the |
34 | * generic timer interrupt code, which seems OK, with the 4xx PIT | 34 | * generic timer interrupt code, which seems OK, with the 4xx PIT |
35 | * in auto-reload mode. The problem is PIT stops counting when it | 35 | * in auto-reload mode. The problem is PIT stops counting when it |
36 | * hits zero. If it would wrap, we could use it just like a decrementer. | 36 | * hits zero. If it would wrap, we could use it just like a decrementer. |
37 | */ | 37 | */ |
38 | static __inline__ unsigned int get_dec(void) | 38 | static __inline__ unsigned int get_dec(void) |
39 | { | 39 | { |
40 | #if defined(CONFIG_40x) | 40 | #if defined(CONFIG_40x) |
41 | return (mfspr(SPRN_PIT)); | 41 | return (mfspr(SPRN_PIT)); |
42 | #else | 42 | #else |
43 | return (mfspr(SPRN_DEC)); | 43 | return (mfspr(SPRN_DEC)); |
44 | #endif | 44 | #endif |
45 | } | 45 | } |
46 | 46 | ||
47 | static __inline__ void set_dec(unsigned int val) | 47 | static __inline__ void set_dec(unsigned int val) |
48 | { | 48 | { |
49 | #if defined(CONFIG_40x) | 49 | #if defined(CONFIG_40x) |
50 | return; /* Have to let it auto-reload */ | 50 | return; /* Have to let it auto-reload */ |
51 | #elif defined(CONFIG_8xx_CPU6) | 51 | #elif defined(CONFIG_8xx_CPU6) |
52 | set_dec_cpu6(val); | 52 | set_dec_cpu6(val); |
53 | #else | 53 | #else |
54 | mtspr(SPRN_DEC, val); | 54 | mtspr(SPRN_DEC, val); |
55 | #endif | 55 | #endif |
56 | } | 56 | } |
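A hedged sketch of the usual pattern the two accessors above exist for: the timer interrupt re-arms the decrementer for the next jiffy. The handler name is a placeholder, and a real handler also compensates for how far the decrementer has already run past zero.

    static void timer_tick_body(void)
    {
        /* The decrementer has underflowed (or, on 4xx, the PIT has
         * auto-reloaded); arm it for the next jiffy.  On 40x set_dec()
         * is a no-op by design. */
        set_dec(tb_ticks_per_jiffy);

        /* ... jiffies update and process accounting would go here ... */
    }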
57 | 57 | ||
58 | /* Accessor functions for the timebase (RTC on 601) registers. */ | 58 | /* Accessor functions for the timebase (RTC on 601) registers. */ |
59 | /* If one day CONFIG_POWER is added just define __USE_RTC as 1 */ | 59 | /* If one day CONFIG_POWER is added just define __USE_RTC as 1 */ |
60 | #ifdef CONFIG_6xx | 60 | #ifdef CONFIG_6xx |
61 | extern __inline__ int const __USE_RTC(void) { | 61 | extern __inline__ int __attribute_pure__ __USE_RTC(void) { |
62 | return (mfspr(SPRN_PVR)>>16) == 1; | 62 | return (mfspr(SPRN_PVR)>>16) == 1; |
63 | } | 63 | } |
64 | #else | 64 | #else |
65 | #define __USE_RTC() 0 | 65 | #define __USE_RTC() 0 |
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | extern __inline__ unsigned long get_tbl(void) { | 68 | extern __inline__ unsigned long get_tbl(void) { |
69 | unsigned long tbl; | 69 | unsigned long tbl; |
70 | #if defined(CONFIG_403GCX) | 70 | #if defined(CONFIG_403GCX) |
71 | asm volatile("mfspr %0, 0x3dd" : "=r" (tbl)); | 71 | asm volatile("mfspr %0, 0x3dd" : "=r" (tbl)); |
72 | #else | 72 | #else |
73 | asm volatile("mftb %0" : "=r" (tbl)); | 73 | asm volatile("mftb %0" : "=r" (tbl)); |
74 | #endif | 74 | #endif |
75 | return tbl; | 75 | return tbl; |
76 | } | 76 | } |
77 | 77 | ||
78 | extern __inline__ unsigned long get_tbu(void) { | 78 | extern __inline__ unsigned long get_tbu(void) { |
79 | unsigned long tbl; | 79 | unsigned long tbl; |
80 | #if defined(CONFIG_403GCX) | 80 | #if defined(CONFIG_403GCX) |
81 | asm volatile("mfspr %0, 0x3dc" : "=r" (tbl)); | 81 | asm volatile("mfspr %0, 0x3dc" : "=r" (tbl)); |
82 | #else | 82 | #else |
83 | asm volatile("mftbu %0" : "=r" (tbl)); | 83 | asm volatile("mftbu %0" : "=r" (tbl)); |
84 | #endif | 84 | #endif |
85 | return tbl; | 85 | return tbl; |
86 | } | 86 | } |
87 | 87 | ||
88 | extern __inline__ void set_tb(unsigned int upper, unsigned int lower) | 88 | extern __inline__ void set_tb(unsigned int upper, unsigned int lower) |
89 | { | 89 | { |
90 | mtspr(SPRN_TBWL, 0); | 90 | mtspr(SPRN_TBWL, 0); |
91 | mtspr(SPRN_TBWU, upper); | 91 | mtspr(SPRN_TBWU, upper); |
92 | mtspr(SPRN_TBWL, lower); | 92 | mtspr(SPRN_TBWL, lower); |
93 | } | 93 | } |
94 | 94 | ||
95 | extern __inline__ unsigned long get_rtcl(void) { | 95 | extern __inline__ unsigned long get_rtcl(void) { |
96 | unsigned long rtcl; | 96 | unsigned long rtcl; |
97 | asm volatile("mfrtcl %0" : "=r" (rtcl)); | 97 | asm volatile("mfrtcl %0" : "=r" (rtcl)); |
98 | return rtcl; | 98 | return rtcl; |
99 | } | 99 | } |
100 | 100 | ||
101 | extern __inline__ unsigned long get_rtcu(void) | 101 | extern __inline__ unsigned long get_rtcu(void) |
102 | { | 102 | { |
103 | unsigned long rtcu; | 103 | unsigned long rtcu; |
104 | asm volatile("mfrtcu %0" : "=r" (rtcu)); | 104 | asm volatile("mfrtcu %0" : "=r" (rtcu)); |
105 | return rtcu; | 105 | return rtcu; |
106 | } | 106 | } |
107 | 107 | ||
108 | extern __inline__ unsigned get_native_tbl(void) { | 108 | extern __inline__ unsigned get_native_tbl(void) { |
109 | if (__USE_RTC()) | 109 | if (__USE_RTC()) |
110 | return get_rtcl(); | 110 | return get_rtcl(); |
111 | else | 111 | else |
112 | return get_tbl(); | 112 | return get_tbl(); |
113 | } | 113 | } |
114 | 114 | ||
115 | /* On machines with RTC, this function can only be used safely | 115 | /* On machines with RTC, this function can only be used safely |
116 | * after the timestamp and for 1 second. It is only used by gettimeofday | 116 | * after the timestamp and for 1 second. It is only used by gettimeofday |
117 | * however so it should not matter. | 117 | * however so it should not matter. |
118 | */ | 118 | */ |
119 | extern __inline__ unsigned tb_ticks_since(unsigned tstamp) { | 119 | extern __inline__ unsigned tb_ticks_since(unsigned tstamp) { |
120 | if (__USE_RTC()) { | 120 | if (__USE_RTC()) { |
121 | int delta = get_rtcl() - tstamp; | 121 | int delta = get_rtcl() - tstamp; |
122 | return delta<0 ? delta + 1000000000 : delta; | 122 | return delta<0 ? delta + 1000000000 : delta; |
123 | } else { | 123 | } else { |
124 | return get_tbl() - tstamp; | 124 | return get_tbl() - tstamp; |
125 | } | 125 | } |
126 | } | 126 | } |
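The RTC branch above relies on mfrtcl counting 0..999,999,999 and wrapping once per second, which is why the signed delta plus 10^9 correction is enough for intervals under a second. A hedged host-side check of that arithmetic, with the register read replaced by plain arguments:

    #include <assert.h>

    /* Same arithmetic as the RTC branch of tb_ticks_since(). */
    static unsigned rtc_ticks_since(unsigned now, unsigned tstamp)
    {
        int delta = now - tstamp;                 /* negative across a wrap */
        return delta < 0 ? delta + 1000000000 : delta;
    }

    int main(void)
    {
        /* 999,999,000 -> wrap -> 500: 1500 ticks elapsed */
        assert(rtc_ticks_since(500u, 999999000u) == 1500u);
        return 0;
    }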
127 | 127 | ||
128 | #if 0 | 128 | #if 0 |
129 | extern __inline__ unsigned long get_bin_rtcl(void) { | 129 | extern __inline__ unsigned long get_bin_rtcl(void) { |
130 | unsigned long rtcl, rtcu1, rtcu2; | 130 | unsigned long rtcl, rtcu1, rtcu2; |
131 | asm volatile("\ | 131 | asm volatile("\ |
132 | 1: mfrtcu %0\n\ | 132 | 1: mfrtcu %0\n\ |
133 | mfrtcl %1\n\ | 133 | mfrtcl %1\n\ |
134 | mfrtcu %2\n\ | 134 | mfrtcu %2\n\ |
135 | cmpw %0,%2\n\ | 135 | cmpw %0,%2\n\ |
136 | bne- 1b\n" | 136 | bne- 1b\n" |
137 | : "=r" (rtcu1), "=r" (rtcl), "=r" (rtcu2) | 137 | : "=r" (rtcu1), "=r" (rtcl), "=r" (rtcu2) |
138 | : : "cr0"); | 138 | : : "cr0"); |
139 | return rtcu2*1000000000+rtcl; | 139 | return rtcu2*1000000000+rtcl; |
140 | } | 140 | } |
141 | 141 | ||
142 | extern __inline__ unsigned binary_tbl(void) { | 142 | extern __inline__ unsigned binary_tbl(void) { |
143 | if (__USE_RTC()) | 143 | if (__USE_RTC()) |
144 | return get_bin_rtcl(); | 144 | return get_bin_rtcl(); |
145 | else | 145 | else |
146 | return get_tbl(); | 146 | return get_tbl(); |
147 | } | 147 | } |
148 | #endif | 148 | #endif |
149 | 149 | ||
150 | /* Use mulhwu to scale processor timebase to timeval */ | 150 | /* Use mulhwu to scale processor timebase to timeval */ |
151 | /* Specifically, this computes (x * y) / 2^32. -- paulus */ | 151 | /* Specifically, this computes (x * y) / 2^32. -- paulus */ |
152 | #define mulhwu(x,y) \ | 152 | #define mulhwu(x,y) \ |
153 | ({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;}) | 153 | ({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;}) |
154 | 154 | ||
155 | unsigned mulhwu_scale_factor(unsigned, unsigned); | 155 | unsigned mulhwu_scale_factor(unsigned, unsigned); |
156 | #endif /* __ASM_TIME_H__ */ | 156 | #endif /* __ASM_TIME_H__ */ |
157 | #endif /* __KERNEL__ */ | 157 | #endif /* __KERNEL__ */ |
158 | 158 |
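The mulhwu() macro above computes the high 32 bits of a 32x32 multiply, i.e. (x * y) / 2^32, and mulhwu_scale_factor() picks a factor so that this product converts timebase ticks into another unit. A hedged sketch of the composition: mulhwu_portable() only mimics the PPC instruction for illustration, and timebase_frequency_hz is a hypothetical input, not a symbol from this header.

    /* Portable stand-in for the mulhwu inline asm: high word of x*y. */
    static unsigned int mulhwu_portable(unsigned int x, unsigned int y)
    {
        return (unsigned int)(((unsigned long long)x * y) >> 32);
    }

    /* With tb_to_us = mulhwu_scale_factor(timebase_frequency_hz, 1000000),
     * elapsed microseconds are approximately: */
    static unsigned int ticks_to_us(unsigned int ticks, unsigned int tb_to_us)
    {
        return mulhwu_portable(ticks, tb_to_us);
    }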