Commit 21f585073d6347651f2262da187606fa1c4ee16d
Committed by
Benjamin Herrenschmidt
1 parent
04c32a5168
Exists in
ti-lsk-linux-4.1.y
and in
10 other branches
powerpc: Fix smp_processor_id() in preemptible splat in set_breakpoint
Currently, on 8641D, which doesn't set CONFIG_HAVE_HW_BREAKPOINT we get the following splat: BUG: using smp_processor_id() in preemptible [00000000] code: login/1382 caller is set_breakpoint+0x1c/0xa0 CPU: 0 PID: 1382 Comm: login Not tainted 3.15.0-rc3-00041-g2aafe1a4d451 #1 Call Trace: [decd5d80] [c0008dc4] show_stack+0x50/0x158 (unreliable) [decd5dc0] [c03c6fa0] dump_stack+0x7c/0xdc [decd5de0] [c01f8818] check_preemption_disabled+0xf4/0x104 [decd5e00] [c00086b8] set_breakpoint+0x1c/0xa0 [decd5e10] [c00d4530] flush_old_exec+0x2bc/0x588 [decd5e40] [c011c468] load_elf_binary+0x2ac/0x1164 [decd5ec0] [c00d35f8] search_binary_handler+0xc4/0x1f8 [decd5ef0] [c00d4ee8] do_execve+0x3d8/0x4b8 [decd5f40] [c001185c] ret_from_syscall+0x0/0x38 --- Exception: c01 at 0xfeee554 LR = 0xfeee7d4 The call path in this case is: flush_thread --> set_debug_reg_defaults --> set_breakpoint --> __get_cpu_var Since preemption is enabled in the cleanup of flush_thread, and there is no need to disable it, introduce the distinction between set_breakpoint and __set_breakpoint, leaving only the flush_thread instance as the current user of set_breakpoint. Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Showing 6 changed files with 17 additions and 9 deletions Inline Diff
arch/powerpc/include/asm/debug.h
1 | /* | 1 | /* |
2 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | 2 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> |
3 | */ | 3 | */ |
4 | #ifndef _ASM_POWERPC_DEBUG_H | 4 | #ifndef _ASM_POWERPC_DEBUG_H |
5 | #define _ASM_POWERPC_DEBUG_H | 5 | #define _ASM_POWERPC_DEBUG_H |
6 | 6 | ||
7 | #include <asm/hw_breakpoint.h> | 7 | #include <asm/hw_breakpoint.h> |
8 | 8 | ||
9 | struct pt_regs; | 9 | struct pt_regs; |
10 | 10 | ||
11 | extern struct dentry *powerpc_debugfs_root; | 11 | extern struct dentry *powerpc_debugfs_root; |
12 | 12 | ||
13 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | 13 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
14 | 14 | ||
15 | extern int (*__debugger)(struct pt_regs *regs); | 15 | extern int (*__debugger)(struct pt_regs *regs); |
16 | extern int (*__debugger_ipi)(struct pt_regs *regs); | 16 | extern int (*__debugger_ipi)(struct pt_regs *regs); |
17 | extern int (*__debugger_bpt)(struct pt_regs *regs); | 17 | extern int (*__debugger_bpt)(struct pt_regs *regs); |
18 | extern int (*__debugger_sstep)(struct pt_regs *regs); | 18 | extern int (*__debugger_sstep)(struct pt_regs *regs); |
19 | extern int (*__debugger_iabr_match)(struct pt_regs *regs); | 19 | extern int (*__debugger_iabr_match)(struct pt_regs *regs); |
20 | extern int (*__debugger_break_match)(struct pt_regs *regs); | 20 | extern int (*__debugger_break_match)(struct pt_regs *regs); |
21 | extern int (*__debugger_fault_handler)(struct pt_regs *regs); | 21 | extern int (*__debugger_fault_handler)(struct pt_regs *regs); |
22 | 22 | ||
23 | #define DEBUGGER_BOILERPLATE(__NAME) \ | 23 | #define DEBUGGER_BOILERPLATE(__NAME) \ |
24 | static inline int __NAME(struct pt_regs *regs) \ | 24 | static inline int __NAME(struct pt_regs *regs) \ |
25 | { \ | 25 | { \ |
26 | if (unlikely(__ ## __NAME)) \ | 26 | if (unlikely(__ ## __NAME)) \ |
27 | return __ ## __NAME(regs); \ | 27 | return __ ## __NAME(regs); \ |
28 | return 0; \ | 28 | return 0; \ |
29 | } | 29 | } |
30 | 30 | ||
31 | DEBUGGER_BOILERPLATE(debugger) | 31 | DEBUGGER_BOILERPLATE(debugger) |
32 | DEBUGGER_BOILERPLATE(debugger_ipi) | 32 | DEBUGGER_BOILERPLATE(debugger_ipi) |
33 | DEBUGGER_BOILERPLATE(debugger_bpt) | 33 | DEBUGGER_BOILERPLATE(debugger_bpt) |
34 | DEBUGGER_BOILERPLATE(debugger_sstep) | 34 | DEBUGGER_BOILERPLATE(debugger_sstep) |
35 | DEBUGGER_BOILERPLATE(debugger_iabr_match) | 35 | DEBUGGER_BOILERPLATE(debugger_iabr_match) |
36 | DEBUGGER_BOILERPLATE(debugger_break_match) | 36 | DEBUGGER_BOILERPLATE(debugger_break_match) |
37 | DEBUGGER_BOILERPLATE(debugger_fault_handler) | 37 | DEBUGGER_BOILERPLATE(debugger_fault_handler) |
38 | 38 | ||
39 | #else | 39 | #else |
40 | static inline int debugger(struct pt_regs *regs) { return 0; } | 40 | static inline int debugger(struct pt_regs *regs) { return 0; } |
41 | static inline int debugger_ipi(struct pt_regs *regs) { return 0; } | 41 | static inline int debugger_ipi(struct pt_regs *regs) { return 0; } |
42 | static inline int debugger_bpt(struct pt_regs *regs) { return 0; } | 42 | static inline int debugger_bpt(struct pt_regs *regs) { return 0; } |
43 | static inline int debugger_sstep(struct pt_regs *regs) { return 0; } | 43 | static inline int debugger_sstep(struct pt_regs *regs) { return 0; } |
44 | static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } | 44 | static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } |
45 | static inline int debugger_break_match(struct pt_regs *regs) { return 0; } | 45 | static inline int debugger_break_match(struct pt_regs *regs) { return 0; } |
46 | static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } | 46 | static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | void set_breakpoint(struct arch_hw_breakpoint *brk); | 49 | void set_breakpoint(struct arch_hw_breakpoint *brk); |
50 | void __set_breakpoint(struct arch_hw_breakpoint *brk); | ||
50 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 51 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
51 | extern void do_send_trap(struct pt_regs *regs, unsigned long address, | 52 | extern void do_send_trap(struct pt_regs *regs, unsigned long address, |
52 | unsigned long error_code, int signal_code, int brkpt); | 53 | unsigned long error_code, int signal_code, int brkpt); |
53 | #else | 54 | #else |
54 | 55 | ||
55 | extern void do_break(struct pt_regs *regs, unsigned long address, | 56 | extern void do_break(struct pt_regs *regs, unsigned long address, |
56 | unsigned long error_code); | 57 | unsigned long error_code); |
57 | #endif | 58 | #endif |
58 | 59 | ||
59 | #endif /* _ASM_POWERPC_DEBUG_H */ | 60 | #endif /* _ASM_POWERPC_DEBUG_H */ |
60 | 61 |
arch/powerpc/include/asm/hw_breakpoint.h
1 | /* | 1 | /* |
2 | * PowerPC BookIII S hardware breakpoint definitions | 2 | * PowerPC BookIII S hardware breakpoint definitions |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation; either version 2 of the License, or | 6 | * the Free Software Foundation; either version 2 of the License, or |
7 | * (at your option) any later version. | 7 | * (at your option) any later version. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
17 | * | 17 | * |
18 | * Copyright 2010, IBM Corporation. | 18 | * Copyright 2010, IBM Corporation. |
19 | * Author: K.Prasad <prasad@linux.vnet.ibm.com> | 19 | * Author: K.Prasad <prasad@linux.vnet.ibm.com> |
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #ifndef _PPC_BOOK3S_64_HW_BREAKPOINT_H | 23 | #ifndef _PPC_BOOK3S_64_HW_BREAKPOINT_H |
24 | #define _PPC_BOOK3S_64_HW_BREAKPOINT_H | 24 | #define _PPC_BOOK3S_64_HW_BREAKPOINT_H |
25 | 25 | ||
26 | #ifdef __KERNEL__ | 26 | #ifdef __KERNEL__ |
27 | struct arch_hw_breakpoint { | 27 | struct arch_hw_breakpoint { |
28 | unsigned long address; | 28 | unsigned long address; |
29 | u16 type; | 29 | u16 type; |
30 | u16 len; /* length of the target data symbol */ | 30 | u16 len; /* length of the target data symbol */ |
31 | }; | 31 | }; |
32 | 32 | ||
33 | /* Note: Don't change the first 6 bits below as they are in the same order | 33 | /* Note: Don't change the first 6 bits below as they are in the same order |
34 | * as the dabr and dabrx. | 34 | * as the dabr and dabrx. |
35 | */ | 35 | */ |
36 | #define HW_BRK_TYPE_READ 0x01 | 36 | #define HW_BRK_TYPE_READ 0x01 |
37 | #define HW_BRK_TYPE_WRITE 0x02 | 37 | #define HW_BRK_TYPE_WRITE 0x02 |
38 | #define HW_BRK_TYPE_TRANSLATE 0x04 | 38 | #define HW_BRK_TYPE_TRANSLATE 0x04 |
39 | #define HW_BRK_TYPE_USER 0x08 | 39 | #define HW_BRK_TYPE_USER 0x08 |
40 | #define HW_BRK_TYPE_KERNEL 0x10 | 40 | #define HW_BRK_TYPE_KERNEL 0x10 |
41 | #define HW_BRK_TYPE_HYP 0x20 | 41 | #define HW_BRK_TYPE_HYP 0x20 |
42 | #define HW_BRK_TYPE_EXTRANEOUS_IRQ 0x80 | 42 | #define HW_BRK_TYPE_EXTRANEOUS_IRQ 0x80 |
43 | 43 | ||
44 | /* bits that overlap with the bottom 3 bits of the dabr */ | 44 | /* bits that overlap with the bottom 3 bits of the dabr */ |
45 | #define HW_BRK_TYPE_RDWR (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE) | 45 | #define HW_BRK_TYPE_RDWR (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE) |
46 | #define HW_BRK_TYPE_DABR (HW_BRK_TYPE_RDWR | HW_BRK_TYPE_TRANSLATE) | 46 | #define HW_BRK_TYPE_DABR (HW_BRK_TYPE_RDWR | HW_BRK_TYPE_TRANSLATE) |
47 | #define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \ | 47 | #define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \ |
48 | HW_BRK_TYPE_HYP) | 48 | HW_BRK_TYPE_HYP) |
49 | 49 | ||
50 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 50 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
51 | #include <linux/kdebug.h> | 51 | #include <linux/kdebug.h> |
52 | #include <asm/reg.h> | 52 | #include <asm/reg.h> |
53 | #include <asm/debug.h> | 53 | #include <asm/debug.h> |
54 | 54 | ||
55 | struct perf_event; | 55 | struct perf_event; |
56 | struct pmu; | 56 | struct pmu; |
57 | struct perf_sample_data; | 57 | struct perf_sample_data; |
58 | 58 | ||
59 | #define HW_BREAKPOINT_ALIGN 0x7 | 59 | #define HW_BREAKPOINT_ALIGN 0x7 |
60 | 60 | ||
61 | extern int hw_breakpoint_slots(int type); | 61 | extern int hw_breakpoint_slots(int type); |
62 | extern int arch_bp_generic_fields(int type, int *gen_bp_type); | 62 | extern int arch_bp_generic_fields(int type, int *gen_bp_type); |
63 | extern int arch_check_bp_in_kernelspace(struct perf_event *bp); | 63 | extern int arch_check_bp_in_kernelspace(struct perf_event *bp); |
64 | extern int arch_validate_hwbkpt_settings(struct perf_event *bp); | 64 | extern int arch_validate_hwbkpt_settings(struct perf_event *bp); |
65 | extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, | 65 | extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, |
66 | unsigned long val, void *data); | 66 | unsigned long val, void *data); |
67 | int arch_install_hw_breakpoint(struct perf_event *bp); | 67 | int arch_install_hw_breakpoint(struct perf_event *bp); |
68 | void arch_uninstall_hw_breakpoint(struct perf_event *bp); | 68 | void arch_uninstall_hw_breakpoint(struct perf_event *bp); |
69 | void hw_breakpoint_pmu_read(struct perf_event *bp); | 69 | void hw_breakpoint_pmu_read(struct perf_event *bp); |
70 | extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); | 70 | extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); |
71 | 71 | ||
72 | extern struct pmu perf_ops_bp; | 72 | extern struct pmu perf_ops_bp; |
73 | extern void ptrace_triggered(struct perf_event *bp, | 73 | extern void ptrace_triggered(struct perf_event *bp, |
74 | struct perf_sample_data *data, struct pt_regs *regs); | 74 | struct perf_sample_data *data, struct pt_regs *regs); |
75 | static inline void hw_breakpoint_disable(void) | 75 | static inline void hw_breakpoint_disable(void) |
76 | { | 76 | { |
77 | struct arch_hw_breakpoint brk; | 77 | struct arch_hw_breakpoint brk; |
78 | 78 | ||
79 | brk.address = 0; | 79 | brk.address = 0; |
80 | brk.type = 0; | 80 | brk.type = 0; |
81 | brk.len = 0; | 81 | brk.len = 0; |
82 | set_breakpoint(&brk); | 82 | __set_breakpoint(&brk); |
83 | } | 83 | } |
84 | extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); | 84 | extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); |
85 | 85 | ||
86 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ | 86 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ |
87 | static inline void hw_breakpoint_disable(void) { } | 87 | static inline void hw_breakpoint_disable(void) { } |
88 | static inline void thread_change_pc(struct task_struct *tsk, | 88 | static inline void thread_change_pc(struct task_struct *tsk, |
89 | struct pt_regs *regs) { } | 89 | struct pt_regs *regs) { } |
90 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 90 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
91 | #endif /* __KERNEL__ */ | 91 | #endif /* __KERNEL__ */ |
92 | #endif /* _PPC_BOOK3S_64_HW_BREAKPOINT_H */ | 92 | #endif /* _PPC_BOOK3S_64_HW_BREAKPOINT_H */ |
93 | 93 |
arch/powerpc/kernel/hw_breakpoint.c
1 | /* | 1 | /* |
2 | * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, | 2 | * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, |
3 | * using the CPU's debug registers. Derived from | 3 | * using the CPU's debug registers. Derived from |
4 | * "arch/x86/kernel/hw_breakpoint.c" | 4 | * "arch/x86/kernel/hw_breakpoint.c" |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | * | 19 | * |
20 | * Copyright 2010 IBM Corporation | 20 | * Copyright 2010 IBM Corporation |
21 | * Author: K.Prasad <prasad@linux.vnet.ibm.com> | 21 | * Author: K.Prasad <prasad@linux.vnet.ibm.com> |
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/hw_breakpoint.h> | 25 | #include <linux/hw_breakpoint.h> |
26 | #include <linux/notifier.h> | 26 | #include <linux/notifier.h> |
27 | #include <linux/kprobes.h> | 27 | #include <linux/kprobes.h> |
28 | #include <linux/percpu.h> | 28 | #include <linux/percpu.h> |
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
31 | #include <linux/smp.h> | 31 | #include <linux/smp.h> |
32 | 32 | ||
33 | #include <asm/hw_breakpoint.h> | 33 | #include <asm/hw_breakpoint.h> |
34 | #include <asm/processor.h> | 34 | #include <asm/processor.h> |
35 | #include <asm/sstep.h> | 35 | #include <asm/sstep.h> |
36 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * Stores the breakpoints currently in use on each breakpoint address | 39 | * Stores the breakpoints currently in use on each breakpoint address |
40 | * register for every cpu | 40 | * register for every cpu |
41 | */ | 41 | */ |
42 | static DEFINE_PER_CPU(struct perf_event *, bp_per_reg); | 42 | static DEFINE_PER_CPU(struct perf_event *, bp_per_reg); |
43 | 43 | ||
44 | /* | 44 | /* |
45 | * Returns total number of data or instruction breakpoints available. | 45 | * Returns total number of data or instruction breakpoints available. |
46 | */ | 46 | */ |
47 | int hw_breakpoint_slots(int type) | 47 | int hw_breakpoint_slots(int type) |
48 | { | 48 | { |
49 | if (type == TYPE_DATA) | 49 | if (type == TYPE_DATA) |
50 | return HBP_NUM; | 50 | return HBP_NUM; |
51 | return 0; /* no instruction breakpoints available */ | 51 | return 0; /* no instruction breakpoints available */ |
52 | } | 52 | } |
53 | 53 | ||
54 | /* | 54 | /* |
55 | * Install a perf counter breakpoint. | 55 | * Install a perf counter breakpoint. |
56 | * | 56 | * |
57 | * We seek a free debug address register and use it for this | 57 | * We seek a free debug address register and use it for this |
58 | * breakpoint. | 58 | * breakpoint. |
59 | * | 59 | * |
60 | * Atomic: we hold the counter->ctx->lock and we only handle variables | 60 | * Atomic: we hold the counter->ctx->lock and we only handle variables |
61 | * and registers local to this cpu. | 61 | * and registers local to this cpu. |
62 | */ | 62 | */ |
63 | int arch_install_hw_breakpoint(struct perf_event *bp) | 63 | int arch_install_hw_breakpoint(struct perf_event *bp) |
64 | { | 64 | { |
65 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | 65 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); |
66 | struct perf_event **slot = &__get_cpu_var(bp_per_reg); | 66 | struct perf_event **slot = &__get_cpu_var(bp_per_reg); |
67 | 67 | ||
68 | *slot = bp; | 68 | *slot = bp; |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * Do not install DABR values if the instruction must be single-stepped. | 71 | * Do not install DABR values if the instruction must be single-stepped. |
72 | * If so, DABR will be populated in single_step_dabr_instruction(). | 72 | * If so, DABR will be populated in single_step_dabr_instruction(). |
73 | */ | 73 | */ |
74 | if (current->thread.last_hit_ubp != bp) | 74 | if (current->thread.last_hit_ubp != bp) |
75 | set_breakpoint(info); | 75 | __set_breakpoint(info); |
76 | 76 | ||
77 | return 0; | 77 | return 0; |
78 | } | 78 | } |
79 | 79 | ||
80 | /* | 80 | /* |
81 | * Uninstall the breakpoint contained in the given counter. | 81 | * Uninstall the breakpoint contained in the given counter. |
82 | * | 82 | * |
83 | * First we search the debug address register it uses and then we disable | 83 | * First we search the debug address register it uses and then we disable |
84 | * it. | 84 | * it. |
85 | * | 85 | * |
86 | * Atomic: we hold the counter->ctx->lock and we only handle variables | 86 | * Atomic: we hold the counter->ctx->lock and we only handle variables |
87 | * and registers local to this cpu. | 87 | * and registers local to this cpu. |
88 | */ | 88 | */ |
89 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) | 89 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) |
90 | { | 90 | { |
91 | struct perf_event **slot = &__get_cpu_var(bp_per_reg); | 91 | struct perf_event **slot = &__get_cpu_var(bp_per_reg); |
92 | 92 | ||
93 | if (*slot != bp) { | 93 | if (*slot != bp) { |
94 | WARN_ONCE(1, "Can't find the breakpoint"); | 94 | WARN_ONCE(1, "Can't find the breakpoint"); |
95 | return; | 95 | return; |
96 | } | 96 | } |
97 | 97 | ||
98 | *slot = NULL; | 98 | *slot = NULL; |
99 | hw_breakpoint_disable(); | 99 | hw_breakpoint_disable(); |
100 | } | 100 | } |
101 | 101 | ||
102 | /* | 102 | /* |
103 | * Perform cleanup of arch-specific counters during unregistration | 103 | * Perform cleanup of arch-specific counters during unregistration |
104 | * of the perf-event | 104 | * of the perf-event |
105 | */ | 105 | */ |
106 | void arch_unregister_hw_breakpoint(struct perf_event *bp) | 106 | void arch_unregister_hw_breakpoint(struct perf_event *bp) |
107 | { | 107 | { |
108 | /* | 108 | /* |
109 | * If the breakpoint is unregistered between a hw_breakpoint_handler() | 109 | * If the breakpoint is unregistered between a hw_breakpoint_handler() |
110 | * and the single_step_dabr_instruction(), then cleanup the breakpoint | 110 | * and the single_step_dabr_instruction(), then cleanup the breakpoint |
111 | * restoration variables to prevent dangling pointers. | 111 | * restoration variables to prevent dangling pointers. |
112 | */ | 112 | */ |
113 | if (bp->ctx && bp->ctx->task) | 113 | if (bp->ctx && bp->ctx->task) |
114 | bp->ctx->task->thread.last_hit_ubp = NULL; | 114 | bp->ctx->task->thread.last_hit_ubp = NULL; |
115 | } | 115 | } |
116 | 116 | ||
117 | /* | 117 | /* |
118 | * Check for virtual address in kernel space. | 118 | * Check for virtual address in kernel space. |
119 | */ | 119 | */ |
120 | int arch_check_bp_in_kernelspace(struct perf_event *bp) | 120 | int arch_check_bp_in_kernelspace(struct perf_event *bp) |
121 | { | 121 | { |
122 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | 122 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); |
123 | 123 | ||
124 | return is_kernel_addr(info->address); | 124 | return is_kernel_addr(info->address); |
125 | } | 125 | } |
126 | 126 | ||
127 | int arch_bp_generic_fields(int type, int *gen_bp_type) | 127 | int arch_bp_generic_fields(int type, int *gen_bp_type) |
128 | { | 128 | { |
129 | *gen_bp_type = 0; | 129 | *gen_bp_type = 0; |
130 | if (type & HW_BRK_TYPE_READ) | 130 | if (type & HW_BRK_TYPE_READ) |
131 | *gen_bp_type |= HW_BREAKPOINT_R; | 131 | *gen_bp_type |= HW_BREAKPOINT_R; |
132 | if (type & HW_BRK_TYPE_WRITE) | 132 | if (type & HW_BRK_TYPE_WRITE) |
133 | *gen_bp_type |= HW_BREAKPOINT_W; | 133 | *gen_bp_type |= HW_BREAKPOINT_W; |
134 | if (*gen_bp_type == 0) | 134 | if (*gen_bp_type == 0) |
135 | return -EINVAL; | 135 | return -EINVAL; |
136 | return 0; | 136 | return 0; |
137 | } | 137 | } |
138 | 138 | ||
139 | /* | 139 | /* |
140 | * Validate the arch-specific HW Breakpoint register settings | 140 | * Validate the arch-specific HW Breakpoint register settings |
141 | */ | 141 | */ |
142 | int arch_validate_hwbkpt_settings(struct perf_event *bp) | 142 | int arch_validate_hwbkpt_settings(struct perf_event *bp) |
143 | { | 143 | { |
144 | int ret = -EINVAL, length_max; | 144 | int ret = -EINVAL, length_max; |
145 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | 145 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); |
146 | 146 | ||
147 | if (!bp) | 147 | if (!bp) |
148 | return ret; | 148 | return ret; |
149 | 149 | ||
150 | info->type = HW_BRK_TYPE_TRANSLATE; | 150 | info->type = HW_BRK_TYPE_TRANSLATE; |
151 | if (bp->attr.bp_type & HW_BREAKPOINT_R) | 151 | if (bp->attr.bp_type & HW_BREAKPOINT_R) |
152 | info->type |= HW_BRK_TYPE_READ; | 152 | info->type |= HW_BRK_TYPE_READ; |
153 | if (bp->attr.bp_type & HW_BREAKPOINT_W) | 153 | if (bp->attr.bp_type & HW_BREAKPOINT_W) |
154 | info->type |= HW_BRK_TYPE_WRITE; | 154 | info->type |= HW_BRK_TYPE_WRITE; |
155 | if (info->type == HW_BRK_TYPE_TRANSLATE) | 155 | if (info->type == HW_BRK_TYPE_TRANSLATE) |
156 | /* must set at least read or write */ | 156 | /* must set at least read or write */ |
157 | return ret; | 157 | return ret; |
158 | if (!(bp->attr.exclude_user)) | 158 | if (!(bp->attr.exclude_user)) |
159 | info->type |= HW_BRK_TYPE_USER; | 159 | info->type |= HW_BRK_TYPE_USER; |
160 | if (!(bp->attr.exclude_kernel)) | 160 | if (!(bp->attr.exclude_kernel)) |
161 | info->type |= HW_BRK_TYPE_KERNEL; | 161 | info->type |= HW_BRK_TYPE_KERNEL; |
162 | if (!(bp->attr.exclude_hv)) | 162 | if (!(bp->attr.exclude_hv)) |
163 | info->type |= HW_BRK_TYPE_HYP; | 163 | info->type |= HW_BRK_TYPE_HYP; |
164 | info->address = bp->attr.bp_addr; | 164 | info->address = bp->attr.bp_addr; |
165 | info->len = bp->attr.bp_len; | 165 | info->len = bp->attr.bp_len; |
166 | 166 | ||
167 | /* | 167 | /* |
168 | * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) | 168 | * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) |
169 | * and breakpoint addresses are aligned to nearest double-word | 169 | * and breakpoint addresses are aligned to nearest double-word |
170 | * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the | 170 | * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the |
171 | * 'symbolsize' should satisfy the check below. | 171 | * 'symbolsize' should satisfy the check below. |
172 | */ | 172 | */ |
173 | length_max = 8; /* DABR */ | 173 | length_max = 8; /* DABR */ |
174 | if (cpu_has_feature(CPU_FTR_DAWR)) { | 174 | if (cpu_has_feature(CPU_FTR_DAWR)) { |
175 | length_max = 512 ; /* 64 doublewords */ | 175 | length_max = 512 ; /* 64 doublewords */ |
176 | /* DAWR region can't cross 512 boundary */ | 176 | /* DAWR region can't cross 512 boundary */ |
177 | if ((bp->attr.bp_addr >> 10) != | 177 | if ((bp->attr.bp_addr >> 10) != |
178 | ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10)) | 178 | ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10)) |
179 | return -EINVAL; | 179 | return -EINVAL; |
180 | } | 180 | } |
181 | if (info->len > | 181 | if (info->len > |
182 | (length_max - (info->address & HW_BREAKPOINT_ALIGN))) | 182 | (length_max - (info->address & HW_BREAKPOINT_ALIGN))) |
183 | return -EINVAL; | 183 | return -EINVAL; |
184 | return 0; | 184 | return 0; |
185 | } | 185 | } |
186 | 186 | ||
187 | /* | 187 | /* |
188 | * Restores the breakpoint on the debug registers. | 188 | * Restores the breakpoint on the debug registers. |
189 | * Invoke this function if it is known that the execution context is | 189 | * Invoke this function if it is known that the execution context is |
190 | * about to change to cause loss of MSR_SE settings. | 190 | * about to change to cause loss of MSR_SE settings. |
191 | */ | 191 | */ |
192 | void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) | 192 | void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) |
193 | { | 193 | { |
194 | struct arch_hw_breakpoint *info; | 194 | struct arch_hw_breakpoint *info; |
195 | 195 | ||
196 | if (likely(!tsk->thread.last_hit_ubp)) | 196 | if (likely(!tsk->thread.last_hit_ubp)) |
197 | return; | 197 | return; |
198 | 198 | ||
199 | info = counter_arch_bp(tsk->thread.last_hit_ubp); | 199 | info = counter_arch_bp(tsk->thread.last_hit_ubp); |
200 | regs->msr &= ~MSR_SE; | 200 | regs->msr &= ~MSR_SE; |
201 | set_breakpoint(info); | 201 | __set_breakpoint(info); |
202 | tsk->thread.last_hit_ubp = NULL; | 202 | tsk->thread.last_hit_ubp = NULL; |
203 | } | 203 | } |
204 | 204 | ||
205 | /* | 205 | /* |
206 | * Handle debug exception notifications. | 206 | * Handle debug exception notifications. |
207 | */ | 207 | */ |
208 | int __kprobes hw_breakpoint_handler(struct die_args *args) | 208 | int __kprobes hw_breakpoint_handler(struct die_args *args) |
209 | { | 209 | { |
210 | int rc = NOTIFY_STOP; | 210 | int rc = NOTIFY_STOP; |
211 | struct perf_event *bp; | 211 | struct perf_event *bp; |
212 | struct pt_regs *regs = args->regs; | 212 | struct pt_regs *regs = args->regs; |
213 | int stepped = 1; | 213 | int stepped = 1; |
214 | struct arch_hw_breakpoint *info; | 214 | struct arch_hw_breakpoint *info; |
215 | unsigned int instr; | 215 | unsigned int instr; |
216 | unsigned long dar = regs->dar; | 216 | unsigned long dar = regs->dar; |
217 | 217 | ||
218 | /* Disable breakpoints during exception handling */ | 218 | /* Disable breakpoints during exception handling */ |
219 | hw_breakpoint_disable(); | 219 | hw_breakpoint_disable(); |
220 | 220 | ||
221 | /* | 221 | /* |
222 | * The counter may be concurrently released but that can only | 222 | * The counter may be concurrently released but that can only |
223 | * occur from a call_rcu() path. We can then safely fetch | 223 | * occur from a call_rcu() path. We can then safely fetch |
224 | * the breakpoint, use its callback, touch its counter | 224 | * the breakpoint, use its callback, touch its counter |
225 | * while we are in an rcu_read_lock() path. | 225 | * while we are in an rcu_read_lock() path. |
226 | */ | 226 | */ |
227 | rcu_read_lock(); | 227 | rcu_read_lock(); |
228 | 228 | ||
229 | bp = __get_cpu_var(bp_per_reg); | 229 | bp = __get_cpu_var(bp_per_reg); |
230 | if (!bp) | 230 | if (!bp) |
231 | goto out; | 231 | goto out; |
232 | info = counter_arch_bp(bp); | 232 | info = counter_arch_bp(bp); |
233 | 233 | ||
234 | /* | 234 | /* |
235 | * Return early after invoking user-callback function without restoring | 235 | * Return early after invoking user-callback function without restoring |
236 | * DABR if the breakpoint is from ptrace which always operates in | 236 | * DABR if the breakpoint is from ptrace which always operates in |
237 | * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal | 237 | * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal |
238 | * generated in do_dabr(). | 238 | * generated in do_dabr(). |
239 | */ | 239 | */ |
240 | if (bp->overflow_handler == ptrace_triggered) { | 240 | if (bp->overflow_handler == ptrace_triggered) { |
241 | perf_bp_event(bp, regs); | 241 | perf_bp_event(bp, regs); |
242 | rc = NOTIFY_DONE; | 242 | rc = NOTIFY_DONE; |
243 | goto out; | 243 | goto out; |
244 | } | 244 | } |
245 | 245 | ||
246 | /* | 246 | /* |
247 | * Verify if dar lies within the address range occupied by the symbol | 247 | * Verify if dar lies within the address range occupied by the symbol |
248 | * being watched to filter extraneous exceptions. If it doesn't, | 248 | * being watched to filter extraneous exceptions. If it doesn't, |
249 | * we still need to single-step the instruction, but we don't | 249 | * we still need to single-step the instruction, but we don't |
250 | * generate an event. | 250 | * generate an event. |
251 | */ | 251 | */ |
252 | info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ; | 252 | info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ; |
253 | if (!((bp->attr.bp_addr <= dar) && | 253 | if (!((bp->attr.bp_addr <= dar) && |
254 | (dar - bp->attr.bp_addr < bp->attr.bp_len))) | 254 | (dar - bp->attr.bp_addr < bp->attr.bp_len))) |
255 | info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; | 255 | info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; |
256 | 256 | ||
257 | /* Do not emulate user-space instructions, instead single-step them */ | 257 | /* Do not emulate user-space instructions, instead single-step them */ |
258 | if (user_mode(regs)) { | 258 | if (user_mode(regs)) { |
259 | current->thread.last_hit_ubp = bp; | 259 | current->thread.last_hit_ubp = bp; |
260 | regs->msr |= MSR_SE; | 260 | regs->msr |= MSR_SE; |
261 | goto out; | 261 | goto out; |
262 | } | 262 | } |
263 | 263 | ||
264 | stepped = 0; | 264 | stepped = 0; |
265 | instr = 0; | 265 | instr = 0; |
266 | if (!__get_user_inatomic(instr, (unsigned int *) regs->nip)) | 266 | if (!__get_user_inatomic(instr, (unsigned int *) regs->nip)) |
267 | stepped = emulate_step(regs, instr); | 267 | stepped = emulate_step(regs, instr); |
268 | 268 | ||
269 | /* | 269 | /* |
270 | * emulate_step() could not execute it. We've failed in reliably | 270 | * emulate_step() could not execute it. We've failed in reliably |
271 | * handling the hw-breakpoint. Unregister it and throw a warning | 271 | * handling the hw-breakpoint. Unregister it and throw a warning |
272 | * message to let the user know about it. | 272 | * message to let the user know about it. |
273 | */ | 273 | */ |
274 | if (!stepped) { | 274 | if (!stepped) { |
275 | WARN(1, "Unable to handle hardware breakpoint. Breakpoint at " | 275 | WARN(1, "Unable to handle hardware breakpoint. Breakpoint at " |
276 | "0x%lx will be disabled.", info->address); | 276 | "0x%lx will be disabled.", info->address); |
277 | perf_event_disable(bp); | 277 | perf_event_disable(bp); |
278 | goto out; | 278 | goto out; |
279 | } | 279 | } |
280 | /* | 280 | /* |
281 | * As a policy, the callback is invoked in a 'trigger-after-execute' | 281 | * As a policy, the callback is invoked in a 'trigger-after-execute' |
282 | * fashion | 282 | * fashion |
283 | */ | 283 | */ |
284 | if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) | 284 | if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) |
285 | perf_bp_event(bp, regs); | 285 | perf_bp_event(bp, regs); |
286 | 286 | ||
287 | set_breakpoint(info); | 287 | __set_breakpoint(info); |
288 | out: | 288 | out: |
289 | rcu_read_unlock(); | 289 | rcu_read_unlock(); |
290 | return rc; | 290 | return rc; |
291 | } | 291 | } |
292 | 292 | ||
293 | /* | 293 | /* |
294 | * Handle single-step exceptions following a DABR hit. | 294 | * Handle single-step exceptions following a DABR hit. |
295 | */ | 295 | */ |
296 | int __kprobes single_step_dabr_instruction(struct die_args *args) | 296 | int __kprobes single_step_dabr_instruction(struct die_args *args) |
297 | { | 297 | { |
298 | struct pt_regs *regs = args->regs; | 298 | struct pt_regs *regs = args->regs; |
299 | struct perf_event *bp = NULL; | 299 | struct perf_event *bp = NULL; |
300 | struct arch_hw_breakpoint *info; | 300 | struct arch_hw_breakpoint *info; |
301 | 301 | ||
302 | bp = current->thread.last_hit_ubp; | 302 | bp = current->thread.last_hit_ubp; |
303 | /* | 303 | /* |
304 | * Check if we are single-stepping as a result of a | 304 | * Check if we are single-stepping as a result of a |
305 | * previous HW Breakpoint exception | 305 | * previous HW Breakpoint exception |
306 | */ | 306 | */ |
307 | if (!bp) | 307 | if (!bp) |
308 | return NOTIFY_DONE; | 308 | return NOTIFY_DONE; |
309 | 309 | ||
310 | info = counter_arch_bp(bp); | 310 | info = counter_arch_bp(bp); |
311 | 311 | ||
312 | /* | 312 | /* |
313 | * We shall invoke the user-defined callback function in the single | 313 | * We shall invoke the user-defined callback function in the single |
314 | * stepping handler to confirm to 'trigger-after-execute' semantics | 314 | * stepping handler to confirm to 'trigger-after-execute' semantics |
315 | */ | 315 | */ |
316 | if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) | 316 | if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) |
317 | perf_bp_event(bp, regs); | 317 | perf_bp_event(bp, regs); |
318 | 318 | ||
319 | set_breakpoint(info); | 319 | __set_breakpoint(info); |
320 | current->thread.last_hit_ubp = NULL; | 320 | current->thread.last_hit_ubp = NULL; |
321 | 321 | ||
322 | /* | 322 | /* |
323 | * If the process was being single-stepped by ptrace, let the | 323 | * If the process was being single-stepped by ptrace, let the |
324 | * other single-step actions occur (e.g. generate SIGTRAP). | 324 | * other single-step actions occur (e.g. generate SIGTRAP). |
325 | */ | 325 | */ |
326 | if (test_thread_flag(TIF_SINGLESTEP)) | 326 | if (test_thread_flag(TIF_SINGLESTEP)) |
327 | return NOTIFY_DONE; | 327 | return NOTIFY_DONE; |
328 | 328 | ||
329 | return NOTIFY_STOP; | 329 | return NOTIFY_STOP; |
330 | } | 330 | } |
331 | 331 | ||
332 | /* | 332 | /* |
333 | * Handle debug exception notifications. | 333 | * Handle debug exception notifications. |
334 | */ | 334 | */ |
335 | int __kprobes hw_breakpoint_exceptions_notify( | 335 | int __kprobes hw_breakpoint_exceptions_notify( |
336 | struct notifier_block *unused, unsigned long val, void *data) | 336 | struct notifier_block *unused, unsigned long val, void *data) |
337 | { | 337 | { |
338 | int ret = NOTIFY_DONE; | 338 | int ret = NOTIFY_DONE; |
339 | 339 | ||
340 | switch (val) { | 340 | switch (val) { |
341 | case DIE_DABR_MATCH: | 341 | case DIE_DABR_MATCH: |
342 | ret = hw_breakpoint_handler(data); | 342 | ret = hw_breakpoint_handler(data); |
343 | break; | 343 | break; |
344 | case DIE_SSTEP: | 344 | case DIE_SSTEP: |
345 | ret = single_step_dabr_instruction(data); | 345 | ret = single_step_dabr_instruction(data); |
346 | break; | 346 | break; |
347 | } | 347 | } |
348 | 348 | ||
349 | return ret; | 349 | return ret; |
350 | } | 350 | } |
351 | 351 | ||
352 | /* | 352 | /* |
353 | * Release the user breakpoints used by ptrace | 353 | * Release the user breakpoints used by ptrace |
354 | */ | 354 | */ |
355 | void flush_ptrace_hw_breakpoint(struct task_struct *tsk) | 355 | void flush_ptrace_hw_breakpoint(struct task_struct *tsk) |
356 | { | 356 | { |
357 | struct thread_struct *t = &tsk->thread; | 357 | struct thread_struct *t = &tsk->thread; |
358 | 358 | ||
359 | unregister_hw_breakpoint(t->ptrace_bps[0]); | 359 | unregister_hw_breakpoint(t->ptrace_bps[0]); |
360 | t->ptrace_bps[0] = NULL; | 360 | t->ptrace_bps[0] = NULL; |
361 | } | 361 | } |
362 | 362 | ||
363 | void hw_breakpoint_pmu_read(struct perf_event *bp) | 363 | void hw_breakpoint_pmu_read(struct perf_event *bp) |
364 | { | 364 | { |
365 | /* TODO */ | 365 | /* TODO */ |
366 | } | 366 | } |
367 | 367 |
arch/powerpc/kernel/process.c
1 | /* | 1 | /* |
2 | * Derived from "arch/i386/kernel/process.c" | 2 | * Derived from "arch/i386/kernel/process.c" |
3 | * Copyright (C) 1995 Linus Torvalds | 3 | * Copyright (C) 1995 Linus Torvalds |
4 | * | 4 | * |
5 | * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and | 5 | * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and |
6 | * Paul Mackerras (paulus@cs.anu.edu.au) | 6 | * Paul Mackerras (paulus@cs.anu.edu.au) |
7 | * | 7 | * |
8 | * PowerPC version | 8 | * PowerPC version |
9 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | 9 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
12 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
13 | * as published by the Free Software Foundation; either version | 13 | * as published by the Free Software Foundation; either version |
14 | * 2 of the License, or (at your option) any later version. | 14 | * 2 of the License, or (at your option) any later version. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/stddef.h> | 22 | #include <linux/stddef.h> |
23 | #include <linux/unistd.h> | 23 | #include <linux/unistd.h> |
24 | #include <linux/ptrace.h> | 24 | #include <linux/ptrace.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/user.h> | 26 | #include <linux/user.h> |
27 | #include <linux/elf.h> | 27 | #include <linux/elf.h> |
28 | #include <linux/prctl.h> | 28 | #include <linux/prctl.h> |
29 | #include <linux/init_task.h> | 29 | #include <linux/init_task.h> |
30 | #include <linux/export.h> | 30 | #include <linux/export.h> |
31 | #include <linux/kallsyms.h> | 31 | #include <linux/kallsyms.h> |
32 | #include <linux/mqueue.h> | 32 | #include <linux/mqueue.h> |
33 | #include <linux/hardirq.h> | 33 | #include <linux/hardirq.h> |
34 | #include <linux/utsname.h> | 34 | #include <linux/utsname.h> |
35 | #include <linux/ftrace.h> | 35 | #include <linux/ftrace.h> |
36 | #include <linux/kernel_stat.h> | 36 | #include <linux/kernel_stat.h> |
37 | #include <linux/personality.h> | 37 | #include <linux/personality.h> |
38 | #include <linux/random.h> | 38 | #include <linux/random.h> |
39 | #include <linux/hw_breakpoint.h> | 39 | #include <linux/hw_breakpoint.h> |
40 | 40 | ||
41 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
42 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
43 | #include <asm/io.h> | 43 | #include <asm/io.h> |
44 | #include <asm/processor.h> | 44 | #include <asm/processor.h> |
45 | #include <asm/mmu.h> | 45 | #include <asm/mmu.h> |
46 | #include <asm/prom.h> | 46 | #include <asm/prom.h> |
47 | #include <asm/machdep.h> | 47 | #include <asm/machdep.h> |
48 | #include <asm/time.h> | 48 | #include <asm/time.h> |
49 | #include <asm/runlatch.h> | 49 | #include <asm/runlatch.h> |
50 | #include <asm/syscalls.h> | 50 | #include <asm/syscalls.h> |
51 | #include <asm/switch_to.h> | 51 | #include <asm/switch_to.h> |
52 | #include <asm/tm.h> | 52 | #include <asm/tm.h> |
53 | #include <asm/debug.h> | 53 | #include <asm/debug.h> |
54 | #ifdef CONFIG_PPC64 | 54 | #ifdef CONFIG_PPC64 |
55 | #include <asm/firmware.h> | 55 | #include <asm/firmware.h> |
56 | #endif | 56 | #endif |
57 | #include <asm/code-patching.h> | 57 | #include <asm/code-patching.h> |
58 | #include <linux/kprobes.h> | 58 | #include <linux/kprobes.h> |
59 | #include <linux/kdebug.h> | 59 | #include <linux/kdebug.h> |
60 | 60 | ||
61 | /* Transactional Memory debug */ | 61 | /* Transactional Memory debug */ |
62 | #ifdef TM_DEBUG_SW | 62 | #ifdef TM_DEBUG_SW |
63 | #define TM_DEBUG(x...) printk(KERN_INFO x) | 63 | #define TM_DEBUG(x...) printk(KERN_INFO x) |
64 | #else | 64 | #else |
65 | #define TM_DEBUG(x...) do { } while(0) | 65 | #define TM_DEBUG(x...) do { } while(0) |
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | extern unsigned long _get_SP(void); | 68 | extern unsigned long _get_SP(void); |
69 | 69 | ||
70 | #ifndef CONFIG_SMP | 70 | #ifndef CONFIG_SMP |
71 | struct task_struct *last_task_used_math = NULL; | 71 | struct task_struct *last_task_used_math = NULL; |
72 | struct task_struct *last_task_used_altivec = NULL; | 72 | struct task_struct *last_task_used_altivec = NULL; |
73 | struct task_struct *last_task_used_vsx = NULL; | 73 | struct task_struct *last_task_used_vsx = NULL; |
74 | struct task_struct *last_task_used_spe = NULL; | 74 | struct task_struct *last_task_used_spe = NULL; |
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 77 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
78 | void giveup_fpu_maybe_transactional(struct task_struct *tsk) | 78 | void giveup_fpu_maybe_transactional(struct task_struct *tsk) |
79 | { | 79 | { |
80 | /* | 80 | /* |
81 | * If we are saving the current thread's registers, and the | 81 | * If we are saving the current thread's registers, and the |
82 | * thread is in a transactional state, set the TIF_RESTORE_TM | 82 | * thread is in a transactional state, set the TIF_RESTORE_TM |
83 | * bit so that we know to restore the registers before | 83 | * bit so that we know to restore the registers before |
84 | * returning to userspace. | 84 | * returning to userspace. |
85 | */ | 85 | */ |
86 | if (tsk == current && tsk->thread.regs && | 86 | if (tsk == current && tsk->thread.regs && |
87 | MSR_TM_ACTIVE(tsk->thread.regs->msr) && | 87 | MSR_TM_ACTIVE(tsk->thread.regs->msr) && |
88 | !test_thread_flag(TIF_RESTORE_TM)) { | 88 | !test_thread_flag(TIF_RESTORE_TM)) { |
89 | tsk->thread.tm_orig_msr = tsk->thread.regs->msr; | 89 | tsk->thread.tm_orig_msr = tsk->thread.regs->msr; |
90 | set_thread_flag(TIF_RESTORE_TM); | 90 | set_thread_flag(TIF_RESTORE_TM); |
91 | } | 91 | } |
92 | 92 | ||
93 | giveup_fpu(tsk); | 93 | giveup_fpu(tsk); |
94 | } | 94 | } |
95 | 95 | ||
96 | void giveup_altivec_maybe_transactional(struct task_struct *tsk) | 96 | void giveup_altivec_maybe_transactional(struct task_struct *tsk) |
97 | { | 97 | { |
98 | /* | 98 | /* |
99 | * If we are saving the current thread's registers, and the | 99 | * If we are saving the current thread's registers, and the |
100 | * thread is in a transactional state, set the TIF_RESTORE_TM | 100 | * thread is in a transactional state, set the TIF_RESTORE_TM |
101 | * bit so that we know to restore the registers before | 101 | * bit so that we know to restore the registers before |
102 | * returning to userspace. | 102 | * returning to userspace. |
103 | */ | 103 | */ |
104 | if (tsk == current && tsk->thread.regs && | 104 | if (tsk == current && tsk->thread.regs && |
105 | MSR_TM_ACTIVE(tsk->thread.regs->msr) && | 105 | MSR_TM_ACTIVE(tsk->thread.regs->msr) && |
106 | !test_thread_flag(TIF_RESTORE_TM)) { | 106 | !test_thread_flag(TIF_RESTORE_TM)) { |
107 | tsk->thread.tm_orig_msr = tsk->thread.regs->msr; | 107 | tsk->thread.tm_orig_msr = tsk->thread.regs->msr; |
108 | set_thread_flag(TIF_RESTORE_TM); | 108 | set_thread_flag(TIF_RESTORE_TM); |
109 | } | 109 | } |
110 | 110 | ||
111 | giveup_altivec(tsk); | 111 | giveup_altivec(tsk); |
112 | } | 112 | } |
113 | 113 | ||
114 | #else | 114 | #else |
115 | #define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk) | 115 | #define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk) |
116 | #define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk) | 116 | #define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk) |
117 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | 117 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
118 | 118 | ||
119 | #ifdef CONFIG_PPC_FPU | 119 | #ifdef CONFIG_PPC_FPU |
120 | /* | 120 | /* |
121 | * Make sure the floating-point register state in the | 121 | * Make sure the floating-point register state in the |
122 | * the thread_struct is up to date for task tsk. | 122 | * the thread_struct is up to date for task tsk. |
123 | */ | 123 | */ |
124 | void flush_fp_to_thread(struct task_struct *tsk) | 124 | void flush_fp_to_thread(struct task_struct *tsk) |
125 | { | 125 | { |
126 | if (tsk->thread.regs) { | 126 | if (tsk->thread.regs) { |
127 | /* | 127 | /* |
128 | * We need to disable preemption here because if we didn't, | 128 | * We need to disable preemption here because if we didn't, |
129 | * another process could get scheduled after the regs->msr | 129 | * another process could get scheduled after the regs->msr |
130 | * test but before we have finished saving the FP registers | 130 | * test but before we have finished saving the FP registers |
131 | * to the thread_struct. That process could take over the | 131 | * to the thread_struct. That process could take over the |
132 | * FPU, and then when we get scheduled again we would store | 132 | * FPU, and then when we get scheduled again we would store |
133 | * bogus values for the remaining FP registers. | 133 | * bogus values for the remaining FP registers. |
134 | */ | 134 | */ |
135 | preempt_disable(); | 135 | preempt_disable(); |
136 | if (tsk->thread.regs->msr & MSR_FP) { | 136 | if (tsk->thread.regs->msr & MSR_FP) { |
137 | #ifdef CONFIG_SMP | 137 | #ifdef CONFIG_SMP |
138 | /* | 138 | /* |
139 | * This should only ever be called for current or | 139 | * This should only ever be called for current or |
140 | * for a stopped child process. Since we save away | 140 | * for a stopped child process. Since we save away |
141 | * the FP register state on context switch on SMP, | 141 | * the FP register state on context switch on SMP, |
142 | * there is something wrong if a stopped child appears | 142 | * there is something wrong if a stopped child appears |
143 | * to still have its FP state in the CPU registers. | 143 | * to still have its FP state in the CPU registers. |
144 | */ | 144 | */ |
145 | BUG_ON(tsk != current); | 145 | BUG_ON(tsk != current); |
146 | #endif | 146 | #endif |
147 | giveup_fpu_maybe_transactional(tsk); | 147 | giveup_fpu_maybe_transactional(tsk); |
148 | } | 148 | } |
149 | preempt_enable(); | 149 | preempt_enable(); |
150 | } | 150 | } |
151 | } | 151 | } |
152 | EXPORT_SYMBOL_GPL(flush_fp_to_thread); | 152 | EXPORT_SYMBOL_GPL(flush_fp_to_thread); |
153 | #endif /* CONFIG_PPC_FPU */ | 153 | #endif /* CONFIG_PPC_FPU */ |
154 | 154 | ||
155 | void enable_kernel_fp(void) | 155 | void enable_kernel_fp(void) |
156 | { | 156 | { |
157 | WARN_ON(preemptible()); | 157 | WARN_ON(preemptible()); |
158 | 158 | ||
159 | #ifdef CONFIG_SMP | 159 | #ifdef CONFIG_SMP |
160 | if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) | 160 | if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) |
161 | giveup_fpu_maybe_transactional(current); | 161 | giveup_fpu_maybe_transactional(current); |
162 | else | 162 | else |
163 | giveup_fpu(NULL); /* just enables FP for kernel */ | 163 | giveup_fpu(NULL); /* just enables FP for kernel */ |
164 | #else | 164 | #else |
165 | giveup_fpu_maybe_transactional(last_task_used_math); | 165 | giveup_fpu_maybe_transactional(last_task_used_math); |
166 | #endif /* CONFIG_SMP */ | 166 | #endif /* CONFIG_SMP */ |
167 | } | 167 | } |
168 | EXPORT_SYMBOL(enable_kernel_fp); | 168 | EXPORT_SYMBOL(enable_kernel_fp); |
169 | 169 | ||
170 | #ifdef CONFIG_ALTIVEC | 170 | #ifdef CONFIG_ALTIVEC |
171 | void enable_kernel_altivec(void) | 171 | void enable_kernel_altivec(void) |
172 | { | 172 | { |
173 | WARN_ON(preemptible()); | 173 | WARN_ON(preemptible()); |
174 | 174 | ||
175 | #ifdef CONFIG_SMP | 175 | #ifdef CONFIG_SMP |
176 | if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) | 176 | if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) |
177 | giveup_altivec_maybe_transactional(current); | 177 | giveup_altivec_maybe_transactional(current); |
178 | else | 178 | else |
179 | giveup_altivec_notask(); | 179 | giveup_altivec_notask(); |
180 | #else | 180 | #else |
181 | giveup_altivec_maybe_transactional(last_task_used_altivec); | 181 | giveup_altivec_maybe_transactional(last_task_used_altivec); |
182 | #endif /* CONFIG_SMP */ | 182 | #endif /* CONFIG_SMP */ |
183 | } | 183 | } |
184 | EXPORT_SYMBOL(enable_kernel_altivec); | 184 | EXPORT_SYMBOL(enable_kernel_altivec); |
185 | 185 | ||
186 | /* | 186 | /* |
187 | * Make sure the VMX/Altivec register state in the | 187 | * Make sure the VMX/Altivec register state in the |
188 | * the thread_struct is up to date for task tsk. | 188 | * the thread_struct is up to date for task tsk. |
189 | */ | 189 | */ |
190 | void flush_altivec_to_thread(struct task_struct *tsk) | 190 | void flush_altivec_to_thread(struct task_struct *tsk) |
191 | { | 191 | { |
192 | if (tsk->thread.regs) { | 192 | if (tsk->thread.regs) { |
193 | preempt_disable(); | 193 | preempt_disable(); |
194 | if (tsk->thread.regs->msr & MSR_VEC) { | 194 | if (tsk->thread.regs->msr & MSR_VEC) { |
195 | #ifdef CONFIG_SMP | 195 | #ifdef CONFIG_SMP |
196 | BUG_ON(tsk != current); | 196 | BUG_ON(tsk != current); |
197 | #endif | 197 | #endif |
198 | giveup_altivec_maybe_transactional(tsk); | 198 | giveup_altivec_maybe_transactional(tsk); |
199 | } | 199 | } |
200 | preempt_enable(); | 200 | preempt_enable(); |
201 | } | 201 | } |
202 | } | 202 | } |
203 | EXPORT_SYMBOL_GPL(flush_altivec_to_thread); | 203 | EXPORT_SYMBOL_GPL(flush_altivec_to_thread); |
204 | #endif /* CONFIG_ALTIVEC */ | 204 | #endif /* CONFIG_ALTIVEC */ |
205 | 205 | ||
206 | #ifdef CONFIG_VSX | 206 | #ifdef CONFIG_VSX |
207 | #if 0 | 207 | #if 0 |
208 | /* not currently used, but some crazy RAID module might want to later */ | 208 | /* not currently used, but some crazy RAID module might want to later */ |
209 | void enable_kernel_vsx(void) | 209 | void enable_kernel_vsx(void) |
210 | { | 210 | { |
211 | WARN_ON(preemptible()); | 211 | WARN_ON(preemptible()); |
212 | 212 | ||
213 | #ifdef CONFIG_SMP | 213 | #ifdef CONFIG_SMP |
214 | if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) | 214 | if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) |
215 | giveup_vsx(current); | 215 | giveup_vsx(current); |
216 | else | 216 | else |
217 | giveup_vsx(NULL); /* just enable vsx for kernel - force */ | 217 | giveup_vsx(NULL); /* just enable vsx for kernel - force */ |
218 | #else | 218 | #else |
219 | giveup_vsx(last_task_used_vsx); | 219 | giveup_vsx(last_task_used_vsx); |
220 | #endif /* CONFIG_SMP */ | 220 | #endif /* CONFIG_SMP */ |
221 | } | 221 | } |
222 | EXPORT_SYMBOL(enable_kernel_vsx); | 222 | EXPORT_SYMBOL(enable_kernel_vsx); |
223 | #endif | 223 | #endif |
224 | 224 | ||
225 | void giveup_vsx(struct task_struct *tsk) | 225 | void giveup_vsx(struct task_struct *tsk) |
226 | { | 226 | { |
227 | giveup_fpu_maybe_transactional(tsk); | 227 | giveup_fpu_maybe_transactional(tsk); |
228 | giveup_altivec_maybe_transactional(tsk); | 228 | giveup_altivec_maybe_transactional(tsk); |
229 | __giveup_vsx(tsk); | 229 | __giveup_vsx(tsk); |
230 | } | 230 | } |
231 | 231 | ||
232 | void flush_vsx_to_thread(struct task_struct *tsk) | 232 | void flush_vsx_to_thread(struct task_struct *tsk) |
233 | { | 233 | { |
234 | if (tsk->thread.regs) { | 234 | if (tsk->thread.regs) { |
235 | preempt_disable(); | 235 | preempt_disable(); |
236 | if (tsk->thread.regs->msr & MSR_VSX) { | 236 | if (tsk->thread.regs->msr & MSR_VSX) { |
237 | #ifdef CONFIG_SMP | 237 | #ifdef CONFIG_SMP |
238 | BUG_ON(tsk != current); | 238 | BUG_ON(tsk != current); |
239 | #endif | 239 | #endif |
240 | giveup_vsx(tsk); | 240 | giveup_vsx(tsk); |
241 | } | 241 | } |
242 | preempt_enable(); | 242 | preempt_enable(); |
243 | } | 243 | } |
244 | } | 244 | } |
245 | EXPORT_SYMBOL_GPL(flush_vsx_to_thread); | 245 | EXPORT_SYMBOL_GPL(flush_vsx_to_thread); |
246 | #endif /* CONFIG_VSX */ | 246 | #endif /* CONFIG_VSX */ |
247 | 247 | ||
248 | #ifdef CONFIG_SPE | 248 | #ifdef CONFIG_SPE |
249 | 249 | ||
250 | void enable_kernel_spe(void) | 250 | void enable_kernel_spe(void) |
251 | { | 251 | { |
252 | WARN_ON(preemptible()); | 252 | WARN_ON(preemptible()); |
253 | 253 | ||
254 | #ifdef CONFIG_SMP | 254 | #ifdef CONFIG_SMP |
255 | if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) | 255 | if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) |
256 | giveup_spe(current); | 256 | giveup_spe(current); |
257 | else | 257 | else |
258 | giveup_spe(NULL); /* just enable SPE for kernel - force */ | 258 | giveup_spe(NULL); /* just enable SPE for kernel - force */ |
259 | #else | 259 | #else |
260 | giveup_spe(last_task_used_spe); | 260 | giveup_spe(last_task_used_spe); |
261 | #endif /* __SMP __ */ | 261 | #endif /* __SMP __ */ |
262 | } | 262 | } |
263 | EXPORT_SYMBOL(enable_kernel_spe); | 263 | EXPORT_SYMBOL(enable_kernel_spe); |
264 | 264 | ||
265 | void flush_spe_to_thread(struct task_struct *tsk) | 265 | void flush_spe_to_thread(struct task_struct *tsk) |
266 | { | 266 | { |
267 | if (tsk->thread.regs) { | 267 | if (tsk->thread.regs) { |
268 | preempt_disable(); | 268 | preempt_disable(); |
269 | if (tsk->thread.regs->msr & MSR_SPE) { | 269 | if (tsk->thread.regs->msr & MSR_SPE) { |
270 | #ifdef CONFIG_SMP | 270 | #ifdef CONFIG_SMP |
271 | BUG_ON(tsk != current); | 271 | BUG_ON(tsk != current); |
272 | #endif | 272 | #endif |
273 | tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); | 273 | tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); |
274 | giveup_spe(tsk); | 274 | giveup_spe(tsk); |
275 | } | 275 | } |
276 | preempt_enable(); | 276 | preempt_enable(); |
277 | } | 277 | } |
278 | } | 278 | } |
279 | #endif /* CONFIG_SPE */ | 279 | #endif /* CONFIG_SPE */ |
280 | 280 | ||
281 | #ifndef CONFIG_SMP | 281 | #ifndef CONFIG_SMP |
282 | /* | 282 | /* |
283 | * If we are doing lazy switching of CPU state (FP, altivec or SPE), | 283 | * If we are doing lazy switching of CPU state (FP, altivec or SPE), |
284 | * and the current task has some state, discard it. | 284 | * and the current task has some state, discard it. |
285 | */ | 285 | */ |
286 | void discard_lazy_cpu_state(void) | 286 | void discard_lazy_cpu_state(void) |
287 | { | 287 | { |
288 | preempt_disable(); | 288 | preempt_disable(); |
289 | if (last_task_used_math == current) | 289 | if (last_task_used_math == current) |
290 | last_task_used_math = NULL; | 290 | last_task_used_math = NULL; |
291 | #ifdef CONFIG_ALTIVEC | 291 | #ifdef CONFIG_ALTIVEC |
292 | if (last_task_used_altivec == current) | 292 | if (last_task_used_altivec == current) |
293 | last_task_used_altivec = NULL; | 293 | last_task_used_altivec = NULL; |
294 | #endif /* CONFIG_ALTIVEC */ | 294 | #endif /* CONFIG_ALTIVEC */ |
295 | #ifdef CONFIG_VSX | 295 | #ifdef CONFIG_VSX |
296 | if (last_task_used_vsx == current) | 296 | if (last_task_used_vsx == current) |
297 | last_task_used_vsx = NULL; | 297 | last_task_used_vsx = NULL; |
298 | #endif /* CONFIG_VSX */ | 298 | #endif /* CONFIG_VSX */ |
299 | #ifdef CONFIG_SPE | 299 | #ifdef CONFIG_SPE |
300 | if (last_task_used_spe == current) | 300 | if (last_task_used_spe == current) |
301 | last_task_used_spe = NULL; | 301 | last_task_used_spe = NULL; |
302 | #endif | 302 | #endif |
303 | preempt_enable(); | 303 | preempt_enable(); |
304 | } | 304 | } |
305 | #endif /* CONFIG_SMP */ | 305 | #endif /* CONFIG_SMP */ |
306 | 306 | ||
307 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 307 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
308 | void do_send_trap(struct pt_regs *regs, unsigned long address, | 308 | void do_send_trap(struct pt_regs *regs, unsigned long address, |
309 | unsigned long error_code, int signal_code, int breakpt) | 309 | unsigned long error_code, int signal_code, int breakpt) |
310 | { | 310 | { |
311 | siginfo_t info; | 311 | siginfo_t info; |
312 | 312 | ||
313 | current->thread.trap_nr = signal_code; | 313 | current->thread.trap_nr = signal_code; |
314 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, | 314 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, |
315 | 11, SIGSEGV) == NOTIFY_STOP) | 315 | 11, SIGSEGV) == NOTIFY_STOP) |
316 | return; | 316 | return; |
317 | 317 | ||
318 | /* Deliver the signal to userspace */ | 318 | /* Deliver the signal to userspace */ |
319 | info.si_signo = SIGTRAP; | 319 | info.si_signo = SIGTRAP; |
320 | info.si_errno = breakpt; /* breakpoint or watchpoint id */ | 320 | info.si_errno = breakpt; /* breakpoint or watchpoint id */ |
321 | info.si_code = signal_code; | 321 | info.si_code = signal_code; |
322 | info.si_addr = (void __user *)address; | 322 | info.si_addr = (void __user *)address; |
323 | force_sig_info(SIGTRAP, &info, current); | 323 | force_sig_info(SIGTRAP, &info, current); |
324 | } | 324 | } |
325 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 325 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
326 | void do_break (struct pt_regs *regs, unsigned long address, | 326 | void do_break (struct pt_regs *regs, unsigned long address, |
327 | unsigned long error_code) | 327 | unsigned long error_code) |
328 | { | 328 | { |
329 | siginfo_t info; | 329 | siginfo_t info; |
330 | 330 | ||
331 | current->thread.trap_nr = TRAP_HWBKPT; | 331 | current->thread.trap_nr = TRAP_HWBKPT; |
332 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, | 332 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, |
333 | 11, SIGSEGV) == NOTIFY_STOP) | 333 | 11, SIGSEGV) == NOTIFY_STOP) |
334 | return; | 334 | return; |
335 | 335 | ||
336 | if (debugger_break_match(regs)) | 336 | if (debugger_break_match(regs)) |
337 | return; | 337 | return; |
338 | 338 | ||
339 | /* Clear the breakpoint */ | 339 | /* Clear the breakpoint */ |
340 | hw_breakpoint_disable(); | 340 | hw_breakpoint_disable(); |
341 | 341 | ||
342 | /* Deliver the signal to userspace */ | 342 | /* Deliver the signal to userspace */ |
343 | info.si_signo = SIGTRAP; | 343 | info.si_signo = SIGTRAP; |
344 | info.si_errno = 0; | 344 | info.si_errno = 0; |
345 | info.si_code = TRAP_HWBKPT; | 345 | info.si_code = TRAP_HWBKPT; |
346 | info.si_addr = (void __user *)address; | 346 | info.si_addr = (void __user *)address; |
347 | force_sig_info(SIGTRAP, &info, current); | 347 | force_sig_info(SIGTRAP, &info, current); |
348 | } | 348 | } |
349 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 349 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
350 | 350 | ||
351 | static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk); | 351 | static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk); |
352 | 352 | ||
353 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 353 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
354 | /* | 354 | /* |
355 | * Set the debug registers back to their default "safe" values. | 355 | * Set the debug registers back to their default "safe" values. |
356 | */ | 356 | */ |
357 | static void set_debug_reg_defaults(struct thread_struct *thread) | 357 | static void set_debug_reg_defaults(struct thread_struct *thread) |
358 | { | 358 | { |
359 | thread->debug.iac1 = thread->debug.iac2 = 0; | 359 | thread->debug.iac1 = thread->debug.iac2 = 0; |
360 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | 360 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 |
361 | thread->debug.iac3 = thread->debug.iac4 = 0; | 361 | thread->debug.iac3 = thread->debug.iac4 = 0; |
362 | #endif | 362 | #endif |
363 | thread->debug.dac1 = thread->debug.dac2 = 0; | 363 | thread->debug.dac1 = thread->debug.dac2 = 0; |
364 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | 364 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 |
365 | thread->debug.dvc1 = thread->debug.dvc2 = 0; | 365 | thread->debug.dvc1 = thread->debug.dvc2 = 0; |
366 | #endif | 366 | #endif |
367 | thread->debug.dbcr0 = 0; | 367 | thread->debug.dbcr0 = 0; |
368 | #ifdef CONFIG_BOOKE | 368 | #ifdef CONFIG_BOOKE |
369 | /* | 369 | /* |
370 | * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) | 370 | * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) |
371 | */ | 371 | */ |
372 | thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | | 372 | thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | |
373 | DBCR1_IAC3US | DBCR1_IAC4US; | 373 | DBCR1_IAC3US | DBCR1_IAC4US; |
374 | /* | 374 | /* |
375 | * Force Data Address Compare User/Supervisor bits to be User-only | 375 | * Force Data Address Compare User/Supervisor bits to be User-only |
376 | * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. | 376 | * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. |
377 | */ | 377 | */ |
378 | thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; | 378 | thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; |
379 | #else | 379 | #else |
380 | thread->debug.dbcr1 = 0; | 380 | thread->debug.dbcr1 = 0; |
381 | #endif | 381 | #endif |
382 | } | 382 | } |
383 | 383 | ||
384 | static void prime_debug_regs(struct debug_reg *debug) | 384 | static void prime_debug_regs(struct debug_reg *debug) |
385 | { | 385 | { |
386 | /* | 386 | /* |
387 | * We could have inherited MSR_DE from userspace, since | 387 | * We could have inherited MSR_DE from userspace, since |
388 | * it doesn't get cleared on exception entry. Make sure | 388 | * it doesn't get cleared on exception entry. Make sure |
389 | * MSR_DE is clear before we enable any debug events. | 389 | * MSR_DE is clear before we enable any debug events. |
390 | */ | 390 | */ |
391 | mtmsr(mfmsr() & ~MSR_DE); | 391 | mtmsr(mfmsr() & ~MSR_DE); |
392 | 392 | ||
393 | mtspr(SPRN_IAC1, debug->iac1); | 393 | mtspr(SPRN_IAC1, debug->iac1); |
394 | mtspr(SPRN_IAC2, debug->iac2); | 394 | mtspr(SPRN_IAC2, debug->iac2); |
395 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | 395 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 |
396 | mtspr(SPRN_IAC3, debug->iac3); | 396 | mtspr(SPRN_IAC3, debug->iac3); |
397 | mtspr(SPRN_IAC4, debug->iac4); | 397 | mtspr(SPRN_IAC4, debug->iac4); |
398 | #endif | 398 | #endif |
399 | mtspr(SPRN_DAC1, debug->dac1); | 399 | mtspr(SPRN_DAC1, debug->dac1); |
400 | mtspr(SPRN_DAC2, debug->dac2); | 400 | mtspr(SPRN_DAC2, debug->dac2); |
401 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | 401 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 |
402 | mtspr(SPRN_DVC1, debug->dvc1); | 402 | mtspr(SPRN_DVC1, debug->dvc1); |
403 | mtspr(SPRN_DVC2, debug->dvc2); | 403 | mtspr(SPRN_DVC2, debug->dvc2); |
404 | #endif | 404 | #endif |
405 | mtspr(SPRN_DBCR0, debug->dbcr0); | 405 | mtspr(SPRN_DBCR0, debug->dbcr0); |
406 | mtspr(SPRN_DBCR1, debug->dbcr1); | 406 | mtspr(SPRN_DBCR1, debug->dbcr1); |
407 | #ifdef CONFIG_BOOKE | 407 | #ifdef CONFIG_BOOKE |
408 | mtspr(SPRN_DBCR2, debug->dbcr2); | 408 | mtspr(SPRN_DBCR2, debug->dbcr2); |
409 | #endif | 409 | #endif |
410 | } | 410 | } |
411 | /* | 411 | /* |
412 | * Unless neither the old nor new thread are making use of the | 412 | * Unless neither the old nor new thread are making use of the |
413 | * debug registers, set the debug registers from the values | 413 | * debug registers, set the debug registers from the values |
414 | * stored in the new thread. | 414 | * stored in the new thread. |
415 | */ | 415 | */ |
416 | void switch_booke_debug_regs(struct debug_reg *new_debug) | 416 | void switch_booke_debug_regs(struct debug_reg *new_debug) |
417 | { | 417 | { |
418 | if ((current->thread.debug.dbcr0 & DBCR0_IDM) | 418 | if ((current->thread.debug.dbcr0 & DBCR0_IDM) |
419 | || (new_debug->dbcr0 & DBCR0_IDM)) | 419 | || (new_debug->dbcr0 & DBCR0_IDM)) |
420 | prime_debug_regs(new_debug); | 420 | prime_debug_regs(new_debug); |
421 | } | 421 | } |
422 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); | 422 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); |
423 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 423 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
424 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | 424 | #ifndef CONFIG_HAVE_HW_BREAKPOINT |
425 | static void set_debug_reg_defaults(struct thread_struct *thread) | 425 | static void set_debug_reg_defaults(struct thread_struct *thread) |
426 | { | 426 | { |
427 | thread->hw_brk.address = 0; | 427 | thread->hw_brk.address = 0; |
428 | thread->hw_brk.type = 0; | 428 | thread->hw_brk.type = 0; |
429 | set_breakpoint(&thread->hw_brk); | 429 | set_breakpoint(&thread->hw_brk); |
430 | } | 430 | } |
431 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ | 431 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ |
432 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 432 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
433 | 433 | ||
434 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 434 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
435 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) | 435 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) |
436 | { | 436 | { |
437 | mtspr(SPRN_DAC1, dabr); | 437 | mtspr(SPRN_DAC1, dabr); |
438 | #ifdef CONFIG_PPC_47x | 438 | #ifdef CONFIG_PPC_47x |
439 | isync(); | 439 | isync(); |
440 | #endif | 440 | #endif |
441 | return 0; | 441 | return 0; |
442 | } | 442 | } |
443 | #elif defined(CONFIG_PPC_BOOK3S) | 443 | #elif defined(CONFIG_PPC_BOOK3S) |
444 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) | 444 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) |
445 | { | 445 | { |
446 | mtspr(SPRN_DABR, dabr); | 446 | mtspr(SPRN_DABR, dabr); |
447 | if (cpu_has_feature(CPU_FTR_DABRX)) | 447 | if (cpu_has_feature(CPU_FTR_DABRX)) |
448 | mtspr(SPRN_DABRX, dabrx); | 448 | mtspr(SPRN_DABRX, dabrx); |
449 | return 0; | 449 | return 0; |
450 | } | 450 | } |
451 | #else | 451 | #else |
452 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) | 452 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) |
453 | { | 453 | { |
454 | return -EINVAL; | 454 | return -EINVAL; |
455 | } | 455 | } |
456 | #endif | 456 | #endif |
457 | 457 | ||
458 | static inline int set_dabr(struct arch_hw_breakpoint *brk) | 458 | static inline int set_dabr(struct arch_hw_breakpoint *brk) |
459 | { | 459 | { |
460 | unsigned long dabr, dabrx; | 460 | unsigned long dabr, dabrx; |
461 | 461 | ||
462 | dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR); | 462 | dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR); |
463 | dabrx = ((brk->type >> 3) & 0x7); | 463 | dabrx = ((brk->type >> 3) & 0x7); |
464 | 464 | ||
465 | if (ppc_md.set_dabr) | 465 | if (ppc_md.set_dabr) |
466 | return ppc_md.set_dabr(dabr, dabrx); | 466 | return ppc_md.set_dabr(dabr, dabrx); |
467 | 467 | ||
468 | return __set_dabr(dabr, dabrx); | 468 | return __set_dabr(dabr, dabrx); |
469 | } | 469 | } |
470 | 470 | ||
471 | static inline int set_dawr(struct arch_hw_breakpoint *brk) | 471 | static inline int set_dawr(struct arch_hw_breakpoint *brk) |
472 | { | 472 | { |
473 | unsigned long dawr, dawrx, mrd; | 473 | unsigned long dawr, dawrx, mrd; |
474 | 474 | ||
475 | dawr = brk->address; | 475 | dawr = brk->address; |
476 | 476 | ||
477 | dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \ | 477 | dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \ |
478 | << (63 - 58); //* read/write bits */ | 478 | << (63 - 58); //* read/write bits */ |
479 | dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \ | 479 | dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \ |
480 | << (63 - 59); //* translate */ | 480 | << (63 - 59); //* translate */ |
481 | dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \ | 481 | dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \ |
482 | >> 3; //* PRIM bits */ | 482 | >> 3; //* PRIM bits */ |
483 | /* dawr length is stored in field MDR bits 48:53. Matches range in | 483 | /* dawr length is stored in field MDR bits 48:53. Matches range in |
484 | doublewords (64 bits) biased by -1 eg. 0b000000=1DW and | 484 |
485 | 0b111111=64DW. | 485 | 0b111111=64DW. |
486 | brk->len is in bytes. | 486 | brk->len is in bytes. |
487 | This aligns up to double word size, shifts and does the bias. | 487 | This aligns up to double word size, shifts and does the bias. |
488 | */ | 488 | */ |
489 | mrd = ((brk->len + 7) >> 3) - 1; | 489 | mrd = ((brk->len + 7) >> 3) - 1; |
490 | dawrx |= (mrd & 0x3f) << (63 - 53); | 490 | dawrx |= (mrd & 0x3f) << (63 - 53); |
491 | 491 | ||
492 | if (ppc_md.set_dawr) | 492 | if (ppc_md.set_dawr) |
493 | return ppc_md.set_dawr(dawr, dawrx); | 493 | return ppc_md.set_dawr(dawr, dawrx); |
494 | mtspr(SPRN_DAWR, dawr); | 494 | mtspr(SPRN_DAWR, dawr); |
495 | mtspr(SPRN_DAWRX, dawrx); | 495 | mtspr(SPRN_DAWRX, dawrx); |
496 | return 0; | 496 | return 0; |
497 | } | 497 | } |
498 | 498 | ||
499 | void set_breakpoint(struct arch_hw_breakpoint *brk) | 499 | void __set_breakpoint(struct arch_hw_breakpoint *brk) |
500 | { | 500 | { |
501 | __get_cpu_var(current_brk) = *brk; | 501 | __get_cpu_var(current_brk) = *brk; |
502 | 502 | ||
503 | if (cpu_has_feature(CPU_FTR_DAWR)) | 503 | if (cpu_has_feature(CPU_FTR_DAWR)) |
504 | set_dawr(brk); | 504 | set_dawr(brk); |
505 | else | 505 | else |
506 | set_dabr(brk); | 506 | set_dabr(brk); |
507 | } | 507 | } |
508 | 508 | ||
509 | void set_breakpoint(struct arch_hw_breakpoint *brk) | ||
510 | { | ||
511 | preempt_disable(); | ||
512 | __set_breakpoint(brk); | ||
513 | preempt_enable(); | ||
514 | } | ||
515 | |||
509 | #ifdef CONFIG_PPC64 | 516 | #ifdef CONFIG_PPC64 |
510 | DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); | 517 | DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); |
511 | #endif | 518 | #endif |
512 | 519 | ||
513 | static inline bool hw_brk_match(struct arch_hw_breakpoint *a, | 520 | static inline bool hw_brk_match(struct arch_hw_breakpoint *a, |
514 | struct arch_hw_breakpoint *b) | 521 | struct arch_hw_breakpoint *b) |
515 | { | 522 | { |
516 | if (a->address != b->address) | 523 | if (a->address != b->address) |
517 | return false; | 524 | return false; |
518 | if (a->type != b->type) | 525 | if (a->type != b->type) |
519 | return false; | 526 | return false; |
520 | if (a->len != b->len) | 527 | if (a->len != b->len) |
521 | return false; | 528 | return false; |
522 | return true; | 529 | return true; |
523 | } | 530 | } |
524 | 531 | ||
525 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 532 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
526 | static void tm_reclaim_thread(struct thread_struct *thr, | 533 | static void tm_reclaim_thread(struct thread_struct *thr, |
527 | struct thread_info *ti, uint8_t cause) | 534 | struct thread_info *ti, uint8_t cause) |
528 | { | 535 | { |
529 | unsigned long msr_diff = 0; | 536 | unsigned long msr_diff = 0; |
530 | 537 | ||
531 | /* | 538 | /* |
532 | * If FP/VSX registers have been already saved to the | 539 | * If FP/VSX registers have been already saved to the |
533 | * thread_struct, move them to the transact_fp array. | 540 | * thread_struct, move them to the transact_fp array. |
534 | * We clear the TIF_RESTORE_TM bit since after the reclaim | 541 | * We clear the TIF_RESTORE_TM bit since after the reclaim |
535 | * the thread will no longer be transactional. | 542 | * the thread will no longer be transactional. |
536 | */ | 543 | */ |
537 | if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) { | 544 | if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) { |
538 | msr_diff = thr->tm_orig_msr & ~thr->regs->msr; | 545 | msr_diff = thr->tm_orig_msr & ~thr->regs->msr; |
539 | if (msr_diff & MSR_FP) | 546 | if (msr_diff & MSR_FP) |
540 | memcpy(&thr->transact_fp, &thr->fp_state, | 547 | memcpy(&thr->transact_fp, &thr->fp_state, |
541 | sizeof(struct thread_fp_state)); | 548 | sizeof(struct thread_fp_state)); |
542 | if (msr_diff & MSR_VEC) | 549 | if (msr_diff & MSR_VEC) |
543 | memcpy(&thr->transact_vr, &thr->vr_state, | 550 | memcpy(&thr->transact_vr, &thr->vr_state, |
544 | sizeof(struct thread_vr_state)); | 551 | sizeof(struct thread_vr_state)); |
545 | clear_ti_thread_flag(ti, TIF_RESTORE_TM); | 552 | clear_ti_thread_flag(ti, TIF_RESTORE_TM); |
546 | msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; | 553 | msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; |
547 | } | 554 | } |
548 | 555 | ||
549 | tm_reclaim(thr, thr->regs->msr, cause); | 556 | tm_reclaim(thr, thr->regs->msr, cause); |
550 | 557 | ||
551 | /* Having done the reclaim, we now have the checkpointed | 558 | /* Having done the reclaim, we now have the checkpointed |
552 | * FP/VSX values in the registers. These might be valid | 559 | * FP/VSX values in the registers. These might be valid |
553 | * even if we have previously called enable_kernel_fp() or | 560 | * even if we have previously called enable_kernel_fp() or |
554 | * flush_fp_to_thread(), so update thr->regs->msr to | 561 | * flush_fp_to_thread(), so update thr->regs->msr to |
555 | * indicate their current validity. | 562 | * indicate their current validity. |
556 | */ | 563 | */ |
557 | thr->regs->msr |= msr_diff; | 564 | thr->regs->msr |= msr_diff; |
558 | } | 565 | } |
559 | 566 | ||
560 | void tm_reclaim_current(uint8_t cause) | 567 | void tm_reclaim_current(uint8_t cause) |
561 | { | 568 | { |
562 | tm_enable(); | 569 | tm_enable(); |
563 | tm_reclaim_thread(¤t->thread, current_thread_info(), cause); | 570 | tm_reclaim_thread(¤t->thread, current_thread_info(), cause); |
564 | } | 571 | } |
565 | 572 | ||
566 | static inline void tm_reclaim_task(struct task_struct *tsk) | 573 | static inline void tm_reclaim_task(struct task_struct *tsk) |
567 | { | 574 | { |
568 | /* We have to work out if we're switching from/to a task that's in the | 575 | /* We have to work out if we're switching from/to a task that's in the |
569 | * middle of a transaction. | 576 | * middle of a transaction. |
570 | * | 577 | * |
571 | * In switching we need to maintain a 2nd register state as | 578 | * In switching we need to maintain a 2nd register state as |
572 | * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the | 579 | * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the |
573 | * checkpointed (tbegin) state in ckpt_regs and saves the transactional | 580 | * checkpointed (tbegin) state in ckpt_regs and saves the transactional |
574 | * (current) FPRs into oldtask->thread.transact_fpr[]. | 581 | * (current) FPRs into oldtask->thread.transact_fpr[]. |
575 | * | 582 | * |
576 | * We also context switch (save) TFHAR/TEXASR/TFIAR in here. | 583 | * We also context switch (save) TFHAR/TEXASR/TFIAR in here. |
577 | */ | 584 | */ |
578 | struct thread_struct *thr = &tsk->thread; | 585 | struct thread_struct *thr = &tsk->thread; |
579 | 586 | ||
580 | if (!thr->regs) | 587 | if (!thr->regs) |
581 | return; | 588 | return; |
582 | 589 | ||
583 | if (!MSR_TM_ACTIVE(thr->regs->msr)) | 590 | if (!MSR_TM_ACTIVE(thr->regs->msr)) |
584 | goto out_and_saveregs; | 591 | goto out_and_saveregs; |
585 | 592 | ||
586 | /* Stash the original thread MSR, as giveup_fpu et al will | 593 | /* Stash the original thread MSR, as giveup_fpu et al will |
587 | * modify it. We hold onto it to see whether the task used | 594 | * modify it. We hold onto it to see whether the task used |
588 | * FP & vector regs. If the TIF_RESTORE_TM flag is set, | 595 | * FP & vector regs. If the TIF_RESTORE_TM flag is set, |
589 | * tm_orig_msr is already set. | 596 | * tm_orig_msr is already set. |
590 | */ | 597 | */ |
591 | if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM)) | 598 | if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM)) |
592 | thr->tm_orig_msr = thr->regs->msr; | 599 | thr->tm_orig_msr = thr->regs->msr; |
593 | 600 | ||
594 | TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " | 601 | TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " |
595 | "ccr=%lx, msr=%lx, trap=%lx)\n", | 602 | "ccr=%lx, msr=%lx, trap=%lx)\n", |
596 | tsk->pid, thr->regs->nip, | 603 | tsk->pid, thr->regs->nip, |
597 | thr->regs->ccr, thr->regs->msr, | 604 | thr->regs->ccr, thr->regs->msr, |
598 | thr->regs->trap); | 605 | thr->regs->trap); |
599 | 606 | ||
600 | tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED); | 607 | tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED); |
601 | 608 | ||
602 | TM_DEBUG("--- tm_reclaim on pid %d complete\n", | 609 | TM_DEBUG("--- tm_reclaim on pid %d complete\n", |
603 | tsk->pid); | 610 | tsk->pid); |
604 | 611 | ||
605 | out_and_saveregs: | 612 | out_and_saveregs: |
606 | /* Always save the regs here, even if a transaction's not active. | 613 | /* Always save the regs here, even if a transaction's not active. |
607 | * This context-switches a thread's TM info SPRs. We do it here to | 614 | * This context-switches a thread's TM info SPRs. We do it here to |
608 | * be consistent with the restore path (in recheckpoint) which | 615 | * be consistent with the restore path (in recheckpoint) which |
609 | * cannot happen later in _switch(). | 616 | * cannot happen later in _switch(). |
610 | */ | 617 | */ |
611 | tm_save_sprs(thr); | 618 | tm_save_sprs(thr); |
612 | } | 619 | } |
613 | 620 | ||
614 | extern void __tm_recheckpoint(struct thread_struct *thread, | 621 | extern void __tm_recheckpoint(struct thread_struct *thread, |
615 | unsigned long orig_msr); | 622 | unsigned long orig_msr); |
616 | 623 | ||
617 | void tm_recheckpoint(struct thread_struct *thread, | 624 | void tm_recheckpoint(struct thread_struct *thread, |
618 | unsigned long orig_msr) | 625 | unsigned long orig_msr) |
619 | { | 626 | { |
620 | unsigned long flags; | 627 | unsigned long flags; |
621 | 628 | ||
622 | /* We really can't be interrupted here as the TEXASR registers can't | 629 | /* We really can't be interrupted here as the TEXASR registers can't |
623 | * change and later in the trecheckpoint code, we have a userspace R1. | 630 | * change and later in the trecheckpoint code, we have a userspace R1. |
624 | * So let's hard disable over this region. | 631 | * So let's hard disable over this region. |
625 | */ | 632 | */ |
626 | local_irq_save(flags); | 633 | local_irq_save(flags); |
627 | hard_irq_disable(); | 634 | hard_irq_disable(); |
628 | 635 | ||
629 | /* The TM SPRs are restored here, so that TEXASR.FS can be set | 636 | /* The TM SPRs are restored here, so that TEXASR.FS can be set |
630 | * before the trecheckpoint and no explosion occurs. | 637 | * before the trecheckpoint and no explosion occurs. |
631 | */ | 638 | */ |
632 | tm_restore_sprs(thread); | 639 | tm_restore_sprs(thread); |
633 | 640 | ||
634 | __tm_recheckpoint(thread, orig_msr); | 641 | __tm_recheckpoint(thread, orig_msr); |
635 | 642 | ||
636 | local_irq_restore(flags); | 643 | local_irq_restore(flags); |
637 | } | 644 | } |
638 | 645 | ||
639 | static inline void tm_recheckpoint_new_task(struct task_struct *new) | 646 | static inline void tm_recheckpoint_new_task(struct task_struct *new) |
640 | { | 647 | { |
641 | unsigned long msr; | 648 | unsigned long msr; |
642 | 649 | ||
643 | if (!cpu_has_feature(CPU_FTR_TM)) | 650 | if (!cpu_has_feature(CPU_FTR_TM)) |
644 | return; | 651 | return; |
645 | 652 | ||
646 | /* Recheckpoint the registers of the thread we're about to switch to. | 653 | /* Recheckpoint the registers of the thread we're about to switch to. |
647 | * | 654 | * |
648 | * If the task was using FP, we non-lazily reload both the original and | 655 | * If the task was using FP, we non-lazily reload both the original and |
649 | * the speculative FP register states. This is because the kernel | 656 | * the speculative FP register states. This is because the kernel |
650 | * doesn't see if/when a TM rollback occurs, so if we take an FP | 657 | * doesn't see if/when a TM rollback occurs, so if we take an FP |
651 | * unavailable later, we are unable to determine which set of FP regs | 658 |
652 | * need to be restored. | 659 | * need to be restored. |
653 | */ | 660 | */ |
654 | if (!new->thread.regs) | 661 | if (!new->thread.regs) |
655 | return; | 662 | return; |
656 | 663 | ||
657 | if (!MSR_TM_ACTIVE(new->thread.regs->msr)){ | 664 | if (!MSR_TM_ACTIVE(new->thread.regs->msr)){ |
658 | tm_restore_sprs(&new->thread); | 665 | tm_restore_sprs(&new->thread); |
659 | return; | 666 | return; |
660 | } | 667 | } |
661 | msr = new->thread.tm_orig_msr; | 668 | msr = new->thread.tm_orig_msr; |
662 | /* Recheckpoint to restore original checkpointed register state. */ | 669 | /* Recheckpoint to restore original checkpointed register state. */ |
663 | TM_DEBUG("*** tm_recheckpoint of pid %d " | 670 | TM_DEBUG("*** tm_recheckpoint of pid %d " |
664 | "(new->msr 0x%lx, new->origmsr 0x%lx)\n", | 671 | "(new->msr 0x%lx, new->origmsr 0x%lx)\n", |
665 | new->pid, new->thread.regs->msr, msr); | 672 | new->pid, new->thread.regs->msr, msr); |
666 | 673 | ||
667 | /* This loads the checkpointed FP/VEC state, if used */ | 674 | /* This loads the checkpointed FP/VEC state, if used */ |
668 | tm_recheckpoint(&new->thread, msr); | 675 | tm_recheckpoint(&new->thread, msr); |
669 | 676 | ||
670 | /* This loads the speculative FP/VEC state, if used */ | 677 | /* This loads the speculative FP/VEC state, if used */ |
671 | if (msr & MSR_FP) { | 678 | if (msr & MSR_FP) { |
672 | do_load_up_transact_fpu(&new->thread); | 679 | do_load_up_transact_fpu(&new->thread); |
673 | new->thread.regs->msr |= | 680 | new->thread.regs->msr |= |
674 | (MSR_FP | new->thread.fpexc_mode); | 681 | (MSR_FP | new->thread.fpexc_mode); |
675 | } | 682 | } |
676 | #ifdef CONFIG_ALTIVEC | 683 | #ifdef CONFIG_ALTIVEC |
677 | if (msr & MSR_VEC) { | 684 | if (msr & MSR_VEC) { |
678 | do_load_up_transact_altivec(&new->thread); | 685 | do_load_up_transact_altivec(&new->thread); |
679 | new->thread.regs->msr |= MSR_VEC; | 686 | new->thread.regs->msr |= MSR_VEC; |
680 | } | 687 | } |
681 | #endif | 688 | #endif |
682 | /* We may as well turn on VSX too since all the state is restored now */ | 689 | /* We may as well turn on VSX too since all the state is restored now */ |
683 | if (msr & MSR_VSX) | 690 | if (msr & MSR_VSX) |
684 | new->thread.regs->msr |= MSR_VSX; | 691 | new->thread.regs->msr |= MSR_VSX; |
685 | 692 | ||
686 | TM_DEBUG("*** tm_recheckpoint of pid %d complete " | 693 | TM_DEBUG("*** tm_recheckpoint of pid %d complete " |
687 | "(kernel msr 0x%lx)\n", | 694 | "(kernel msr 0x%lx)\n", |
688 | new->pid, mfmsr()); | 695 | new->pid, mfmsr()); |
689 | } | 696 | } |
690 | 697 | ||
691 | static inline void __switch_to_tm(struct task_struct *prev) | 698 | static inline void __switch_to_tm(struct task_struct *prev) |
692 | { | 699 | { |
693 | if (cpu_has_feature(CPU_FTR_TM)) { | 700 | if (cpu_has_feature(CPU_FTR_TM)) { |
694 | tm_enable(); | 701 | tm_enable(); |
695 | tm_reclaim_task(prev); | 702 | tm_reclaim_task(prev); |
696 | } | 703 | } |
697 | } | 704 | } |
698 | 705 | ||
699 | /* | 706 | /* |
700 | * This is called if we are on the way out to userspace and the | 707 | * This is called if we are on the way out to userspace and the |
701 | * TIF_RESTORE_TM flag is set. It checks if we need to reload | 708 | * TIF_RESTORE_TM flag is set. It checks if we need to reload |
702 | * FP and/or vector state and does so if necessary. | 709 | * FP and/or vector state and does so if necessary. |
703 | * If userspace is inside a transaction (whether active or | 710 | * If userspace is inside a transaction (whether active or |
704 | * suspended) and FP/VMX/VSX instructions have ever been enabled | 711 | * suspended) and FP/VMX/VSX instructions have ever been enabled |
705 | * inside that transaction, then we have to keep them enabled | 712 | * inside that transaction, then we have to keep them enabled |
706 | * and keep the FP/VMX/VSX state loaded while ever the transaction | 713 | * and keep the FP/VMX/VSX state loaded while ever the transaction |
707 | * continues. The reason is that if we didn't, and subsequently | 714 | * continues. The reason is that if we didn't, and subsequently |
708 | * got a FP/VMX/VSX unavailable interrupt inside a transaction, | 715 | * got a FP/VMX/VSX unavailable interrupt inside a transaction, |
709 | * we don't know whether it's the same transaction, and thus we | 716 | * we don't know whether it's the same transaction, and thus we |
710 | * don't know which of the checkpointed state and the transactional | 717 | * don't know which of the checkpointed state and the transactional |
711 | * state to use. | 718 | * state to use. |
712 | */ | 719 | */ |
713 | void restore_tm_state(struct pt_regs *regs) | 720 | void restore_tm_state(struct pt_regs *regs) |
714 | { | 721 | { |
715 | unsigned long msr_diff; | 722 | unsigned long msr_diff; |
716 | 723 | ||
717 | clear_thread_flag(TIF_RESTORE_TM); | 724 | clear_thread_flag(TIF_RESTORE_TM); |
718 | if (!MSR_TM_ACTIVE(regs->msr)) | 725 | if (!MSR_TM_ACTIVE(regs->msr)) |
719 | return; | 726 | return; |
720 | 727 | ||
721 | msr_diff = current->thread.tm_orig_msr & ~regs->msr; | 728 | msr_diff = current->thread.tm_orig_msr & ~regs->msr; |
722 | msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; | 729 | msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; |
723 | if (msr_diff & MSR_FP) { | 730 | if (msr_diff & MSR_FP) { |
724 | fp_enable(); | 731 | fp_enable(); |
725 | load_fp_state(¤t->thread.fp_state); | 732 | load_fp_state(¤t->thread.fp_state); |
726 | regs->msr |= current->thread.fpexc_mode; | 733 | regs->msr |= current->thread.fpexc_mode; |
727 | } | 734 | } |
728 | if (msr_diff & MSR_VEC) { | 735 | if (msr_diff & MSR_VEC) { |
729 | vec_enable(); | 736 | vec_enable(); |
730 | load_vr_state(¤t->thread.vr_state); | 737 | load_vr_state(¤t->thread.vr_state); |
731 | } | 738 | } |
732 | regs->msr |= msr_diff; | 739 | regs->msr |= msr_diff; |
733 | } | 740 | } |
734 | 741 | ||
735 | #else | 742 | #else |
736 | #define tm_recheckpoint_new_task(new) | 743 | #define tm_recheckpoint_new_task(new) |
737 | #define __switch_to_tm(prev) | 744 | #define __switch_to_tm(prev) |
738 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | 745 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
739 | 746 | ||
740 | struct task_struct *__switch_to(struct task_struct *prev, | 747 | struct task_struct *__switch_to(struct task_struct *prev, |
741 | struct task_struct *new) | 748 | struct task_struct *new) |
742 | { | 749 | { |
743 | struct thread_struct *new_thread, *old_thread; | 750 | struct thread_struct *new_thread, *old_thread; |
744 | struct task_struct *last; | 751 | struct task_struct *last; |
745 | #ifdef CONFIG_PPC_BOOK3S_64 | 752 | #ifdef CONFIG_PPC_BOOK3S_64 |
746 | struct ppc64_tlb_batch *batch; | 753 | struct ppc64_tlb_batch *batch; |
747 | #endif | 754 | #endif |
748 | 755 | ||
749 | WARN_ON(!irqs_disabled()); | 756 | WARN_ON(!irqs_disabled()); |
750 | 757 | ||
751 | /* Back up the TAR across context switches. | 758 | /* Back up the TAR across context switches. |
752 | * Note that the TAR is not available for use in the kernel. (To | 759 | * Note that the TAR is not available for use in the kernel. (To |
753 | * provide this, the TAR should be backed up/restored on exception | 760 | * provide this, the TAR should be backed up/restored on exception |
754 | * entry/exit instead, and be in pt_regs. FIXME, this should be in | 761 | * entry/exit instead, and be in pt_regs. FIXME, this should be in |
755 | * pt_regs anyway (for debug).) | 762 | * pt_regs anyway (for debug).) |
756 | * Save the TAR here before we do treclaim/trecheckpoint as these | 763 | * Save the TAR here before we do treclaim/trecheckpoint as these |
757 | * will change the TAR. | 764 | * will change the TAR. |
758 | */ | 765 | */ |
759 | save_tar(&prev->thread); | 766 | save_tar(&prev->thread); |
760 | 767 | ||
761 | __switch_to_tm(prev); | 768 | __switch_to_tm(prev); |
762 | 769 | ||
763 | #ifdef CONFIG_SMP | 770 | #ifdef CONFIG_SMP |
764 | /* avoid complexity of lazy save/restore of fpu | 771 | /* avoid complexity of lazy save/restore of fpu |
765 | * by just saving it every time we switch out if | 772 | * by just saving it every time we switch out if |
766 | * this task used the fpu during the last quantum. | 773 | * this task used the fpu during the last quantum. |
767 | * | 774 | * |
768 | * If it tries to use the fpu again, it'll trap and | 775 | * If it tries to use the fpu again, it'll trap and |
769 | * reload its fp regs. So we don't have to do a restore | 776 | * reload its fp regs. So we don't have to do a restore |
770 | * every switch, just a save. | 777 | * every switch, just a save. |
771 | * -- Cort | 778 | * -- Cort |
772 | */ | 779 | */ |
773 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) | 780 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) |
774 | giveup_fpu(prev); | 781 | giveup_fpu(prev); |
775 | #ifdef CONFIG_ALTIVEC | 782 | #ifdef CONFIG_ALTIVEC |
776 | /* | 783 | /* |
777 | * If the previous thread used altivec in the last quantum | 784 | * If the previous thread used altivec in the last quantum |
778 | * (thus changing altivec regs) then save them. | 785 | * (thus changing altivec regs) then save them. |
779 | * We used to check the VRSAVE register but not all apps | 786 | * We used to check the VRSAVE register but not all apps |
780 | * set it, so we don't rely on it now (and in fact we need | 787 | * set it, so we don't rely on it now (and in fact we need |
781 | * to save & restore VSCR even if VRSAVE == 0). -- paulus | 788 | * to save & restore VSCR even if VRSAVE == 0). -- paulus |
782 | * | 789 | * |
783 | * On SMP we always save/restore altivec regs just to avoid the | 790 | * On SMP we always save/restore altivec regs just to avoid the |
784 | * complexity of changing processors. | 791 | * complexity of changing processors. |
785 | * -- Cort | 792 | * -- Cort |
786 | */ | 793 | */ |
787 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) | 794 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) |
788 | giveup_altivec(prev); | 795 | giveup_altivec(prev); |
789 | #endif /* CONFIG_ALTIVEC */ | 796 | #endif /* CONFIG_ALTIVEC */ |
790 | #ifdef CONFIG_VSX | 797 | #ifdef CONFIG_VSX |
791 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX)) | 798 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX)) |
792 | /* VMX and FPU registers are already saved here */ | 799 |
793 | __giveup_vsx(prev); | 800 | __giveup_vsx(prev); |
794 | #endif /* CONFIG_VSX */ | 801 | #endif /* CONFIG_VSX */ |
795 | #ifdef CONFIG_SPE | 802 | #ifdef CONFIG_SPE |
796 | /* | 803 | /* |
797 | * If the previous thread used spe in the last quantum | 804 | * If the previous thread used spe in the last quantum |
798 | * (thus changing spe regs) then save them. | 805 | * (thus changing spe regs) then save them. |
799 | * | 806 | * |
800 | * On SMP we always save/restore spe regs just to avoid the | 807 | * On SMP we always save/restore spe regs just to avoid the |
801 | * complexity of changing processors. | 808 | * complexity of changing processors. |
802 | */ | 809 | */ |
803 | if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) | 810 | if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) |
804 | giveup_spe(prev); | 811 | giveup_spe(prev); |
805 | #endif /* CONFIG_SPE */ | 812 | #endif /* CONFIG_SPE */ |
806 | 813 | ||
807 | #else /* CONFIG_SMP */ | 814 | #else /* CONFIG_SMP */ |
808 | #ifdef CONFIG_ALTIVEC | 815 | #ifdef CONFIG_ALTIVEC |
809 | /* Avoid the trap. On smp this never happens since | 816 |
810 | * we don't set last_task_used_altivec -- Cort | 817 | * we don't set last_task_used_altivec -- Cort |
811 | */ | 818 | */ |
812 | if (new->thread.regs && last_task_used_altivec == new) | 819 | if (new->thread.regs && last_task_used_altivec == new) |
813 | new->thread.regs->msr |= MSR_VEC; | 820 | new->thread.regs->msr |= MSR_VEC; |
814 | #endif /* CONFIG_ALTIVEC */ | 821 | #endif /* CONFIG_ALTIVEC */ |
815 | #ifdef CONFIG_VSX | 822 | #ifdef CONFIG_VSX |
816 | if (new->thread.regs && last_task_used_vsx == new) | 823 | if (new->thread.regs && last_task_used_vsx == new) |
817 | new->thread.regs->msr |= MSR_VSX; | 824 | new->thread.regs->msr |= MSR_VSX; |
818 | #endif /* CONFIG_VSX */ | 825 | #endif /* CONFIG_VSX */ |
819 | #ifdef CONFIG_SPE | 826 | #ifdef CONFIG_SPE |
820 | /* Avoid the trap. On smp this never happens since | 827 |
821 | * we don't set last_task_used_spe | 828 | * we don't set last_task_used_spe |
822 | */ | 829 | */ |
823 | if (new->thread.regs && last_task_used_spe == new) | 830 | if (new->thread.regs && last_task_used_spe == new) |
824 | new->thread.regs->msr |= MSR_SPE; | 831 | new->thread.regs->msr |= MSR_SPE; |
825 | #endif /* CONFIG_SPE */ | 832 | #endif /* CONFIG_SPE */ |
826 | 833 | ||
827 | #endif /* CONFIG_SMP */ | 834 | #endif /* CONFIG_SMP */ |
828 | 835 | ||
829 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 836 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
830 | switch_booke_debug_regs(&new->thread.debug); | 837 | switch_booke_debug_regs(&new->thread.debug); |
831 | #else | 838 | #else |
832 | /* | 839 | /* |
833 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would | 840 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would |
834 | * schedule DABR | 841 | * schedule DABR |
835 | */ | 842 | */ |
836 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | 843 | #ifndef CONFIG_HAVE_HW_BREAKPOINT |
837 | if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) | 844 | if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) |
838 | set_breakpoint(&new->thread.hw_brk); | 845 | __set_breakpoint(&new->thread.hw_brk); |
839 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 846 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
840 | #endif | 847 | #endif |
841 | 848 | ||
842 | 849 | ||
843 | new_thread = &new->thread; | 850 | new_thread = &new->thread; |
844 | old_thread = ¤t->thread; | 851 | old_thread = ¤t->thread; |
845 | 852 | ||
846 | #ifdef CONFIG_PPC64 | 853 | #ifdef CONFIG_PPC64 |
847 | /* | 854 | /* |
848 | * Collect processor utilization data per process | 855 | * Collect processor utilization data per process |
849 | */ | 856 | */ |
850 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { | 857 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { |
851 | struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); | 858 | struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); |
852 | long unsigned start_tb, current_tb; | 859 | long unsigned start_tb, current_tb; |
853 | start_tb = old_thread->start_tb; | 860 | start_tb = old_thread->start_tb; |
854 | cu->current_tb = current_tb = mfspr(SPRN_PURR); | 861 | cu->current_tb = current_tb = mfspr(SPRN_PURR); |
855 | old_thread->accum_tb += (current_tb - start_tb); | 862 | old_thread->accum_tb += (current_tb - start_tb); |
856 | new_thread->start_tb = current_tb; | 863 | new_thread->start_tb = current_tb; |
857 | } | 864 | } |
858 | #endif /* CONFIG_PPC64 */ | 865 | #endif /* CONFIG_PPC64 */ |
859 | 866 | ||
860 | #ifdef CONFIG_PPC_BOOK3S_64 | 867 | #ifdef CONFIG_PPC_BOOK3S_64 |
861 | batch = &__get_cpu_var(ppc64_tlb_batch); | 868 | batch = &__get_cpu_var(ppc64_tlb_batch); |
862 | if (batch->active) { | 869 | if (batch->active) { |
863 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; | 870 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; |
864 | if (batch->index) | 871 | if (batch->index) |
865 | __flush_tlb_pending(batch); | 872 | __flush_tlb_pending(batch); |
866 | batch->active = 0; | 873 | batch->active = 0; |
867 | } | 874 | } |
868 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 875 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
869 | 876 | ||
870 | /* | 877 | /* |
871 | * We can't take a PMU exception inside _switch() since there is a | 878 | * We can't take a PMU exception inside _switch() since there is a |
872 | * window where the kernel stack SLB and the kernel stack are out | 879 | * window where the kernel stack SLB and the kernel stack are out |
873 | * of sync. Hard disable here. | 880 | * of sync. Hard disable here. |
874 | */ | 881 | */ |
875 | hard_irq_disable(); | 882 | hard_irq_disable(); |
876 | 883 | ||
877 | tm_recheckpoint_new_task(new); | 884 | tm_recheckpoint_new_task(new); |
878 | 885 | ||
879 | last = _switch(old_thread, new_thread); | 886 | last = _switch(old_thread, new_thread); |
880 | 887 | ||
881 | #ifdef CONFIG_PPC_BOOK3S_64 | 888 | #ifdef CONFIG_PPC_BOOK3S_64 |
882 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { | 889 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { |
883 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; | 890 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; |
884 | batch = &__get_cpu_var(ppc64_tlb_batch); | 891 | batch = &__get_cpu_var(ppc64_tlb_batch); |
885 | batch->active = 1; | 892 | batch->active = 1; |
886 | } | 893 | } |
887 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 894 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
888 | 895 | ||
889 | return last; | 896 | return last; |
890 | } | 897 | } |
891 | 898 | ||
892 | static int instructions_to_print = 16; | 899 | static int instructions_to_print = 16; |
893 | 900 | ||
894 | static void show_instructions(struct pt_regs *regs) | 901 | static void show_instructions(struct pt_regs *regs) |
895 | { | 902 | { |
896 | int i; | 903 | int i; |
897 | unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * | 904 | unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * |
898 | sizeof(int)); | 905 | sizeof(int)); |
899 | 906 | ||
900 | printk("Instruction dump:"); | 907 | printk("Instruction dump:"); |
901 | 908 | ||
902 | for (i = 0; i < instructions_to_print; i++) { | 909 | for (i = 0; i < instructions_to_print; i++) { |
903 | int instr; | 910 | int instr; |
904 | 911 | ||
905 | if (!(i % 8)) | 912 | if (!(i % 8)) |
906 | printk("\n"); | 913 | printk("\n"); |
907 | 914 | ||
908 | #if !defined(CONFIG_BOOKE) | 915 | #if !defined(CONFIG_BOOKE) |
909 | /* If executing with the IMMU off, adjust pc rather | 916 | /* If executing with the IMMU off, adjust pc rather |
910 | * than print XXXXXXXX. | 917 | * than print XXXXXXXX. |
911 | */ | 918 | */ |
912 | if (!(regs->msr & MSR_IR)) | 919 | if (!(regs->msr & MSR_IR)) |
913 | pc = (unsigned long)phys_to_virt(pc); | 920 | pc = (unsigned long)phys_to_virt(pc); |
914 | #endif | 921 | #endif |
915 | 922 | ||
916 | /* We use __get_user here *only* to avoid an OOPS on a | 923 | /* We use __get_user here *only* to avoid an OOPS on a |
917 | * bad address because the pc *should* only be a | 924 | * bad address because the pc *should* only be a |
918 | * kernel address. | 925 | * kernel address. |
919 | */ | 926 | */ |
920 | if (!__kernel_text_address(pc) || | 927 | if (!__kernel_text_address(pc) || |
921 | __get_user(instr, (unsigned int __user *)pc)) { | 928 | __get_user(instr, (unsigned int __user *)pc)) { |
922 | printk(KERN_CONT "XXXXXXXX "); | 929 | printk(KERN_CONT "XXXXXXXX "); |
923 | } else { | 930 | } else { |
924 | if (regs->nip == pc) | 931 | if (regs->nip == pc) |
925 | printk(KERN_CONT "<%08x> ", instr); | 932 | printk(KERN_CONT "<%08x> ", instr); |
926 | else | 933 | else |
927 | printk(KERN_CONT "%08x ", instr); | 934 | printk(KERN_CONT "%08x ", instr); |
928 | } | 935 | } |
929 | 936 | ||
930 | pc += sizeof(int); | 937 | pc += sizeof(int); |
931 | } | 938 | } |
932 | 939 | ||
933 | printk("\n"); | 940 | printk("\n"); |
934 | } | 941 | } |
935 | 942 | ||
936 | static struct regbit { | 943 | static struct regbit { |
937 | unsigned long bit; | 944 | unsigned long bit; |
938 | const char *name; | 945 | const char *name; |
939 | } msr_bits[] = { | 946 | } msr_bits[] = { |
940 | #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE) | 947 | #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE) |
941 | {MSR_SF, "SF"}, | 948 | {MSR_SF, "SF"}, |
942 | {MSR_HV, "HV"}, | 949 | {MSR_HV, "HV"}, |
943 | #endif | 950 | #endif |
944 | {MSR_VEC, "VEC"}, | 951 | {MSR_VEC, "VEC"}, |
945 | {MSR_VSX, "VSX"}, | 952 | {MSR_VSX, "VSX"}, |
946 | #ifdef CONFIG_BOOKE | 953 | #ifdef CONFIG_BOOKE |
947 | {MSR_CE, "CE"}, | 954 | {MSR_CE, "CE"}, |
948 | #endif | 955 | #endif |
949 | {MSR_EE, "EE"}, | 956 | {MSR_EE, "EE"}, |
950 | {MSR_PR, "PR"}, | 957 | {MSR_PR, "PR"}, |
951 | {MSR_FP, "FP"}, | 958 | {MSR_FP, "FP"}, |
952 | {MSR_ME, "ME"}, | 959 | {MSR_ME, "ME"}, |
953 | #ifdef CONFIG_BOOKE | 960 | #ifdef CONFIG_BOOKE |
954 | {MSR_DE, "DE"}, | 961 | {MSR_DE, "DE"}, |
955 | #else | 962 | #else |
956 | {MSR_SE, "SE"}, | 963 | {MSR_SE, "SE"}, |
957 | {MSR_BE, "BE"}, | 964 | {MSR_BE, "BE"}, |
958 | #endif | 965 | #endif |
959 | {MSR_IR, "IR"}, | 966 | {MSR_IR, "IR"}, |
960 | {MSR_DR, "DR"}, | 967 | {MSR_DR, "DR"}, |
961 | {MSR_PMM, "PMM"}, | 968 | {MSR_PMM, "PMM"}, |
962 | #ifndef CONFIG_BOOKE | 969 | #ifndef CONFIG_BOOKE |
963 | {MSR_RI, "RI"}, | 970 | {MSR_RI, "RI"}, |
964 | {MSR_LE, "LE"}, | 971 | {MSR_LE, "LE"}, |
965 | #endif | 972 | #endif |
966 | {0, NULL} | 973 | {0, NULL} |
967 | }; | 974 | }; |
968 | 975 | ||
969 | static void printbits(unsigned long val, struct regbit *bits) | 976 | static void printbits(unsigned long val, struct regbit *bits) |
970 | { | 977 | { |
971 | const char *sep = ""; | 978 | const char *sep = ""; |
972 | 979 | ||
973 | printk("<"); | 980 | printk("<"); |
974 | for (; bits->bit; ++bits) | 981 | for (; bits->bit; ++bits) |
975 | if (val & bits->bit) { | 982 | if (val & bits->bit) { |
976 | printk("%s%s", sep, bits->name); | 983 | printk("%s%s", sep, bits->name); |
977 | sep = ","; | 984 | sep = ","; |
978 | } | 985 | } |
979 | printk(">"); | 986 | printk(">"); |
980 | } | 987 | } |
981 | 988 | ||
982 | #ifdef CONFIG_PPC64 | 989 | #ifdef CONFIG_PPC64 |
983 | #define REG "%016lx" | 990 | #define REG "%016lx" |
984 | #define REGS_PER_LINE 4 | 991 | #define REGS_PER_LINE 4 |
985 | #define LAST_VOLATILE 13 | 992 | #define LAST_VOLATILE 13 |
986 | #else | 993 | #else |
987 | #define REG "%08lx" | 994 | #define REG "%08lx" |
988 | #define REGS_PER_LINE 8 | 995 | #define REGS_PER_LINE 8 |
989 | #define LAST_VOLATILE 12 | 996 | #define LAST_VOLATILE 12 |
990 | #endif | 997 | #endif |
991 | 998 | ||
992 | void show_regs(struct pt_regs * regs) | 999 | void show_regs(struct pt_regs * regs) |
993 | { | 1000 | { |
994 | int i, trap; | 1001 | int i, trap; |
995 | 1002 | ||
996 | show_regs_print_info(KERN_DEFAULT); | 1003 | show_regs_print_info(KERN_DEFAULT); |
997 | 1004 | ||
998 | printk("NIP: "REG" LR: "REG" CTR: "REG"\n", | 1005 | printk("NIP: "REG" LR: "REG" CTR: "REG"\n", |
999 | regs->nip, regs->link, regs->ctr); | 1006 | regs->nip, regs->link, regs->ctr); |
1000 | printk("REGS: %p TRAP: %04lx %s (%s)\n", | 1007 | printk("REGS: %p TRAP: %04lx %s (%s)\n", |
1001 | regs, regs->trap, print_tainted(), init_utsname()->release); | 1008 | regs, regs->trap, print_tainted(), init_utsname()->release); |
1002 | printk("MSR: "REG" ", regs->msr); | 1009 | printk("MSR: "REG" ", regs->msr); |
1003 | printbits(regs->msr, msr_bits); | 1010 | printbits(regs->msr, msr_bits); |
1004 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); | 1011 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); |
1005 | trap = TRAP(regs); | 1012 | trap = TRAP(regs); |
1006 | if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) | 1013 | if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) |
1007 | printk("CFAR: "REG" ", regs->orig_gpr3); | 1014 | printk("CFAR: "REG" ", regs->orig_gpr3); |
1008 | if (trap == 0x200 || trap == 0x300 || trap == 0x600) | 1015 | if (trap == 0x200 || trap == 0x300 || trap == 0x600) |
1009 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1016 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) |
1010 | printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); | 1017 | printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); |
1011 | #else | 1018 | #else |
1012 | printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); | 1019 | printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); |
1013 | #endif | 1020 | #endif |
1014 | #ifdef CONFIG_PPC64 | 1021 | #ifdef CONFIG_PPC64 |
1015 | printk("SOFTE: %ld ", regs->softe); | 1022 | printk("SOFTE: %ld ", regs->softe); |
1016 | #endif | 1023 | #endif |
1017 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1024 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1018 | if (MSR_TM_ACTIVE(regs->msr)) | 1025 | if (MSR_TM_ACTIVE(regs->msr)) |
1019 | printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); | 1026 | printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); |
1020 | #endif | 1027 | #endif |
1021 | 1028 | ||
1022 | for (i = 0; i < 32; i++) { | 1029 | for (i = 0; i < 32; i++) { |
1023 | if ((i % REGS_PER_LINE) == 0) | 1030 | if ((i % REGS_PER_LINE) == 0) |
1024 | printk("\nGPR%02d: ", i); | 1031 | printk("\nGPR%02d: ", i); |
1025 | printk(REG " ", regs->gpr[i]); | 1032 | printk(REG " ", regs->gpr[i]); |
1026 | if (i == LAST_VOLATILE && !FULL_REGS(regs)) | 1033 | if (i == LAST_VOLATILE && !FULL_REGS(regs)) |
1027 | break; | 1034 | break; |
1028 | } | 1035 | } |
1029 | printk("\n"); | 1036 | printk("\n"); |
1030 | #ifdef CONFIG_KALLSYMS | 1037 | #ifdef CONFIG_KALLSYMS |
1031 | /* | 1038 | /* |
1032 | * Lookup NIP late so we have the best change of getting the | 1039 | * Lookup NIP late so we have the best change of getting the |
1033 | * above info out without failing | 1040 | * above info out without failing |
1034 | */ | 1041 | */ |
1035 | printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); | 1042 | printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); |
1036 | printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); | 1043 | printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); |
1037 | #endif | 1044 | #endif |
1038 | show_stack(current, (unsigned long *) regs->gpr[1]); | 1045 | show_stack(current, (unsigned long *) regs->gpr[1]); |
1039 | if (!user_mode(regs)) | 1046 | if (!user_mode(regs)) |
1040 | show_instructions(regs); | 1047 | show_instructions(regs); |
1041 | } | 1048 | } |
1042 | 1049 | ||
1043 | void exit_thread(void) | 1050 | void exit_thread(void) |
1044 | { | 1051 | { |
1045 | discard_lazy_cpu_state(); | 1052 | discard_lazy_cpu_state(); |
1046 | } | 1053 | } |
1047 | 1054 | ||
1048 | void flush_thread(void) | 1055 | void flush_thread(void) |
1049 | { | 1056 | { |
1050 | discard_lazy_cpu_state(); | 1057 | discard_lazy_cpu_state(); |
1051 | 1058 | ||
1052 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 1059 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1053 | flush_ptrace_hw_breakpoint(current); | 1060 | flush_ptrace_hw_breakpoint(current); |
1054 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ | 1061 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ |
1055 | set_debug_reg_defaults(¤t->thread); | 1062 | set_debug_reg_defaults(¤t->thread); |
1056 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 1063 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
1057 | } | 1064 | } |
1058 | 1065 | ||
1059 | void | 1066 | void |
1060 | release_thread(struct task_struct *t) | 1067 | release_thread(struct task_struct *t) |
1061 | { | 1068 | { |
1062 | } | 1069 | } |
1063 | 1070 | ||
1064 | /* | 1071 | /* |
1065 | * this gets called so that we can store coprocessor state into memory and | 1072 | * this gets called so that we can store coprocessor state into memory and |
1066 | * copy the current task into the new thread. | 1073 | * copy the current task into the new thread. |
1067 | */ | 1074 | */ |
1068 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | 1075 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
1069 | { | 1076 | { |
1070 | flush_fp_to_thread(src); | 1077 | flush_fp_to_thread(src); |
1071 | flush_altivec_to_thread(src); | 1078 | flush_altivec_to_thread(src); |
1072 | flush_vsx_to_thread(src); | 1079 | flush_vsx_to_thread(src); |
1073 | flush_spe_to_thread(src); | 1080 | flush_spe_to_thread(src); |
1074 | /* | 1081 | /* |
1075 | * Flush TM state out so we can copy it. __switch_to_tm() does this | 1082 | * Flush TM state out so we can copy it. __switch_to_tm() does this |
1076 | * flush but it removes the checkpointed state from the current CPU and | 1083 | * flush but it removes the checkpointed state from the current CPU and |
1077 | * transitions the CPU out of TM mode. Hence we need to call | 1084 | * transitions the CPU out of TM mode. Hence we need to call |
1078 | * tm_recheckpoint_new_task() (on the same task) to restore the | 1085 | * tm_recheckpoint_new_task() (on the same task) to restore the |
1079 | * checkpointed state back and the TM mode. | 1086 | * checkpointed state back and the TM mode. |
1080 | */ | 1087 | */ |
1081 | __switch_to_tm(src); | 1088 | __switch_to_tm(src); |
1082 | tm_recheckpoint_new_task(src); | 1089 | tm_recheckpoint_new_task(src); |
1083 | 1090 | ||
1084 | *dst = *src; | 1091 | *dst = *src; |
1085 | 1092 | ||
1086 | clear_task_ebb(dst); | 1093 | clear_task_ebb(dst); |
1087 | 1094 | ||
1088 | return 0; | 1095 | return 0; |
1089 | } | 1096 | } |
1090 | 1097 | ||
1091 | /* | 1098 | /* |
1092 | * Copy a thread.. | 1099 | * Copy a thread.. |
1093 | */ | 1100 | */ |
1094 | extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ | 1101 | extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ |
1095 | 1102 | ||
1096 | int copy_thread(unsigned long clone_flags, unsigned long usp, | 1103 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
1097 | unsigned long arg, struct task_struct *p) | 1104 | unsigned long arg, struct task_struct *p) |
1098 | { | 1105 | { |
1099 | struct pt_regs *childregs, *kregs; | 1106 | struct pt_regs *childregs, *kregs; |
1100 | extern void ret_from_fork(void); | 1107 | extern void ret_from_fork(void); |
1101 | extern void ret_from_kernel_thread(void); | 1108 | extern void ret_from_kernel_thread(void); |
1102 | void (*f)(void); | 1109 | void (*f)(void); |
1103 | unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; | 1110 | unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; |
1104 | 1111 | ||
1105 | /* Copy registers */ | 1112 | /* Copy registers */ |
1106 | sp -= sizeof(struct pt_regs); | 1113 | sp -= sizeof(struct pt_regs); |
1107 | childregs = (struct pt_regs *) sp; | 1114 | childregs = (struct pt_regs *) sp; |
1108 | if (unlikely(p->flags & PF_KTHREAD)) { | 1115 | if (unlikely(p->flags & PF_KTHREAD)) { |
1109 | struct thread_info *ti = (void *)task_stack_page(p); | 1116 | struct thread_info *ti = (void *)task_stack_page(p); |
1110 | memset(childregs, 0, sizeof(struct pt_regs)); | 1117 | memset(childregs, 0, sizeof(struct pt_regs)); |
1111 | childregs->gpr[1] = sp + sizeof(struct pt_regs); | 1118 | childregs->gpr[1] = sp + sizeof(struct pt_regs); |
1112 | /* function */ | 1119 | /* function */ |
1113 | if (usp) | 1120 | if (usp) |
1114 | childregs->gpr[14] = ppc_function_entry((void *)usp); | 1121 | childregs->gpr[14] = ppc_function_entry((void *)usp); |
1115 | #ifdef CONFIG_PPC64 | 1122 | #ifdef CONFIG_PPC64 |
1116 | clear_tsk_thread_flag(p, TIF_32BIT); | 1123 | clear_tsk_thread_flag(p, TIF_32BIT); |
1117 | childregs->softe = 1; | 1124 | childregs->softe = 1; |
1118 | #endif | 1125 | #endif |
1119 | childregs->gpr[15] = arg; | 1126 | childregs->gpr[15] = arg; |
1120 | p->thread.regs = NULL; /* no user register state */ | 1127 | p->thread.regs = NULL; /* no user register state */ |
1121 | ti->flags |= _TIF_RESTOREALL; | 1128 | ti->flags |= _TIF_RESTOREALL; |
1122 | f = ret_from_kernel_thread; | 1129 | f = ret_from_kernel_thread; |
1123 | } else { | 1130 | } else { |
1124 | struct pt_regs *regs = current_pt_regs(); | 1131 | struct pt_regs *regs = current_pt_regs(); |
1125 | CHECK_FULL_REGS(regs); | 1132 | CHECK_FULL_REGS(regs); |
1126 | *childregs = *regs; | 1133 | *childregs = *regs; |
1127 | if (usp) | 1134 | if (usp) |
1128 | childregs->gpr[1] = usp; | 1135 | childregs->gpr[1] = usp; |
1129 | p->thread.regs = childregs; | 1136 | p->thread.regs = childregs; |
1130 | childregs->gpr[3] = 0; /* Result from fork() */ | 1137 | childregs->gpr[3] = 0; /* Result from fork() */ |
1131 | if (clone_flags & CLONE_SETTLS) { | 1138 | if (clone_flags & CLONE_SETTLS) { |
1132 | #ifdef CONFIG_PPC64 | 1139 | #ifdef CONFIG_PPC64 |
1133 | if (!is_32bit_task()) | 1140 | if (!is_32bit_task()) |
1134 | childregs->gpr[13] = childregs->gpr[6]; | 1141 | childregs->gpr[13] = childregs->gpr[6]; |
1135 | else | 1142 | else |
1136 | #endif | 1143 | #endif |
1137 | childregs->gpr[2] = childregs->gpr[6]; | 1144 | childregs->gpr[2] = childregs->gpr[6]; |
1138 | } | 1145 | } |
1139 | 1146 | ||
1140 | f = ret_from_fork; | 1147 | f = ret_from_fork; |
1141 | } | 1148 | } |
1142 | sp -= STACK_FRAME_OVERHEAD; | 1149 | sp -= STACK_FRAME_OVERHEAD; |
1143 | 1150 | ||
1144 | /* | 1151 | /* |
1145 | * The way this works is that at some point in the future | 1152 | * The way this works is that at some point in the future |
1146 | * some task will call _switch to switch to the new task. | 1153 | * some task will call _switch to switch to the new task. |
1147 | * That will pop off the stack frame created below and start | 1154 | * That will pop off the stack frame created below and start |
1148 | * the new task running at ret_from_fork. The new task will | 1155 | * the new task running at ret_from_fork. The new task will |
1149 | * do some house keeping and then return from the fork or clone | 1156 | * do some house keeping and then return from the fork or clone |
1150 | * system call, using the stack frame created above. | 1157 | * system call, using the stack frame created above. |
1151 | */ | 1158 | */ |
1152 | ((unsigned long *)sp)[0] = 0; | 1159 | ((unsigned long *)sp)[0] = 0; |
1153 | sp -= sizeof(struct pt_regs); | 1160 | sp -= sizeof(struct pt_regs); |
1154 | kregs = (struct pt_regs *) sp; | 1161 | kregs = (struct pt_regs *) sp; |
1155 | sp -= STACK_FRAME_OVERHEAD; | 1162 | sp -= STACK_FRAME_OVERHEAD; |
1156 | p->thread.ksp = sp; | 1163 | p->thread.ksp = sp; |
1157 | #ifdef CONFIG_PPC32 | 1164 | #ifdef CONFIG_PPC32 |
1158 | p->thread.ksp_limit = (unsigned long)task_stack_page(p) + | 1165 | p->thread.ksp_limit = (unsigned long)task_stack_page(p) + |
1159 | _ALIGN_UP(sizeof(struct thread_info), 16); | 1166 | _ALIGN_UP(sizeof(struct thread_info), 16); |
1160 | #endif | 1167 | #endif |
1161 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 1168 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1162 | p->thread.ptrace_bps[0] = NULL; | 1169 | p->thread.ptrace_bps[0] = NULL; |
1163 | #endif | 1170 | #endif |
1164 | 1171 | ||
1165 | p->thread.fp_save_area = NULL; | 1172 | p->thread.fp_save_area = NULL; |
1166 | #ifdef CONFIG_ALTIVEC | 1173 | #ifdef CONFIG_ALTIVEC |
1167 | p->thread.vr_save_area = NULL; | 1174 | p->thread.vr_save_area = NULL; |
1168 | #endif | 1175 | #endif |
1169 | 1176 | ||
1170 | #ifdef CONFIG_PPC_STD_MMU_64 | 1177 | #ifdef CONFIG_PPC_STD_MMU_64 |
1171 | if (mmu_has_feature(MMU_FTR_SLB)) { | 1178 | if (mmu_has_feature(MMU_FTR_SLB)) { |
1172 | unsigned long sp_vsid; | 1179 | unsigned long sp_vsid; |
1173 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; | 1180 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; |
1174 | 1181 | ||
1175 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) | 1182 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
1176 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) | 1183 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) |
1177 | << SLB_VSID_SHIFT_1T; | 1184 | << SLB_VSID_SHIFT_1T; |
1178 | else | 1185 | else |
1179 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M) | 1186 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M) |
1180 | << SLB_VSID_SHIFT; | 1187 | << SLB_VSID_SHIFT; |
1181 | sp_vsid |= SLB_VSID_KERNEL | llp; | 1188 | sp_vsid |= SLB_VSID_KERNEL | llp; |
1182 | p->thread.ksp_vsid = sp_vsid; | 1189 | p->thread.ksp_vsid = sp_vsid; |
1183 | } | 1190 | } |
1184 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 1191 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
1185 | #ifdef CONFIG_PPC64 | 1192 | #ifdef CONFIG_PPC64 |
1186 | if (cpu_has_feature(CPU_FTR_DSCR)) { | 1193 | if (cpu_has_feature(CPU_FTR_DSCR)) { |
1187 | p->thread.dscr_inherit = current->thread.dscr_inherit; | 1194 | p->thread.dscr_inherit = current->thread.dscr_inherit; |
1188 | p->thread.dscr = current->thread.dscr; | 1195 | p->thread.dscr = current->thread.dscr; |
1189 | } | 1196 | } |
1190 | if (cpu_has_feature(CPU_FTR_HAS_PPR)) | 1197 | if (cpu_has_feature(CPU_FTR_HAS_PPR)) |
1191 | p->thread.ppr = INIT_PPR; | 1198 | p->thread.ppr = INIT_PPR; |
1192 | #endif | 1199 | #endif |
1193 | kregs->nip = ppc_function_entry(f); | 1200 | kregs->nip = ppc_function_entry(f); |
1194 | return 0; | 1201 | return 0; |
1195 | } | 1202 | } |
1196 | 1203 | ||
1197 | /* | 1204 | /* |
1198 | * Set up a thread for executing a new program | 1205 | * Set up a thread for executing a new program |
1199 | */ | 1206 | */ |
1200 | void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) | 1207 | void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) |
1201 | { | 1208 | { |
1202 | #ifdef CONFIG_PPC64 | 1209 | #ifdef CONFIG_PPC64 |
1203 | unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ | 1210 | unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ |
1204 | #endif | 1211 | #endif |
1205 | 1212 | ||
1206 | /* | 1213 | /* |
1207 | * If we exec out of a kernel thread then thread.regs will not be | 1214 | * If we exec out of a kernel thread then thread.regs will not be |
1208 | * set. Do it now. | 1215 | * set. Do it now. |
1209 | */ | 1216 | */ |
1210 | if (!current->thread.regs) { | 1217 | if (!current->thread.regs) { |
1211 | struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; | 1218 | struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; |
1212 | current->thread.regs = regs - 1; | 1219 | current->thread.regs = regs - 1; |
1213 | } | 1220 | } |
1214 | 1221 | ||
1215 | memset(regs->gpr, 0, sizeof(regs->gpr)); | 1222 | memset(regs->gpr, 0, sizeof(regs->gpr)); |
1216 | regs->ctr = 0; | 1223 | regs->ctr = 0; |
1217 | regs->link = 0; | 1224 | regs->link = 0; |
1218 | regs->xer = 0; | 1225 | regs->xer = 0; |
1219 | regs->ccr = 0; | 1226 | regs->ccr = 0; |
1220 | regs->gpr[1] = sp; | 1227 | regs->gpr[1] = sp; |
1221 | 1228 | ||
1222 | /* | 1229 | /* |
1223 | * We have just cleared all the nonvolatile GPRs, so make | 1230 | * We have just cleared all the nonvolatile GPRs, so make |
1224 | * FULL_REGS(regs) return true. This is necessary to allow | 1231 | * FULL_REGS(regs) return true. This is necessary to allow |
1225 | * ptrace to examine the thread immediately after exec. | 1232 | * ptrace to examine the thread immediately after exec. |
1226 | */ | 1233 | */ |
1227 | regs->trap &= ~1UL; | 1234 | regs->trap &= ~1UL; |
1228 | 1235 | ||
1229 | #ifdef CONFIG_PPC32 | 1236 | #ifdef CONFIG_PPC32 |
1230 | regs->mq = 0; | 1237 | regs->mq = 0; |
1231 | regs->nip = start; | 1238 | regs->nip = start; |
1232 | regs->msr = MSR_USER; | 1239 | regs->msr = MSR_USER; |
1233 | #else | 1240 | #else |
1234 | if (!is_32bit_task()) { | 1241 | if (!is_32bit_task()) { |
1235 | unsigned long entry; | 1242 | unsigned long entry; |
1236 | 1243 | ||
1237 | if (is_elf2_task()) { | 1244 | if (is_elf2_task()) { |
1238 | /* Look ma, no function descriptors! */ | 1245 | /* Look ma, no function descriptors! */ |
1239 | entry = start; | 1246 | entry = start; |
1240 | 1247 | ||
1241 | /* | 1248 | /* |
1242 | * Ulrich says: | 1249 | * Ulrich says: |
1243 | * The latest iteration of the ABI requires that when | 1250 | * The latest iteration of the ABI requires that when |
1244 | * calling a function (at its global entry point), | 1251 | * calling a function (at its global entry point), |
1245 | * the caller must ensure r12 holds the entry point | 1252 | * the caller must ensure r12 holds the entry point |
1246 | * address (so that the function can quickly | 1253 | * address (so that the function can quickly |
1247 | * establish addressability). | 1254 | * establish addressability). |
1248 | */ | 1255 | */ |
1249 | regs->gpr[12] = start; | 1256 | regs->gpr[12] = start; |
1250 | /* Make sure that's restored on entry to userspace. */ | 1257 | /* Make sure that's restored on entry to userspace. */ |
1251 | set_thread_flag(TIF_RESTOREALL); | 1258 | set_thread_flag(TIF_RESTOREALL); |
1252 | } else { | 1259 | } else { |
1253 | unsigned long toc; | 1260 | unsigned long toc; |
1254 | 1261 | ||
1255 | /* start is a relocated pointer to the function | 1262 | /* start is a relocated pointer to the function |
1256 | * descriptor for the elf _start routine. The first | 1263 | * descriptor for the elf _start routine. The first |
1257 | * entry in the function descriptor is the entry | 1264 | * entry in the function descriptor is the entry |
1258 | * address of _start and the second entry is the TOC | 1265 | * address of _start and the second entry is the TOC |
1259 | * value we need to use. | 1266 | * value we need to use. |
1260 | */ | 1267 | */ |
1261 | __get_user(entry, (unsigned long __user *)start); | 1268 | __get_user(entry, (unsigned long __user *)start); |
1262 | __get_user(toc, (unsigned long __user *)start+1); | 1269 | __get_user(toc, (unsigned long __user *)start+1); |
1263 | 1270 | ||
1264 | /* Check whether the e_entry function descriptor entries | 1271 | /* Check whether the e_entry function descriptor entries |
1265 | * need to be relocated before we can use them. | 1272 | * need to be relocated before we can use them. |
1266 | */ | 1273 | */ |
1267 | if (load_addr != 0) { | 1274 | if (load_addr != 0) { |
1268 | entry += load_addr; | 1275 | entry += load_addr; |
1269 | toc += load_addr; | 1276 | toc += load_addr; |
1270 | } | 1277 | } |
1271 | regs->gpr[2] = toc; | 1278 | regs->gpr[2] = toc; |
1272 | } | 1279 | } |
1273 | regs->nip = entry; | 1280 | regs->nip = entry; |
1274 | regs->msr = MSR_USER64; | 1281 | regs->msr = MSR_USER64; |
1275 | } else { | 1282 | } else { |
1276 | regs->nip = start; | 1283 | regs->nip = start; |
1277 | regs->gpr[2] = 0; | 1284 | regs->gpr[2] = 0; |
1278 | regs->msr = MSR_USER32; | 1285 | regs->msr = MSR_USER32; |
1279 | } | 1286 | } |
1280 | #endif | 1287 | #endif |
1281 | discard_lazy_cpu_state(); | 1288 | discard_lazy_cpu_state(); |
1282 | #ifdef CONFIG_VSX | 1289 | #ifdef CONFIG_VSX |
1283 | current->thread.used_vsr = 0; | 1290 | current->thread.used_vsr = 0; |
1284 | #endif | 1291 | #endif |
1285 | memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); | 1292 | memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); |
1286 | current->thread.fp_save_area = NULL; | 1293 | current->thread.fp_save_area = NULL; |
1287 | #ifdef CONFIG_ALTIVEC | 1294 | #ifdef CONFIG_ALTIVEC |
1288 | memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state)); | 1295 | memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state)); |
1289 | current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ | 1296 | current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ |
1290 | current->thread.vr_save_area = NULL; | 1297 | current->thread.vr_save_area = NULL; |
1291 | current->thread.vrsave = 0; | 1298 | current->thread.vrsave = 0; |
1292 | current->thread.used_vr = 0; | 1299 | current->thread.used_vr = 0; |
1293 | #endif /* CONFIG_ALTIVEC */ | 1300 | #endif /* CONFIG_ALTIVEC */ |
1294 | #ifdef CONFIG_SPE | 1301 | #ifdef CONFIG_SPE |
1295 | memset(current->thread.evr, 0, sizeof(current->thread.evr)); | 1302 | memset(current->thread.evr, 0, sizeof(current->thread.evr)); |
1296 | current->thread.acc = 0; | 1303 | current->thread.acc = 0; |
1297 | current->thread.spefscr = 0; | 1304 | current->thread.spefscr = 0; |
1298 | current->thread.used_spe = 0; | 1305 | current->thread.used_spe = 0; |
1299 | #endif /* CONFIG_SPE */ | 1306 | #endif /* CONFIG_SPE */ |
1300 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1307 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1301 | if (cpu_has_feature(CPU_FTR_TM)) | 1308 | if (cpu_has_feature(CPU_FTR_TM)) |
1302 | regs->msr |= MSR_TM; | 1309 | regs->msr |= MSR_TM; |
1303 | current->thread.tm_tfhar = 0; | 1310 | current->thread.tm_tfhar = 0; |
1304 | current->thread.tm_texasr = 0; | 1311 | current->thread.tm_texasr = 0; |
1305 | current->thread.tm_tfiar = 0; | 1312 | current->thread.tm_tfiar = 0; |
1306 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | 1313 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
1307 | } | 1314 | } |
1308 | 1315 | ||
1309 | #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ | 1316 | #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ |
1310 | | PR_FP_EXC_RES | PR_FP_EXC_INV) | 1317 | | PR_FP_EXC_RES | PR_FP_EXC_INV) |
1311 | 1318 | ||
1312 | int set_fpexc_mode(struct task_struct *tsk, unsigned int val) | 1319 | int set_fpexc_mode(struct task_struct *tsk, unsigned int val) |
1313 | { | 1320 | { |
1314 | struct pt_regs *regs = tsk->thread.regs; | 1321 | struct pt_regs *regs = tsk->thread.regs; |
1315 | 1322 | ||
1316 | /* This is a bit hairy. If we are an SPE enabled processor | 1323 | /* This is a bit hairy. If we are an SPE enabled processor |
1317 | * (have embedded fp) we store the IEEE exception enable flags in | 1324 | * (have embedded fp) we store the IEEE exception enable flags in |
1318 | * fpexc_mode. fpexc_mode is also used for setting FP exception | 1325 | * fpexc_mode. fpexc_mode is also used for setting FP exception |
1319 | * mode (asyn, precise, disabled) for 'Classic' FP. */ | 1326 | * mode (asyn, precise, disabled) for 'Classic' FP. */ |
1320 | if (val & PR_FP_EXC_SW_ENABLE) { | 1327 | if (val & PR_FP_EXC_SW_ENABLE) { |
1321 | #ifdef CONFIG_SPE | 1328 | #ifdef CONFIG_SPE |
1322 | if (cpu_has_feature(CPU_FTR_SPE)) { | 1329 | if (cpu_has_feature(CPU_FTR_SPE)) { |
1323 | /* | 1330 | /* |
1324 | * When the sticky exception bits are set | 1331 | * When the sticky exception bits are set |
1325 | * directly by userspace, it must call prctl | 1332 | * directly by userspace, it must call prctl |
1326 | * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE | 1333 | * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE |
1327 | * in the existing prctl settings) or | 1334 | * in the existing prctl settings) or |
1328 | * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in | 1335 | * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in |
1329 | * the bits being set). <fenv.h> functions | 1336 | * the bits being set). <fenv.h> functions |
1330 | * saving and restoring the whole | 1337 | * saving and restoring the whole |
1331 | * floating-point environment need to do so | 1338 | * floating-point environment need to do so |
1332 | * anyway to restore the prctl settings from | 1339 | * anyway to restore the prctl settings from |
1333 | * the saved environment. | 1340 | * the saved environment. |
1334 | */ | 1341 | */ |
1335 | tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); | 1342 | tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); |
1336 | tsk->thread.fpexc_mode = val & | 1343 | tsk->thread.fpexc_mode = val & |
1337 | (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); | 1344 | (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); |
1338 | return 0; | 1345 | return 0; |
1339 | } else { | 1346 | } else { |
1340 | return -EINVAL; | 1347 | return -EINVAL; |
1341 | } | 1348 | } |
1342 | #else | 1349 | #else |
1343 | return -EINVAL; | 1350 | return -EINVAL; |
1344 | #endif | 1351 | #endif |
1345 | } | 1352 | } |
1346 | 1353 | ||
1347 | /* on a CONFIG_SPE this does not hurt us. The bits that | 1354 | /* on a CONFIG_SPE this does not hurt us. The bits that |
1348 | * __pack_fe01 use do not overlap with bits used for | 1355 | * __pack_fe01 use do not overlap with bits used for |
1349 | * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits | 1356 | * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits |
1350 | * on CONFIG_SPE implementations are reserved so writing to | 1357 | * on CONFIG_SPE implementations are reserved so writing to |
1351 | * them does not change anything */ | 1358 | * them does not change anything */ |
1352 | if (val > PR_FP_EXC_PRECISE) | 1359 | if (val > PR_FP_EXC_PRECISE) |
1353 | return -EINVAL; | 1360 | return -EINVAL; |
1354 | tsk->thread.fpexc_mode = __pack_fe01(val); | 1361 | tsk->thread.fpexc_mode = __pack_fe01(val); |
1355 | if (regs != NULL && (regs->msr & MSR_FP) != 0) | 1362 | if (regs != NULL && (regs->msr & MSR_FP) != 0) |
1356 | regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) | 1363 | regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) |
1357 | | tsk->thread.fpexc_mode; | 1364 | | tsk->thread.fpexc_mode; |
1358 | return 0; | 1365 | return 0; |
1359 | } | 1366 | } |
1360 | 1367 | ||
1361 | int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) | 1368 | int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) |
1362 | { | 1369 | { |
1363 | unsigned int val; | 1370 | unsigned int val; |
1364 | 1371 | ||
1365 | if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) | 1372 | if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) |
1366 | #ifdef CONFIG_SPE | 1373 | #ifdef CONFIG_SPE |
1367 | if (cpu_has_feature(CPU_FTR_SPE)) { | 1374 | if (cpu_has_feature(CPU_FTR_SPE)) { |
1368 | /* | 1375 | /* |
1369 | * When the sticky exception bits are set | 1376 | * When the sticky exception bits are set |
1370 | * directly by userspace, it must call prctl | 1377 | * directly by userspace, it must call prctl |
1371 | * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE | 1378 | * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE |
1372 | * in the existing prctl settings) or | 1379 | * in the existing prctl settings) or |
1373 | * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in | 1380 | * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in |
1374 | * the bits being set). <fenv.h> functions | 1381 | * the bits being set). <fenv.h> functions |
1375 | * saving and restoring the whole | 1382 | * saving and restoring the whole |
1376 | * floating-point environment need to do so | 1383 | * floating-point environment need to do so |
1377 | * anyway to restore the prctl settings from | 1384 | * anyway to restore the prctl settings from |
1378 | * the saved environment. | 1385 | * the saved environment. |
1379 | */ | 1386 | */ |
1380 | tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); | 1387 | tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); |
1381 | val = tsk->thread.fpexc_mode; | 1388 | val = tsk->thread.fpexc_mode; |
1382 | } else | 1389 | } else |
1383 | return -EINVAL; | 1390 | return -EINVAL; |
1384 | #else | 1391 | #else |
1385 | return -EINVAL; | 1392 | return -EINVAL; |
1386 | #endif | 1393 | #endif |
1387 | else | 1394 | else |
1388 | val = __unpack_fe01(tsk->thread.fpexc_mode); | 1395 | val = __unpack_fe01(tsk->thread.fpexc_mode); |
1389 | return put_user(val, (unsigned int __user *) adr); | 1396 | return put_user(val, (unsigned int __user *) adr); |
1390 | } | 1397 | } |
1391 | 1398 | ||
1392 | int set_endian(struct task_struct *tsk, unsigned int val) | 1399 | int set_endian(struct task_struct *tsk, unsigned int val) |
1393 | { | 1400 | { |
1394 | struct pt_regs *regs = tsk->thread.regs; | 1401 | struct pt_regs *regs = tsk->thread.regs; |
1395 | 1402 | ||
1396 | if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) || | 1403 | if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) || |
1397 | (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE))) | 1404 | (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE))) |
1398 | return -EINVAL; | 1405 | return -EINVAL; |
1399 | 1406 | ||
1400 | if (regs == NULL) | 1407 | if (regs == NULL) |
1401 | return -EINVAL; | 1408 | return -EINVAL; |
1402 | 1409 | ||
1403 | if (val == PR_ENDIAN_BIG) | 1410 | if (val == PR_ENDIAN_BIG) |
1404 | regs->msr &= ~MSR_LE; | 1411 | regs->msr &= ~MSR_LE; |
1405 | else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE) | 1412 | else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE) |
1406 | regs->msr |= MSR_LE; | 1413 | regs->msr |= MSR_LE; |
1407 | else | 1414 | else |
1408 | return -EINVAL; | 1415 | return -EINVAL; |
1409 | 1416 | ||
1410 | return 0; | 1417 | return 0; |
1411 | } | 1418 | } |
1412 | 1419 | ||
1413 | int get_endian(struct task_struct *tsk, unsigned long adr) | 1420 | int get_endian(struct task_struct *tsk, unsigned long adr) |
1414 | { | 1421 | { |
1415 | struct pt_regs *regs = tsk->thread.regs; | 1422 | struct pt_regs *regs = tsk->thread.regs; |
1416 | unsigned int val; | 1423 | unsigned int val; |
1417 | 1424 | ||
1418 | if (!cpu_has_feature(CPU_FTR_PPC_LE) && | 1425 | if (!cpu_has_feature(CPU_FTR_PPC_LE) && |
1419 | !cpu_has_feature(CPU_FTR_REAL_LE)) | 1426 | !cpu_has_feature(CPU_FTR_REAL_LE)) |
1420 | return -EINVAL; | 1427 | return -EINVAL; |
1421 | 1428 | ||
1422 | if (regs == NULL) | 1429 | if (regs == NULL) |
1423 | return -EINVAL; | 1430 | return -EINVAL; |
1424 | 1431 | ||
1425 | if (regs->msr & MSR_LE) { | 1432 | if (regs->msr & MSR_LE) { |
1426 | if (cpu_has_feature(CPU_FTR_REAL_LE)) | 1433 | if (cpu_has_feature(CPU_FTR_REAL_LE)) |
1427 | val = PR_ENDIAN_LITTLE; | 1434 | val = PR_ENDIAN_LITTLE; |
1428 | else | 1435 | else |
1429 | val = PR_ENDIAN_PPC_LITTLE; | 1436 | val = PR_ENDIAN_PPC_LITTLE; |
1430 | } else | 1437 | } else |
1431 | val = PR_ENDIAN_BIG; | 1438 | val = PR_ENDIAN_BIG; |
1432 | 1439 | ||
1433 | return put_user(val, (unsigned int __user *)adr); | 1440 | return put_user(val, (unsigned int __user *)adr); |
1434 | } | 1441 | } |
1435 | 1442 | ||
1436 | int set_unalign_ctl(struct task_struct *tsk, unsigned int val) | 1443 | int set_unalign_ctl(struct task_struct *tsk, unsigned int val) |
1437 | { | 1444 | { |
1438 | tsk->thread.align_ctl = val; | 1445 | tsk->thread.align_ctl = val; |
1439 | return 0; | 1446 | return 0; |
1440 | } | 1447 | } |
1441 | 1448 | ||
1442 | int get_unalign_ctl(struct task_struct *tsk, unsigned long adr) | 1449 | int get_unalign_ctl(struct task_struct *tsk, unsigned long adr) |
1443 | { | 1450 | { |
1444 | return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); | 1451 | return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); |
1445 | } | 1452 | } |
1446 | 1453 | ||
1447 | static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, | 1454 | static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, |
1448 | unsigned long nbytes) | 1455 | unsigned long nbytes) |
1449 | { | 1456 | { |
1450 | unsigned long stack_page; | 1457 | unsigned long stack_page; |
1451 | unsigned long cpu = task_cpu(p); | 1458 | unsigned long cpu = task_cpu(p); |
1452 | 1459 | ||
1453 | /* | 1460 | /* |
1454 | * Avoid crashing if the stack has overflowed and corrupted | 1461 | * Avoid crashing if the stack has overflowed and corrupted |
1455 | * task_cpu(p), which is in the thread_info struct. | 1462 | * task_cpu(p), which is in the thread_info struct. |
1456 | */ | 1463 | */ |
1457 | if (cpu < NR_CPUS && cpu_possible(cpu)) { | 1464 | if (cpu < NR_CPUS && cpu_possible(cpu)) { |
1458 | stack_page = (unsigned long) hardirq_ctx[cpu]; | 1465 | stack_page = (unsigned long) hardirq_ctx[cpu]; |
1459 | if (sp >= stack_page + sizeof(struct thread_struct) | 1466 | if (sp >= stack_page + sizeof(struct thread_struct) |
1460 | && sp <= stack_page + THREAD_SIZE - nbytes) | 1467 | && sp <= stack_page + THREAD_SIZE - nbytes) |
1461 | return 1; | 1468 | return 1; |
1462 | 1469 | ||
1463 | stack_page = (unsigned long) softirq_ctx[cpu]; | 1470 | stack_page = (unsigned long) softirq_ctx[cpu]; |
1464 | if (sp >= stack_page + sizeof(struct thread_struct) | 1471 | if (sp >= stack_page + sizeof(struct thread_struct) |
1465 | && sp <= stack_page + THREAD_SIZE - nbytes) | 1472 | && sp <= stack_page + THREAD_SIZE - nbytes) |
1466 | return 1; | 1473 | return 1; |
1467 | } | 1474 | } |
1468 | return 0; | 1475 | return 0; |
1469 | } | 1476 | } |
1470 | 1477 | ||
1471 | int validate_sp(unsigned long sp, struct task_struct *p, | 1478 | int validate_sp(unsigned long sp, struct task_struct *p, |
1472 | unsigned long nbytes) | 1479 | unsigned long nbytes) |
1473 | { | 1480 | { |
1474 | unsigned long stack_page = (unsigned long)task_stack_page(p); | 1481 | unsigned long stack_page = (unsigned long)task_stack_page(p); |
1475 | 1482 | ||
1476 | if (sp >= stack_page + sizeof(struct thread_struct) | 1483 | if (sp >= stack_page + sizeof(struct thread_struct) |
1477 | && sp <= stack_page + THREAD_SIZE - nbytes) | 1484 | && sp <= stack_page + THREAD_SIZE - nbytes) |
1478 | return 1; | 1485 | return 1; |
1479 | 1486 | ||
1480 | return valid_irq_stack(sp, p, nbytes); | 1487 | return valid_irq_stack(sp, p, nbytes); |
1481 | } | 1488 | } |
1482 | 1489 | ||
1483 | EXPORT_SYMBOL(validate_sp); | 1490 | EXPORT_SYMBOL(validate_sp); |
1484 | 1491 | ||
1485 | unsigned long get_wchan(struct task_struct *p) | 1492 | unsigned long get_wchan(struct task_struct *p) |
1486 | { | 1493 | { |
1487 | unsigned long ip, sp; | 1494 | unsigned long ip, sp; |
1488 | int count = 0; | 1495 | int count = 0; |
1489 | 1496 | ||
1490 | if (!p || p == current || p->state == TASK_RUNNING) | 1497 | if (!p || p == current || p->state == TASK_RUNNING) |
1491 | return 0; | 1498 | return 0; |
1492 | 1499 | ||
1493 | sp = p->thread.ksp; | 1500 | sp = p->thread.ksp; |
1494 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) | 1501 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) |
1495 | return 0; | 1502 | return 0; |
1496 | 1503 | ||
1497 | do { | 1504 | do { |
1498 | sp = *(unsigned long *)sp; | 1505 | sp = *(unsigned long *)sp; |
1499 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) | 1506 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) |
1500 | return 0; | 1507 | return 0; |
1501 | if (count > 0) { | 1508 | if (count > 0) { |
1502 | ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; | 1509 | ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; |
1503 | if (!in_sched_functions(ip)) | 1510 | if (!in_sched_functions(ip)) |
1504 | return ip; | 1511 | return ip; |
1505 | } | 1512 | } |
1506 | } while (count++ < 16); | 1513 | } while (count++ < 16); |
1507 | return 0; | 1514 | return 0; |
1508 | } | 1515 | } |
1509 | 1516 | ||
1510 | static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; | 1517 | static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; |
1511 | 1518 | ||
1512 | void show_stack(struct task_struct *tsk, unsigned long *stack) | 1519 | void show_stack(struct task_struct *tsk, unsigned long *stack) |
1513 | { | 1520 | { |
1514 | unsigned long sp, ip, lr, newsp; | 1521 | unsigned long sp, ip, lr, newsp; |
1515 | int count = 0; | 1522 | int count = 0; |
1516 | int firstframe = 1; | 1523 | int firstframe = 1; |
1517 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1524 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1518 | int curr_frame = current->curr_ret_stack; | 1525 | int curr_frame = current->curr_ret_stack; |
1519 | extern void return_to_handler(void); | 1526 | extern void return_to_handler(void); |
1520 | unsigned long rth = (unsigned long)return_to_handler; | 1527 | unsigned long rth = (unsigned long)return_to_handler; |
1521 | unsigned long mrth = -1; | 1528 | unsigned long mrth = -1; |
1522 | #ifdef CONFIG_PPC64 | 1529 | #ifdef CONFIG_PPC64 |
1523 | extern void mod_return_to_handler(void); | 1530 | extern void mod_return_to_handler(void); |
1524 | rth = *(unsigned long *)rth; | 1531 | rth = *(unsigned long *)rth; |
1525 | mrth = (unsigned long)mod_return_to_handler; | 1532 | mrth = (unsigned long)mod_return_to_handler; |
1526 | mrth = *(unsigned long *)mrth; | 1533 | mrth = *(unsigned long *)mrth; |
1527 | #endif | 1534 | #endif |
1528 | #endif | 1535 | #endif |
1529 | 1536 | ||
1530 | sp = (unsigned long) stack; | 1537 | sp = (unsigned long) stack; |
1531 | if (tsk == NULL) | 1538 | if (tsk == NULL) |
1532 | tsk = current; | 1539 | tsk = current; |
1533 | if (sp == 0) { | 1540 | if (sp == 0) { |
1534 | if (tsk == current) | 1541 | if (tsk == current) |
1535 | asm("mr %0,1" : "=r" (sp)); | 1542 | asm("mr %0,1" : "=r" (sp)); |
1536 | else | 1543 | else |
1537 | sp = tsk->thread.ksp; | 1544 | sp = tsk->thread.ksp; |
1538 | } | 1545 | } |
1539 | 1546 | ||
1540 | lr = 0; | 1547 | lr = 0; |
1541 | printk("Call Trace:\n"); | 1548 | printk("Call Trace:\n"); |
1542 | do { | 1549 | do { |
1543 | if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) | 1550 | if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) |
1544 | return; | 1551 | return; |
1545 | 1552 | ||
1546 | stack = (unsigned long *) sp; | 1553 | stack = (unsigned long *) sp; |
1547 | newsp = stack[0]; | 1554 | newsp = stack[0]; |
1548 | ip = stack[STACK_FRAME_LR_SAVE]; | 1555 | ip = stack[STACK_FRAME_LR_SAVE]; |
1549 | if (!firstframe || ip != lr) { | 1556 | if (!firstframe || ip != lr) { |
1550 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); | 1557 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); |
1551 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1558 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1552 | if ((ip == rth || ip == mrth) && curr_frame >= 0) { | 1559 | if ((ip == rth || ip == mrth) && curr_frame >= 0) { |
1553 | printk(" (%pS)", | 1560 | printk(" (%pS)", |
1554 | (void *)current->ret_stack[curr_frame].ret); | 1561 | (void *)current->ret_stack[curr_frame].ret); |
1555 | curr_frame--; | 1562 | curr_frame--; |
1556 | } | 1563 | } |
1557 | #endif | 1564 | #endif |
1558 | if (firstframe) | 1565 | if (firstframe) |
1559 | printk(" (unreliable)"); | 1566 | printk(" (unreliable)"); |
1560 | printk("\n"); | 1567 | printk("\n"); |
1561 | } | 1568 | } |
1562 | firstframe = 0; | 1569 | firstframe = 0; |
1563 | 1570 | ||
1564 | /* | 1571 | /* |
1565 | * See if this is an exception frame. | 1572 | * See if this is an exception frame. |
1566 | * We look for the "regshere" marker in the current frame. | 1573 | * We look for the "regshere" marker in the current frame. |
1567 | */ | 1574 | */ |
1568 | if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) | 1575 | if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) |
1569 | && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { | 1576 | && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { |
1570 | struct pt_regs *regs = (struct pt_regs *) | 1577 | struct pt_regs *regs = (struct pt_regs *) |
1571 | (sp + STACK_FRAME_OVERHEAD); | 1578 | (sp + STACK_FRAME_OVERHEAD); |
1572 | lr = regs->link; | 1579 | lr = regs->link; |
1573 | printk("--- Exception: %lx at %pS\n LR = %pS\n", | 1580 | printk("--- Exception: %lx at %pS\n LR = %pS\n", |
1574 | regs->trap, (void *)regs->nip, (void *)lr); | 1581 | regs->trap, (void *)regs->nip, (void *)lr); |
1575 | firstframe = 1; | 1582 | firstframe = 1; |
1576 | } | 1583 | } |
1577 | 1584 | ||
1578 | sp = newsp; | 1585 | sp = newsp; |
1579 | } while (count++ < kstack_depth_to_print); | 1586 | } while (count++ < kstack_depth_to_print); |
1580 | } | 1587 | } |
1581 | 1588 | ||
1582 | #ifdef CONFIG_PPC64 | 1589 | #ifdef CONFIG_PPC64 |
1583 | /* Called with hard IRQs off */ | 1590 | /* Called with hard IRQs off */ |
1584 | void notrace __ppc64_runlatch_on(void) | 1591 | void notrace __ppc64_runlatch_on(void) |
1585 | { | 1592 | { |
1586 | struct thread_info *ti = current_thread_info(); | 1593 | struct thread_info *ti = current_thread_info(); |
1587 | unsigned long ctrl; | 1594 | unsigned long ctrl; |
1588 | 1595 | ||
1589 | ctrl = mfspr(SPRN_CTRLF); | 1596 | ctrl = mfspr(SPRN_CTRLF); |
1590 | ctrl |= CTRL_RUNLATCH; | 1597 | ctrl |= CTRL_RUNLATCH; |
1591 | mtspr(SPRN_CTRLT, ctrl); | 1598 | mtspr(SPRN_CTRLT, ctrl); |
1592 | 1599 | ||
1593 | ti->local_flags |= _TLF_RUNLATCH; | 1600 | ti->local_flags |= _TLF_RUNLATCH; |
1594 | } | 1601 | } |
1595 | 1602 | ||
1596 | /* Called with hard IRQs off */ | 1603 | /* Called with hard IRQs off */ |
1597 | void notrace __ppc64_runlatch_off(void) | 1604 | void notrace __ppc64_runlatch_off(void) |
1598 | { | 1605 | { |
1599 | struct thread_info *ti = current_thread_info(); | 1606 | struct thread_info *ti = current_thread_info(); |
1600 | unsigned long ctrl; | 1607 | unsigned long ctrl; |
1601 | 1608 | ||
1602 | ti->local_flags &= ~_TLF_RUNLATCH; | 1609 | ti->local_flags &= ~_TLF_RUNLATCH; |
1603 | 1610 | ||
1604 | ctrl = mfspr(SPRN_CTRLF); | 1611 | ctrl = mfspr(SPRN_CTRLF); |
1605 | ctrl &= ~CTRL_RUNLATCH; | 1612 | ctrl &= ~CTRL_RUNLATCH; |
1606 | mtspr(SPRN_CTRLT, ctrl); | 1613 | mtspr(SPRN_CTRLT, ctrl); |
1607 | } | 1614 | } |
1608 | #endif /* CONFIG_PPC64 */ | 1615 | #endif /* CONFIG_PPC64 */ |
1609 | 1616 | ||
1610 | unsigned long arch_align_stack(unsigned long sp) | 1617 | unsigned long arch_align_stack(unsigned long sp) |
1611 | { | 1618 | { |
1612 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | 1619 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
1613 | sp -= get_random_int() & ~PAGE_MASK; | 1620 | sp -= get_random_int() & ~PAGE_MASK; |
1614 | return sp & ~0xf; | 1621 | return sp & ~0xf; |
1615 | } | 1622 | } |
1616 | 1623 | ||
1617 | static inline unsigned long brk_rnd(void) | 1624 | static inline unsigned long brk_rnd(void) |
1618 | { | 1625 | { |
1619 | unsigned long rnd = 0; | 1626 | unsigned long rnd = 0; |
1620 | 1627 | ||
1621 | /* 8MB for 32bit, 1GB for 64bit */ | 1628 | /* 8MB for 32bit, 1GB for 64bit */ |
1622 | if (is_32bit_task()) | 1629 | if (is_32bit_task()) |
1623 | rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); | 1630 | rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); |
1624 | else | 1631 | else |
1625 | rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); | 1632 | rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); |
1626 | 1633 | ||
1627 | return rnd << PAGE_SHIFT; | 1634 | return rnd << PAGE_SHIFT; |
1628 | } | 1635 | } |
1629 | 1636 | ||
1630 | unsigned long arch_randomize_brk(struct mm_struct *mm) | 1637 | unsigned long arch_randomize_brk(struct mm_struct *mm) |
1631 | { | 1638 | { |
1632 | unsigned long base = mm->brk; | 1639 | unsigned long base = mm->brk; |
1633 | unsigned long ret; | 1640 | unsigned long ret; |
1634 | 1641 | ||
1635 | #ifdef CONFIG_PPC_STD_MMU_64 | 1642 | #ifdef CONFIG_PPC_STD_MMU_64 |
1636 | /* | 1643 | /* |
1637 | * If we are using 1TB segments and we are allowed to randomise | 1644 | * If we are using 1TB segments and we are allowed to randomise |
1638 | * the heap, we can put it above 1TB so it is backed by a 1TB | 1645 | * the heap, we can put it above 1TB so it is backed by a 1TB |
1639 | * segment. Otherwise the heap will be in the bottom 1TB | 1646 | * segment. Otherwise the heap will be in the bottom 1TB |
1640 | * which always uses 256MB segments and this may result in a | 1647 | * which always uses 256MB segments and this may result in a |
1641 | * performance penalty. | 1648 | * performance penalty. |
1642 | */ | 1649 | */ |
1643 | if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) | 1650 | if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) |
1644 | base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); | 1651 | base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); |
1645 | #endif | 1652 | #endif |
1646 | 1653 | ||
1647 | ret = PAGE_ALIGN(base + brk_rnd()); | 1654 | ret = PAGE_ALIGN(base + brk_rnd()); |
1648 | 1655 | ||
1649 | if (ret < mm->brk) | 1656 | if (ret < mm->brk) |
1650 | return mm->brk; | 1657 | return mm->brk; |
1651 | 1658 | ||
1652 | return ret; | 1659 | return ret; |
1653 | } | 1660 | } |
1654 | 1661 | ||
1655 | unsigned long randomize_et_dyn(unsigned long base) | 1662 | unsigned long randomize_et_dyn(unsigned long base) |
1656 | { | 1663 | { |
1657 | unsigned long ret = PAGE_ALIGN(base + brk_rnd()); | 1664 | unsigned long ret = PAGE_ALIGN(base + brk_rnd()); |
1658 | 1665 | ||
1659 | if (ret < base) | 1666 | if (ret < base) |
1660 | return base; | 1667 | return base; |
1661 | 1668 | ||
1662 | return ret; | 1669 | return ret; |
1663 | } | 1670 | } |
1664 | 1671 |
arch/powerpc/kernel/signal.c
1 | /* | 1 | /* |
2 | * Common signal handling code for both 32 and 64 bits | 2 | * Common signal handling code for both 32 and 64 bits |
3 | * | 3 | * |
4 | * Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation | 4 | * Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation |
5 | * Extracted from signal_32.c and signal_64.c | 5 | * Extracted from signal_32.c and signal_64.c |
6 | * | 6 | * |
7 | * This file is subject to the terms and conditions of the GNU General | 7 | * This file is subject to the terms and conditions of the GNU General |
8 | * Public License. See the file README.legal in the main directory of | 8 | * Public License. See the file README.legal in the main directory of |
9 | * this archive for more details. | 9 | * this archive for more details. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/tracehook.h> | 12 | #include <linux/tracehook.h> |
13 | #include <linux/signal.h> | 13 | #include <linux/signal.h> |
14 | #include <linux/uprobes.h> | 14 | #include <linux/uprobes.h> |
15 | #include <linux/key.h> | 15 | #include <linux/key.h> |
16 | #include <linux/context_tracking.h> | 16 | #include <linux/context_tracking.h> |
17 | #include <asm/hw_breakpoint.h> | 17 | #include <asm/hw_breakpoint.h> |
18 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
19 | #include <asm/unistd.h> | 19 | #include <asm/unistd.h> |
20 | #include <asm/debug.h> | 20 | #include <asm/debug.h> |
21 | #include <asm/tm.h> | 21 | #include <asm/tm.h> |
22 | 22 | ||
23 | #include "signal.h" | 23 | #include "signal.h" |
24 | 24 | ||
25 | /* Log an error when sending an unhandled signal to a process. Controlled | 25 | /* Log an error when sending an unhandled signal to a process. Controlled |
26 | * through debug.exception-trace sysctl. | 26 | * through debug.exception-trace sysctl. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | int show_unhandled_signals = 1; | 29 | int show_unhandled_signals = 1; |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Allocate space for the signal frame | 32 | * Allocate space for the signal frame |
33 | */ | 33 | */ |
34 | void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, | 34 | void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, |
35 | size_t frame_size, int is_32) | 35 | size_t frame_size, int is_32) |
36 | { | 36 | { |
37 | unsigned long oldsp, newsp; | 37 | unsigned long oldsp, newsp; |
38 | 38 | ||
39 | /* Default to using normal stack */ | 39 | /* Default to using normal stack */ |
40 | oldsp = get_clean_sp(sp, is_32); | 40 | oldsp = get_clean_sp(sp, is_32); |
41 | 41 | ||
42 | /* Check for alt stack */ | 42 | /* Check for alt stack */ |
43 | if ((ka->sa.sa_flags & SA_ONSTACK) && | 43 | if ((ka->sa.sa_flags & SA_ONSTACK) && |
44 | current->sas_ss_size && !on_sig_stack(oldsp)) | 44 | current->sas_ss_size && !on_sig_stack(oldsp)) |
45 | oldsp = (current->sas_ss_sp + current->sas_ss_size); | 45 | oldsp = (current->sas_ss_sp + current->sas_ss_size); |
46 | 46 | ||
47 | /* Get aligned frame */ | 47 | /* Get aligned frame */ |
48 | newsp = (oldsp - frame_size) & ~0xFUL; | 48 | newsp = (oldsp - frame_size) & ~0xFUL; |
49 | 49 | ||
50 | /* Check access */ | 50 | /* Check access */ |
51 | if (!access_ok(VERIFY_WRITE, (void __user *)newsp, oldsp - newsp)) | 51 | if (!access_ok(VERIFY_WRITE, (void __user *)newsp, oldsp - newsp)) |
52 | return NULL; | 52 | return NULL; |
53 | 53 | ||
54 | return (void __user *)newsp; | 54 | return (void __user *)newsp; |
55 | } | 55 | } |
56 | 56 | ||
57 | static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, | 57 | static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, |
58 | int has_handler) | 58 | int has_handler) |
59 | { | 59 | { |
60 | unsigned long ret = regs->gpr[3]; | 60 | unsigned long ret = regs->gpr[3]; |
61 | int restart = 1; | 61 | int restart = 1; |
62 | 62 | ||
63 | /* syscall ? */ | 63 | /* syscall ? */ |
64 | if (TRAP(regs) != 0x0C00) | 64 | if (TRAP(regs) != 0x0C00) |
65 | return; | 65 | return; |
66 | 66 | ||
67 | /* error signalled ? */ | 67 | /* error signalled ? */ |
68 | if (!(regs->ccr & 0x10000000)) | 68 | if (!(regs->ccr & 0x10000000)) |
69 | return; | 69 | return; |
70 | 70 | ||
71 | switch (ret) { | 71 | switch (ret) { |
72 | case ERESTART_RESTARTBLOCK: | 72 | case ERESTART_RESTARTBLOCK: |
73 | case ERESTARTNOHAND: | 73 | case ERESTARTNOHAND: |
74 | /* ERESTARTNOHAND means that the syscall should only be | 74 | /* ERESTARTNOHAND means that the syscall should only be |
75 | * restarted if there was no handler for the signal, and since | 75 | * restarted if there was no handler for the signal, and since |
76 | * we only get here if there is a handler, we don't restart. | 76 | * we only get here if there is a handler, we don't restart. |
77 | */ | 77 | */ |
78 | restart = !has_handler; | 78 | restart = !has_handler; |
79 | break; | 79 | break; |
80 | case ERESTARTSYS: | 80 | case ERESTARTSYS: |
81 | /* ERESTARTSYS means to restart the syscall if there is no | 81 | /* ERESTARTSYS means to restart the syscall if there is no |
82 | * handler or the handler was registered with SA_RESTART | 82 | * handler or the handler was registered with SA_RESTART |
83 | */ | 83 | */ |
84 | restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0; | 84 | restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0; |
85 | break; | 85 | break; |
86 | case ERESTARTNOINTR: | 86 | case ERESTARTNOINTR: |
87 | /* ERESTARTNOINTR means that the syscall should be | 87 | /* ERESTARTNOINTR means that the syscall should be |
88 | * called again after the signal handler returns. | 88 | * called again after the signal handler returns. |
89 | */ | 89 | */ |
90 | break; | 90 | break; |
91 | default: | 91 | default: |
92 | return; | 92 | return; |
93 | } | 93 | } |
94 | if (restart) { | 94 | if (restart) { |
95 | if (ret == ERESTART_RESTARTBLOCK) | 95 | if (ret == ERESTART_RESTARTBLOCK) |
96 | regs->gpr[0] = __NR_restart_syscall; | 96 | regs->gpr[0] = __NR_restart_syscall; |
97 | else | 97 | else |
98 | regs->gpr[3] = regs->orig_gpr3; | 98 | regs->gpr[3] = regs->orig_gpr3; |
99 | regs->nip -= 4; | 99 | regs->nip -= 4; |
100 | regs->result = 0; | 100 | regs->result = 0; |
101 | } else { | 101 | } else { |
102 | regs->result = -EINTR; | 102 | regs->result = -EINTR; |
103 | regs->gpr[3] = EINTR; | 103 | regs->gpr[3] = EINTR; |
104 | regs->ccr |= 0x10000000; | 104 | regs->ccr |= 0x10000000; |
105 | } | 105 | } |
106 | } | 106 | } |
107 | 107 | ||
108 | static int do_signal(struct pt_regs *regs) | 108 | static int do_signal(struct pt_regs *regs) |
109 | { | 109 | { |
110 | sigset_t *oldset = sigmask_to_save(); | 110 | sigset_t *oldset = sigmask_to_save(); |
111 | siginfo_t info; | 111 | siginfo_t info; |
112 | int signr; | 112 | int signr; |
113 | struct k_sigaction ka; | 113 | struct k_sigaction ka; |
114 | int ret; | 114 | int ret; |
115 | int is32 = is_32bit_task(); | 115 | int is32 = is_32bit_task(); |
116 | 116 | ||
117 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 117 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
118 | 118 | ||
119 | /* Is there any syscall restart business here ? */ | 119 | /* Is there any syscall restart business here ? */ |
120 | check_syscall_restart(regs, &ka, signr > 0); | 120 | check_syscall_restart(regs, &ka, signr > 0); |
121 | 121 | ||
122 | if (signr <= 0) { | 122 | if (signr <= 0) { |
123 | /* No signal to deliver -- put the saved sigmask back */ | 123 | /* No signal to deliver -- put the saved sigmask back */ |
124 | restore_saved_sigmask(); | 124 | restore_saved_sigmask(); |
125 | regs->trap = 0; | 125 | regs->trap = 0; |
126 | return 0; /* no signals delivered */ | 126 | return 0; /* no signals delivered */ |
127 | } | 127 | } |
128 | 128 | ||
129 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS | 129 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS |
130 | /* | 130 | /* |
131 | * Reenable the DABR before delivering the signal to | 131 | * Reenable the DABR before delivering the signal to |
132 | * user space. The DABR will have been cleared if it | 132 | * user space. The DABR will have been cleared if it |
133 | * triggered inside the kernel. | 133 | * triggered inside the kernel. |
134 | */ | 134 | */ |
135 | if (current->thread.hw_brk.address && | 135 | if (current->thread.hw_brk.address && |
136 | current->thread.hw_brk.type) | 136 | current->thread.hw_brk.type) |
137 | set_breakpoint(¤t->thread.hw_brk); | 137 | __set_breakpoint(¤t->thread.hw_brk); |
138 | #endif | 138 | #endif |
139 | /* Re-enable the breakpoints for the signal stack */ | 139 | /* Re-enable the breakpoints for the signal stack */ |
140 | thread_change_pc(current, regs); | 140 | thread_change_pc(current, regs); |
141 | 141 | ||
142 | if (is32) { | 142 | if (is32) { |
143 | if (ka.sa.sa_flags & SA_SIGINFO) | 143 | if (ka.sa.sa_flags & SA_SIGINFO) |
144 | ret = handle_rt_signal32(signr, &ka, &info, oldset, | 144 | ret = handle_rt_signal32(signr, &ka, &info, oldset, |
145 | regs); | 145 | regs); |
146 | else | 146 | else |
147 | ret = handle_signal32(signr, &ka, &info, oldset, | 147 | ret = handle_signal32(signr, &ka, &info, oldset, |
148 | regs); | 148 | regs); |
149 | } else { | 149 | } else { |
150 | ret = handle_rt_signal64(signr, &ka, &info, oldset, regs); | 150 | ret = handle_rt_signal64(signr, &ka, &info, oldset, regs); |
151 | } | 151 | } |
152 | 152 | ||
153 | regs->trap = 0; | 153 | regs->trap = 0; |
154 | if (ret) { | 154 | if (ret) { |
155 | signal_delivered(signr, &info, &ka, regs, | 155 | signal_delivered(signr, &info, &ka, regs, |
156 | test_thread_flag(TIF_SINGLESTEP)); | 156 | test_thread_flag(TIF_SINGLESTEP)); |
157 | } | 157 | } |
158 | 158 | ||
159 | return ret; | 159 | return ret; |
160 | } | 160 | } |
161 | 161 | ||
162 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) | 162 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) |
163 | { | 163 | { |
164 | user_exit(); | 164 | user_exit(); |
165 | 165 | ||
166 | if (thread_info_flags & _TIF_UPROBE) | 166 | if (thread_info_flags & _TIF_UPROBE) |
167 | uprobe_notify_resume(regs); | 167 | uprobe_notify_resume(regs); |
168 | 168 | ||
169 | if (thread_info_flags & _TIF_SIGPENDING) | 169 | if (thread_info_flags & _TIF_SIGPENDING) |
170 | do_signal(regs); | 170 | do_signal(regs); |
171 | 171 | ||
172 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 172 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
173 | clear_thread_flag(TIF_NOTIFY_RESUME); | 173 | clear_thread_flag(TIF_NOTIFY_RESUME); |
174 | tracehook_notify_resume(regs); | 174 | tracehook_notify_resume(regs); |
175 | } | 175 | } |
176 | 176 | ||
177 | user_enter(); | 177 | user_enter(); |
178 | } | 178 | } |
179 | 179 | ||
/*
 * Return the stack pointer (r1) to use when building a signal frame,
 * taking transactional memory state into account.
 */
unsigned long get_tm_stackpointer(struct pt_regs *regs)
{
	/* When in an active transaction that takes a signal, we need to be
	 * careful with the stack. It's possible that the stack has moved back
	 * up after the tbegin. The obvious case here is when the tbegin is
	 * called inside a function that returns before a tend. In this case,
	 * the stack is part of the checkpointed transactional memory state.
	 * If we write over this non transactionally or in suspend, we are in
	 * trouble because if we get a tm abort, the program counter and stack
	 * pointer will be back at the tbegin but our in memory stack won't be
	 * valid anymore.
	 *
	 * To avoid this, when taking a signal in an active transaction, we
	 * need to use the stack pointer from the checkpointed state, rather
	 * than the speculated state. This ensures that the signal context
	 * (written tm suspended) will be written below the stack required for
	 * the rollback. The transaction is aborted because of the treclaim,
	 * so any memory written between the tbegin and the signal will be
	 * rolled back anyway.
	 *
	 * For signals taken in non-TM or suspended mode, we use the
	 * normal/non-checkpointed stack pointer.
	 */

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* Reclaim first: snapshots checkpointed state into the thread. */
		tm_reclaim_current(TM_CAUSE_SIGNAL);
		if (MSR_TM_TRANSACTIONAL(regs->msr))
			return current->thread.ckpt_regs.gpr[1];
	}
#endif
	/* Non-TM or suspended: the live stack pointer is the right one. */
	return regs->gpr[1];
}
213 | 213 |
arch/powerpc/xmon/xmon.c
1 | /* | 1 | /* |
2 | * Routines providing a simple monitor for use on the PowerMac. | 2 | * Routines providing a simple monitor for use on the PowerMac. |
3 | * | 3 | * |
4 | * Copyright (C) 1996-2005 Paul Mackerras. | 4 | * Copyright (C) 1996-2005 Paul Mackerras. |
5 | * Copyright (C) 2001 PPC64 Team, IBM Corp | 5 | * Copyright (C) 2001 PPC64 Team, IBM Corp |
 * Copyright (C) 2006 Michael Ellerman, IBM Corp
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
10 | * as published by the Free Software Foundation; either version | 10 | * as published by the Free Software Foundation; either version |
11 | * 2 of the License, or (at your option) any later version. | 11 | * 2 of the License, or (at your option) any later version. |
12 | */ | 12 | */ |
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/smp.h> | 15 | #include <linux/smp.h> |
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/reboot.h> | 17 | #include <linux/reboot.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/kallsyms.h> | 19 | #include <linux/kallsyms.h> |
20 | #include <linux/kmsg_dump.h> | 20 | #include <linux/kmsg_dump.h> |
21 | #include <linux/cpumask.h> | 21 | #include <linux/cpumask.h> |
22 | #include <linux/export.h> | 22 | #include <linux/export.h> |
23 | #include <linux/sysrq.h> | 23 | #include <linux/sysrq.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/bug.h> | 26 | #include <linux/bug.h> |
27 | 27 | ||
28 | #include <asm/ptrace.h> | 28 | #include <asm/ptrace.h> |
29 | #include <asm/string.h> | 29 | #include <asm/string.h> |
30 | #include <asm/prom.h> | 30 | #include <asm/prom.h> |
31 | #include <asm/machdep.h> | 31 | #include <asm/machdep.h> |
32 | #include <asm/xmon.h> | 32 | #include <asm/xmon.h> |
33 | #include <asm/processor.h> | 33 | #include <asm/processor.h> |
34 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
35 | #include <asm/mmu.h> | 35 | #include <asm/mmu.h> |
36 | #include <asm/mmu_context.h> | 36 | #include <asm/mmu_context.h> |
37 | #include <asm/cputable.h> | 37 | #include <asm/cputable.h> |
38 | #include <asm/rtas.h> | 38 | #include <asm/rtas.h> |
39 | #include <asm/sstep.h> | 39 | #include <asm/sstep.h> |
40 | #include <asm/irq_regs.h> | 40 | #include <asm/irq_regs.h> |
41 | #include <asm/spu.h> | 41 | #include <asm/spu.h> |
42 | #include <asm/spu_priv1.h> | 42 | #include <asm/spu_priv1.h> |
43 | #include <asm/setjmp.h> | 43 | #include <asm/setjmp.h> |
44 | #include <asm/reg.h> | 44 | #include <asm/reg.h> |
45 | #include <asm/debug.h> | 45 | #include <asm/debug.h> |
46 | #include <asm/hw_breakpoint.h> | 46 | #include <asm/hw_breakpoint.h> |
47 | 47 | ||
48 | #ifdef CONFIG_PPC64 | 48 | #ifdef CONFIG_PPC64 |
49 | #include <asm/hvcall.h> | 49 | #include <asm/hvcall.h> |
50 | #include <asm/paca.h> | 50 | #include <asm/paca.h> |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | #include "nonstdio.h" | 53 | #include "nonstdio.h" |
54 | #include "dis-asm.h" | 54 | #include "dis-asm.h" |
55 | 55 | ||
#ifdef CONFIG_SMP
static cpumask_t cpus_in_xmon = CPU_MASK_NONE;	/* cpus currently stopped in xmon */
static unsigned long xmon_taken = 1;		/* bit 0 set while a cpu owns the command loop */
static int xmon_owner;				/* cpu currently driving xmon */
static int xmon_gate;				/* released once the first cpu is ready */
#else
#define xmon_owner 0
#endif /* CONFIG_SMP */

/* Non-zero (bit 0) while any cpu is inside the monitor. */
static unsigned long in_xmon __read_mostly = 0;

static unsigned long adrs;		/* current address for memory commands — presumably */
static int size = 1;			/* current access size — presumably */
#define MAX_DUMP (128 * 1024)
static unsigned long ndump = 64;	/* default dump lengths; confirm against dump() */
static unsigned long nidump = 16;
static unsigned long ncsum = 4096;
static int termch;			/* pushed-back input character */
static char tmpstr[128];

static long bus_error_jmp[JMP_BUF_LEN];
static int catch_memory_errors;		/* set while memory accesses may fault */
static long *xmon_fault_jmp[NR_CPUS];	/* per-cpu recursion recovery setjmp buffer */

/* Breakpoint stuff */
struct bpt {
	unsigned long address;		/* address the breakpoint is placed at */
	unsigned int instr[2];		/* saved instruction words — TODO confirm layout */
	atomic_t ref_count;		/* cpus currently executing out of instr[] */
	int enabled;			/* BP_* flags below; 0 = slot unused */
	unsigned long pad;
};

/* Bits in bpt.enabled */
#define BP_IABR_TE 1 /* IABR translation enabled */
#define BP_IABR 2
#define BP_TRAP 8
#define BP_DABR 0x10

#define NBPTS 256
static struct bpt bpts[NBPTS];		/* instruction breakpoints */
static struct bpt dabr;			/* data breakpoint */
static struct bpt *iabr;		/* active instruction address breakpoint */
static unsigned bpinstr = 0x7fe00008;	/* trap */

/* 1-based breakpoint number shown to the user. */
#define BP_NUM(bp) ((bp) - bpts + 1)
102 | 102 | ||
103 | /* Prototypes */ | 103 | /* Prototypes */ |
104 | static int cmds(struct pt_regs *); | 104 | static int cmds(struct pt_regs *); |
105 | static int mread(unsigned long, void *, int); | 105 | static int mread(unsigned long, void *, int); |
106 | static int mwrite(unsigned long, void *, int); | 106 | static int mwrite(unsigned long, void *, int); |
107 | static int handle_fault(struct pt_regs *); | 107 | static int handle_fault(struct pt_regs *); |
108 | static void byterev(unsigned char *, int); | 108 | static void byterev(unsigned char *, int); |
109 | static void memex(void); | 109 | static void memex(void); |
110 | static int bsesc(void); | 110 | static int bsesc(void); |
111 | static void dump(void); | 111 | static void dump(void); |
112 | static void prdump(unsigned long, long); | 112 | static void prdump(unsigned long, long); |
113 | static int ppc_inst_dump(unsigned long, long, int); | 113 | static int ppc_inst_dump(unsigned long, long, int); |
114 | static void dump_log_buf(void); | 114 | static void dump_log_buf(void); |
115 | static void backtrace(struct pt_regs *); | 115 | static void backtrace(struct pt_regs *); |
116 | static void excprint(struct pt_regs *); | 116 | static void excprint(struct pt_regs *); |
117 | static void prregs(struct pt_regs *); | 117 | static void prregs(struct pt_regs *); |
118 | static void memops(int); | 118 | static void memops(int); |
119 | static void memlocate(void); | 119 | static void memlocate(void); |
120 | static void memzcan(void); | 120 | static void memzcan(void); |
121 | static void memdiffs(unsigned char *, unsigned char *, unsigned, unsigned); | 121 | static void memdiffs(unsigned char *, unsigned char *, unsigned, unsigned); |
122 | int skipbl(void); | 122 | int skipbl(void); |
123 | int scanhex(unsigned long *valp); | 123 | int scanhex(unsigned long *valp); |
124 | static void scannl(void); | 124 | static void scannl(void); |
125 | static int hexdigit(int); | 125 | static int hexdigit(int); |
126 | void getstring(char *, int); | 126 | void getstring(char *, int); |
127 | static void flush_input(void); | 127 | static void flush_input(void); |
128 | static int inchar(void); | 128 | static int inchar(void); |
129 | static void take_input(char *); | 129 | static void take_input(char *); |
130 | static unsigned long read_spr(int); | 130 | static unsigned long read_spr(int); |
131 | static void write_spr(int, unsigned long); | 131 | static void write_spr(int, unsigned long); |
132 | static void super_regs(void); | 132 | static void super_regs(void); |
133 | static void remove_bpts(void); | 133 | static void remove_bpts(void); |
134 | static void insert_bpts(void); | 134 | static void insert_bpts(void); |
135 | static void remove_cpu_bpts(void); | 135 | static void remove_cpu_bpts(void); |
136 | static void insert_cpu_bpts(void); | 136 | static void insert_cpu_bpts(void); |
137 | static struct bpt *at_breakpoint(unsigned long pc); | 137 | static struct bpt *at_breakpoint(unsigned long pc); |
138 | static struct bpt *in_breakpoint_table(unsigned long pc, unsigned long *offp); | 138 | static struct bpt *in_breakpoint_table(unsigned long pc, unsigned long *offp); |
139 | static int do_step(struct pt_regs *); | 139 | static int do_step(struct pt_regs *); |
140 | static void bpt_cmds(void); | 140 | static void bpt_cmds(void); |
141 | static void cacheflush(void); | 141 | static void cacheflush(void); |
142 | static int cpu_cmd(void); | 142 | static int cpu_cmd(void); |
143 | static void csum(void); | 143 | static void csum(void); |
144 | static void bootcmds(void); | 144 | static void bootcmds(void); |
145 | static void proccall(void); | 145 | static void proccall(void); |
146 | void dump_segments(void); | 146 | void dump_segments(void); |
147 | static void symbol_lookup(void); | 147 | static void symbol_lookup(void); |
148 | static void xmon_show_stack(unsigned long sp, unsigned long lr, | 148 | static void xmon_show_stack(unsigned long sp, unsigned long lr, |
149 | unsigned long pc); | 149 | unsigned long pc); |
150 | static void xmon_print_symbol(unsigned long address, const char *mid, | 150 | static void xmon_print_symbol(unsigned long address, const char *mid, |
151 | const char *after); | 151 | const char *after); |
152 | static const char *getvecname(unsigned long vec); | 152 | static const char *getvecname(unsigned long vec); |
153 | 153 | ||
154 | static int do_spu_cmd(void); | 154 | static int do_spu_cmd(void); |
155 | 155 | ||
156 | #ifdef CONFIG_44x | 156 | #ifdef CONFIG_44x |
157 | static void dump_tlb_44x(void); | 157 | static void dump_tlb_44x(void); |
158 | #endif | 158 | #endif |
159 | #ifdef CONFIG_PPC_BOOK3E | 159 | #ifdef CONFIG_PPC_BOOK3E |
160 | static void dump_tlb_book3e(void); | 160 | static void dump_tlb_book3e(void); |
161 | #endif | 161 | #endif |
162 | 162 | ||
163 | static int xmon_no_auto_backtrace; | 163 | static int xmon_no_auto_backtrace; |
164 | 164 | ||
165 | extern void xmon_enter(void); | 165 | extern void xmon_enter(void); |
166 | extern void xmon_leave(void); | 166 | extern void xmon_leave(void); |
167 | 167 | ||
/* Format string for printing a register: 16 hex digits on ppc64, 8 on ppc32. */
#ifdef CONFIG_PPC64
#define REG "%.16lx"
#else
#define REG "%.8lx"
#endif

/* Assemble a 32-bit value from four bytes in target memory order. */
#ifdef __LITTLE_ENDIAN__
#define GETWORD(v) (((v)[3] << 24) + ((v)[2] << 16) + ((v)[1] << 8) + (v)[0])
#else
#define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3])
#endif

/* Local ctype helpers (xmon cannot rely on the usual kernel string headers). */
#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
		|| ('a' <= (c) && (c) <= 'f') \
		|| ('A' <= (c) && (c) <= 'F'))
#define isalnum(c) (('0' <= (c) && (c) <= '9') \
		|| ('a' <= (c) && (c) <= 'z') \
		|| ('A' <= (c) && (c) <= 'Z'))
/* Fix: parenthesize the macro argument like isxdigit/isalnum do, so an
 * expression argument (e.g. isspace(a || b)) cannot misparse. */
#define isspace(c) ((c) == ' ' || (c) == '\t' || (c) == 10 || (c) == 13 || (c) == 0)
187 | 187 | ||
/*
 * Help text printed by the '?' command. Built from several adjacent string
 * literals so config-dependent commands only appear when compiled in.
 * NOTE(review): internal command/description spacing reconstructed — the
 * extraction lost exact whitespace; verify against the original file.
 */
static char *help_string = "\
Commands:\n\
  b	show breakpoints\n\
  bd	set data breakpoint\n\
  bi	set instruction breakpoint\n\
  bc	clear breakpoint\n"
#ifdef CONFIG_SMP
  "\
  c	print cpus stopped in xmon\n\
  c#	try to switch to cpu number h (in hex)\n"
#endif
  "\
  C	checksum\n\
  d	dump bytes\n\
  di	dump instructions\n\
  df	dump float values\n\
  dd	dump double values\n\
  dl    dump the kernel log buffer\n"
#ifdef CONFIG_PPC64
  "\
  dp[#]	dump paca for current cpu, or cpu #\n\
  dpa	dump paca for all possible cpus\n"
#endif
  "\
  dr	dump stream of raw bytes\n\
  e	print exception information\n\
  f	flush cache\n\
  la	lookup symbol+offset of specified address\n\
  ls	lookup address of specified symbol\n\
  m	examine/change memory\n\
  mm	move a block of memory\n\
  ms	set a block of memory\n\
  md	compare two blocks of memory\n\
  ml	locate a block of memory\n\
  mz	zero a block of memory\n\
  mi	show information about memory allocation\n\
  p 	call a procedure\n\
  r	print registers\n\
  s	single step\n"
#ifdef CONFIG_SPU_BASE
"  ss	stop execution on all spus\n\
  sr	restore execution on stopped spus\n\
  sf  #	dump spu fields for spu # (in hex)\n\
  sd  #	dump spu local store for spu # (in hex)\n\
  sdi #	disassemble spu local store for spu # (in hex)\n"
#endif
"  S	print special registers\n\
  t	print backtrace\n\
  x	exit monitor and recover\n\
  X	exit monitor and dont recover\n"
#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E)
"  u	dump segment table or SLB\n"
#elif defined(CONFIG_PPC_STD_MMU_32)
"  u	dump segment registers\n"
#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E)
"  u	dump TLB\n"
#endif
"  ?	help\n"
"  zr	reboot\n\
  zh	halt\n"
;

/* Register state of the exception that brought us into xmon. */
static struct pt_regs *xmon_regs;
251 | 251 | ||
/* Full sync + isync barrier (PowerPC). */
static inline void sync(void)
{
	asm volatile("sync; isync");
}
256 | 256 | ||
/*
 * Make a freshly written instruction at p visible to instruction fetch:
 * flush the data cache line, invalidate the icache line, then isync.
 */
static inline void store_inst(void *p)
{
	asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
}
261 | 261 | ||
/* Flush (write back) the data cache line at p and invalidate its icache line. */
static inline void cflush(void *p)
{
	asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
}
266 | 266 | ||
/* Invalidate (without write back) the data and instruction cache lines at p. */
static inline void cinval(void *p)
{
	asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
}
271 | 271 | ||
/*
 * Disable surveillance (the service processor watchdog function)
 * while we are in xmon.
 * XXX we should re-enable it when we leave. :)
 */
#define SURVEILLANCE_TOKEN 9000

static inline void disable_surveillance(void)
{
#ifdef CONFIG_PPC_PSERIES
	/* Since this can't be a module, args should end up below 4GB. */
	static struct rtas_args args;

	/*
	 * At this point we have got all the cpus we can into
	 * xmon, so there is hopefully no other cpu calling RTAS
	 * at the moment, even though we don't take rtas.lock.
	 * If we did try to take rtas.lock there would be a
	 * real possibility of deadlock.
	 */
	args.token = rtas_token("set-indicator");
	if (args.token == RTAS_UNKNOWN_SERVICE)
		return;
	/* set-indicator(SURVEILLANCE_TOKEN, 0, 0) — presumably "off". */
	args.nargs = 3;
	args.nret = 1;
	args.rets = &args.args[3];
	args.args[0] = SURVEILLANCE_TOKEN;
	args.args[1] = 0;
	args.args[2] = 0;
	enter_rtas(__pa(&args));
#endif /* CONFIG_PPC_PSERIES */
}
304 | 304 | ||
#ifdef CONFIG_SMP
/* cmpxchg-based console lock: 0 = free, otherwise holder's (cpu + 0x100). */
static int xmon_speaker;

/*
 * Acquire the xmon output lock. Spins up to ~1 second per attempt, then
 * steals the lock from a presumed-dead holder ("hostile takeover").
 */
static void get_output_lock(void)
{
	int me = smp_processor_id() + 0x100;	/* non-zero token for this cpu */
	int last_speaker = 0, prev;
	long timeout;

	/* Recursive acquisition: we already hold it. */
	if (xmon_speaker == me)
		return;

	for (;;) {
		/* Try to take a free lock. */
		last_speaker = cmpxchg(&xmon_speaker, 0, me);
		if (last_speaker == 0)
			return;

		/*
		 * Wait a full second for the lock, we might be on a slow
		 * console, but check every 100us.
		 */
		timeout = 10000;
		while (xmon_speaker == last_speaker) {
			if (--timeout > 0) {
				udelay(100);
				continue;
			}

			/* hostile takeover */
			prev = cmpxchg(&xmon_speaker, last_speaker, me);
			if (prev == last_speaker)
				return;
			/* Holder changed under us: retry the outer loop. */
			break;
		}
	}
}
341 | 341 | ||
/* Drop the xmon output lock (0 = free). */
static void release_output_lock(void)
{
	xmon_speaker = 0;
}
346 | 346 | ||
/* Return non-zero if any cpu is currently stopped in xmon. */
int cpus_are_in_xmon(void)
{
	return !cpumask_empty(&cpus_in_xmon);
}
#endif
352 | 352 | ||
353 | static inline int unrecoverable_excp(struct pt_regs *regs) | 353 | static inline int unrecoverable_excp(struct pt_regs *regs) |
354 | { | 354 | { |
355 | #if defined(CONFIG_4xx) || defined(CONFIG_PPC_BOOK3E) | 355 | #if defined(CONFIG_4xx) || defined(CONFIG_PPC_BOOK3E) |
356 | /* We have no MSR_RI bit on 4xx or Book3e, so we simply return false */ | 356 | /* We have no MSR_RI bit on 4xx or Book3e, so we simply return false */ |
357 | return 0; | 357 | return 0; |
358 | #else | 358 | #else |
359 | return ((regs->msr & MSR_RI) == 0); | 359 | return ((regs->msr & MSR_RI) == 0); |
360 | #endif | 360 | #endif |
361 | } | 361 | } |
362 | 362 | ||
363 | static int xmon_core(struct pt_regs *regs, int fromipi) | 363 | static int xmon_core(struct pt_regs *regs, int fromipi) |
364 | { | 364 | { |
365 | int cmd = 0; | 365 | int cmd = 0; |
366 | struct bpt *bp; | 366 | struct bpt *bp; |
367 | long recurse_jmp[JMP_BUF_LEN]; | 367 | long recurse_jmp[JMP_BUF_LEN]; |
368 | unsigned long offset; | 368 | unsigned long offset; |
369 | unsigned long flags; | 369 | unsigned long flags; |
370 | #ifdef CONFIG_SMP | 370 | #ifdef CONFIG_SMP |
371 | int cpu; | 371 | int cpu; |
372 | int secondary; | 372 | int secondary; |
373 | unsigned long timeout; | 373 | unsigned long timeout; |
374 | #endif | 374 | #endif |
375 | 375 | ||
376 | local_irq_save(flags); | 376 | local_irq_save(flags); |
377 | 377 | ||
378 | bp = in_breakpoint_table(regs->nip, &offset); | 378 | bp = in_breakpoint_table(regs->nip, &offset); |
379 | if (bp != NULL) { | 379 | if (bp != NULL) { |
380 | regs->nip = bp->address + offset; | 380 | regs->nip = bp->address + offset; |
381 | atomic_dec(&bp->ref_count); | 381 | atomic_dec(&bp->ref_count); |
382 | } | 382 | } |
383 | 383 | ||
384 | remove_cpu_bpts(); | 384 | remove_cpu_bpts(); |
385 | 385 | ||
386 | #ifdef CONFIG_SMP | 386 | #ifdef CONFIG_SMP |
387 | cpu = smp_processor_id(); | 387 | cpu = smp_processor_id(); |
388 | if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { | 388 | if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { |
389 | get_output_lock(); | 389 | get_output_lock(); |
390 | excprint(regs); | 390 | excprint(regs); |
391 | printf("cpu 0x%x: Exception %lx %s in xmon, " | 391 | printf("cpu 0x%x: Exception %lx %s in xmon, " |
392 | "returning to main loop\n", | 392 | "returning to main loop\n", |
393 | cpu, regs->trap, getvecname(TRAP(regs))); | 393 | cpu, regs->trap, getvecname(TRAP(regs))); |
394 | release_output_lock(); | 394 | release_output_lock(); |
395 | longjmp(xmon_fault_jmp[cpu], 1); | 395 | longjmp(xmon_fault_jmp[cpu], 1); |
396 | } | 396 | } |
397 | 397 | ||
398 | if (setjmp(recurse_jmp) != 0) { | 398 | if (setjmp(recurse_jmp) != 0) { |
399 | if (!in_xmon || !xmon_gate) { | 399 | if (!in_xmon || !xmon_gate) { |
400 | get_output_lock(); | 400 | get_output_lock(); |
401 | printf("xmon: WARNING: bad recursive fault " | 401 | printf("xmon: WARNING: bad recursive fault " |
402 | "on cpu 0x%x\n", cpu); | 402 | "on cpu 0x%x\n", cpu); |
403 | release_output_lock(); | 403 | release_output_lock(); |
404 | goto waiting; | 404 | goto waiting; |
405 | } | 405 | } |
406 | secondary = !(xmon_taken && cpu == xmon_owner); | 406 | secondary = !(xmon_taken && cpu == xmon_owner); |
407 | goto cmdloop; | 407 | goto cmdloop; |
408 | } | 408 | } |
409 | 409 | ||
410 | xmon_fault_jmp[cpu] = recurse_jmp; | 410 | xmon_fault_jmp[cpu] = recurse_jmp; |
411 | 411 | ||
412 | bp = NULL; | 412 | bp = NULL; |
413 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) | 413 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) |
414 | bp = at_breakpoint(regs->nip); | 414 | bp = at_breakpoint(regs->nip); |
415 | if (bp || unrecoverable_excp(regs)) | 415 | if (bp || unrecoverable_excp(regs)) |
416 | fromipi = 0; | 416 | fromipi = 0; |
417 | 417 | ||
418 | if (!fromipi) { | 418 | if (!fromipi) { |
419 | get_output_lock(); | 419 | get_output_lock(); |
420 | excprint(regs); | 420 | excprint(regs); |
421 | if (bp) { | 421 | if (bp) { |
422 | printf("cpu 0x%x stopped at breakpoint 0x%x (", | 422 | printf("cpu 0x%x stopped at breakpoint 0x%x (", |
423 | cpu, BP_NUM(bp)); | 423 | cpu, BP_NUM(bp)); |
424 | xmon_print_symbol(regs->nip, " ", ")\n"); | 424 | xmon_print_symbol(regs->nip, " ", ")\n"); |
425 | } | 425 | } |
426 | if (unrecoverable_excp(regs)) | 426 | if (unrecoverable_excp(regs)) |
427 | printf("WARNING: exception is not recoverable, " | 427 | printf("WARNING: exception is not recoverable, " |
428 | "can't continue\n"); | 428 | "can't continue\n"); |
429 | release_output_lock(); | 429 | release_output_lock(); |
430 | } | 430 | } |
431 | 431 | ||
432 | cpumask_set_cpu(cpu, &cpus_in_xmon); | 432 | cpumask_set_cpu(cpu, &cpus_in_xmon); |
433 | 433 | ||
434 | waiting: | 434 | waiting: |
435 | secondary = 1; | 435 | secondary = 1; |
436 | while (secondary && !xmon_gate) { | 436 | while (secondary && !xmon_gate) { |
437 | if (in_xmon == 0) { | 437 | if (in_xmon == 0) { |
438 | if (fromipi) | 438 | if (fromipi) |
439 | goto leave; | 439 | goto leave; |
440 | secondary = test_and_set_bit(0, &in_xmon); | 440 | secondary = test_and_set_bit(0, &in_xmon); |
441 | } | 441 | } |
442 | barrier(); | 442 | barrier(); |
443 | } | 443 | } |
444 | 444 | ||
445 | if (!secondary && !xmon_gate) { | 445 | if (!secondary && !xmon_gate) { |
446 | /* we are the first cpu to come in */ | 446 | /* we are the first cpu to come in */ |
447 | /* interrupt other cpu(s) */ | 447 | /* interrupt other cpu(s) */ |
448 | int ncpus = num_online_cpus(); | 448 | int ncpus = num_online_cpus(); |
449 | 449 | ||
450 | xmon_owner = cpu; | 450 | xmon_owner = cpu; |
451 | mb(); | 451 | mb(); |
452 | if (ncpus > 1) { | 452 | if (ncpus > 1) { |
453 | smp_send_debugger_break(); | 453 | smp_send_debugger_break(); |
454 | /* wait for other cpus to come in */ | 454 | /* wait for other cpus to come in */ |
455 | for (timeout = 100000000; timeout != 0; --timeout) { | 455 | for (timeout = 100000000; timeout != 0; --timeout) { |
456 | if (cpumask_weight(&cpus_in_xmon) >= ncpus) | 456 | if (cpumask_weight(&cpus_in_xmon) >= ncpus) |
457 | break; | 457 | break; |
458 | barrier(); | 458 | barrier(); |
459 | } | 459 | } |
460 | } | 460 | } |
461 | remove_bpts(); | 461 | remove_bpts(); |
462 | disable_surveillance(); | 462 | disable_surveillance(); |
463 | /* for breakpoint or single step, print the current instr. */ | 463 | /* for breakpoint or single step, print the current instr. */ |
464 | if (bp || TRAP(regs) == 0xd00) | 464 | if (bp || TRAP(regs) == 0xd00) |
465 | ppc_inst_dump(regs->nip, 1, 0); | 465 | ppc_inst_dump(regs->nip, 1, 0); |
466 | printf("enter ? for help\n"); | 466 | printf("enter ? for help\n"); |
467 | mb(); | 467 | mb(); |
468 | xmon_gate = 1; | 468 | xmon_gate = 1; |
469 | barrier(); | 469 | barrier(); |
470 | } | 470 | } |
471 | 471 | ||
472 | cmdloop: | 472 | cmdloop: |
473 | while (in_xmon) { | 473 | while (in_xmon) { |
474 | if (secondary) { | 474 | if (secondary) { |
475 | if (cpu == xmon_owner) { | 475 | if (cpu == xmon_owner) { |
476 | if (!test_and_set_bit(0, &xmon_taken)) { | 476 | if (!test_and_set_bit(0, &xmon_taken)) { |
477 | secondary = 0; | 477 | secondary = 0; |
478 | continue; | 478 | continue; |
479 | } | 479 | } |
480 | /* missed it */ | 480 | /* missed it */ |
481 | while (cpu == xmon_owner) | 481 | while (cpu == xmon_owner) |
482 | barrier(); | 482 | barrier(); |
483 | } | 483 | } |
484 | barrier(); | 484 | barrier(); |
485 | } else { | 485 | } else { |
486 | cmd = cmds(regs); | 486 | cmd = cmds(regs); |
487 | if (cmd != 0) { | 487 | if (cmd != 0) { |
488 | /* exiting xmon */ | 488 | /* exiting xmon */ |
489 | insert_bpts(); | 489 | insert_bpts(); |
490 | xmon_gate = 0; | 490 | xmon_gate = 0; |
491 | wmb(); | 491 | wmb(); |
492 | in_xmon = 0; | 492 | in_xmon = 0; |
493 | break; | 493 | break; |
494 | } | 494 | } |
495 | /* have switched to some other cpu */ | 495 | /* have switched to some other cpu */ |
496 | secondary = 1; | 496 | secondary = 1; |
497 | } | 497 | } |
498 | } | 498 | } |
499 | leave: | 499 | leave: |
500 | cpumask_clear_cpu(cpu, &cpus_in_xmon); | 500 | cpumask_clear_cpu(cpu, &cpus_in_xmon); |
501 | xmon_fault_jmp[cpu] = NULL; | 501 | xmon_fault_jmp[cpu] = NULL; |
502 | #else | 502 | #else |
503 | /* UP is simple... */ | 503 | /* UP is simple... */ |
504 | if (in_xmon) { | 504 | if (in_xmon) { |
505 | printf("Exception %lx %s in xmon, returning to main loop\n", | 505 | printf("Exception %lx %s in xmon, returning to main loop\n", |
506 | regs->trap, getvecname(TRAP(regs))); | 506 | regs->trap, getvecname(TRAP(regs))); |
507 | longjmp(xmon_fault_jmp[0], 1); | 507 | longjmp(xmon_fault_jmp[0], 1); |
508 | } | 508 | } |
509 | if (setjmp(recurse_jmp) == 0) { | 509 | if (setjmp(recurse_jmp) == 0) { |
510 | xmon_fault_jmp[0] = recurse_jmp; | 510 | xmon_fault_jmp[0] = recurse_jmp; |
511 | in_xmon = 1; | 511 | in_xmon = 1; |
512 | 512 | ||
513 | excprint(regs); | 513 | excprint(regs); |
514 | bp = at_breakpoint(regs->nip); | 514 | bp = at_breakpoint(regs->nip); |
515 | if (bp) { | 515 | if (bp) { |
516 | printf("Stopped at breakpoint %x (", BP_NUM(bp)); | 516 | printf("Stopped at breakpoint %x (", BP_NUM(bp)); |
517 | xmon_print_symbol(regs->nip, " ", ")\n"); | 517 | xmon_print_symbol(regs->nip, " ", ")\n"); |
518 | } | 518 | } |
519 | if (unrecoverable_excp(regs)) | 519 | if (unrecoverable_excp(regs)) |
520 | printf("WARNING: exception is not recoverable, " | 520 | printf("WARNING: exception is not recoverable, " |
521 | "can't continue\n"); | 521 | "can't continue\n"); |
522 | remove_bpts(); | 522 | remove_bpts(); |
523 | disable_surveillance(); | 523 | disable_surveillance(); |
524 | /* for breakpoint or single step, print the current instr. */ | 524 | /* for breakpoint or single step, print the current instr. */ |
525 | if (bp || TRAP(regs) == 0xd00) | 525 | if (bp || TRAP(regs) == 0xd00) |
526 | ppc_inst_dump(regs->nip, 1, 0); | 526 | ppc_inst_dump(regs->nip, 1, 0); |
527 | printf("enter ? for help\n"); | 527 | printf("enter ? for help\n"); |
528 | } | 528 | } |
529 | 529 | ||
530 | cmd = cmds(regs); | 530 | cmd = cmds(regs); |
531 | 531 | ||
532 | insert_bpts(); | 532 | insert_bpts(); |
533 | in_xmon = 0; | 533 | in_xmon = 0; |
534 | #endif | 534 | #endif |
535 | 535 | ||
536 | #ifdef CONFIG_BOOKE | 536 | #ifdef CONFIG_BOOKE |
537 | if (regs->msr & MSR_DE) { | 537 | if (regs->msr & MSR_DE) { |
538 | bp = at_breakpoint(regs->nip); | 538 | bp = at_breakpoint(regs->nip); |
539 | if (bp != NULL) { | 539 | if (bp != NULL) { |
540 | regs->nip = (unsigned long) &bp->instr[0]; | 540 | regs->nip = (unsigned long) &bp->instr[0]; |
541 | atomic_inc(&bp->ref_count); | 541 | atomic_inc(&bp->ref_count); |
542 | } | 542 | } |
543 | } | 543 | } |
544 | #else | 544 | #else |
545 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { | 545 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { |
546 | bp = at_breakpoint(regs->nip); | 546 | bp = at_breakpoint(regs->nip); |
547 | if (bp != NULL) { | 547 | if (bp != NULL) { |
548 | int stepped = emulate_step(regs, bp->instr[0]); | 548 | int stepped = emulate_step(regs, bp->instr[0]); |
549 | if (stepped == 0) { | 549 | if (stepped == 0) { |
550 | regs->nip = (unsigned long) &bp->instr[0]; | 550 | regs->nip = (unsigned long) &bp->instr[0]; |
551 | atomic_inc(&bp->ref_count); | 551 | atomic_inc(&bp->ref_count); |
552 | } else if (stepped < 0) { | 552 | } else if (stepped < 0) { |
553 | printf("Couldn't single-step %s instruction\n", | 553 | printf("Couldn't single-step %s instruction\n", |
554 | (IS_RFID(bp->instr[0])? "rfid": "mtmsrd")); | 554 | (IS_RFID(bp->instr[0])? "rfid": "mtmsrd")); |
555 | } | 555 | } |
556 | } | 556 | } |
557 | } | 557 | } |
558 | #endif | 558 | #endif |
559 | insert_cpu_bpts(); | 559 | insert_cpu_bpts(); |
560 | 560 | ||
561 | local_irq_restore(flags); | 561 | local_irq_restore(flags); |
562 | 562 | ||
563 | return cmd != 'X' && cmd != EOF; | 563 | return cmd != 'X' && cmd != EOF; |
564 | } | 564 | } |
565 | 565 | ||
566 | int xmon(struct pt_regs *excp) | 566 | int xmon(struct pt_regs *excp) |
567 | { | 567 | { |
568 | struct pt_regs regs; | 568 | struct pt_regs regs; |
569 | 569 | ||
570 | if (excp == NULL) { | 570 | if (excp == NULL) { |
571 | ppc_save_regs(®s); | 571 | ppc_save_regs(®s); |
572 | excp = ®s; | 572 | excp = ®s; |
573 | } | 573 | } |
574 | 574 | ||
575 | return xmon_core(excp, 0); | 575 | return xmon_core(excp, 0); |
576 | } | 576 | } |
577 | EXPORT_SYMBOL(xmon); | 577 | EXPORT_SYMBOL(xmon); |
578 | 578 | ||
579 | irqreturn_t xmon_irq(int irq, void *d) | 579 | irqreturn_t xmon_irq(int irq, void *d) |
580 | { | 580 | { |
581 | unsigned long flags; | 581 | unsigned long flags; |
582 | local_irq_save(flags); | 582 | local_irq_save(flags); |
583 | printf("Keyboard interrupt\n"); | 583 | printf("Keyboard interrupt\n"); |
584 | xmon(get_irq_regs()); | 584 | xmon(get_irq_regs()); |
585 | local_irq_restore(flags); | 585 | local_irq_restore(flags); |
586 | return IRQ_HANDLED; | 586 | return IRQ_HANDLED; |
587 | } | 587 | } |
588 | 588 | ||
589 | static int xmon_bpt(struct pt_regs *regs) | 589 | static int xmon_bpt(struct pt_regs *regs) |
590 | { | 590 | { |
591 | struct bpt *bp; | 591 | struct bpt *bp; |
592 | unsigned long offset; | 592 | unsigned long offset; |
593 | 593 | ||
594 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) | 594 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) |
595 | return 0; | 595 | return 0; |
596 | 596 | ||
597 | /* Are we at the trap at bp->instr[1] for some bp? */ | 597 | /* Are we at the trap at bp->instr[1] for some bp? */ |
598 | bp = in_breakpoint_table(regs->nip, &offset); | 598 | bp = in_breakpoint_table(regs->nip, &offset); |
599 | if (bp != NULL && offset == 4) { | 599 | if (bp != NULL && offset == 4) { |
600 | regs->nip = bp->address + 4; | 600 | regs->nip = bp->address + 4; |
601 | atomic_dec(&bp->ref_count); | 601 | atomic_dec(&bp->ref_count); |
602 | return 1; | 602 | return 1; |
603 | } | 603 | } |
604 | 604 | ||
605 | /* Are we at a breakpoint? */ | 605 | /* Are we at a breakpoint? */ |
606 | bp = at_breakpoint(regs->nip); | 606 | bp = at_breakpoint(regs->nip); |
607 | if (!bp) | 607 | if (!bp) |
608 | return 0; | 608 | return 0; |
609 | 609 | ||
610 | xmon_core(regs, 0); | 610 | xmon_core(regs, 0); |
611 | 611 | ||
612 | return 1; | 612 | return 1; |
613 | } | 613 | } |
614 | 614 | ||
/*
 * Single-step trap handler: enter xmon for kernel-mode steps only.
 * User-mode single-step events are not ours; return 0 for those.
 */
static int xmon_sstep(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;
	xmon_core(regs, 0);
	return 1;
}
622 | 622 | ||
623 | static int xmon_break_match(struct pt_regs *regs) | 623 | static int xmon_break_match(struct pt_regs *regs) |
624 | { | 624 | { |
625 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) | 625 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) |
626 | return 0; | 626 | return 0; |
627 | if (dabr.enabled == 0) | 627 | if (dabr.enabled == 0) |
628 | return 0; | 628 | return 0; |
629 | xmon_core(regs, 0); | 629 | xmon_core(regs, 0); |
630 | return 1; | 630 | return 1; |
631 | } | 631 | } |
632 | 632 | ||
633 | static int xmon_iabr_match(struct pt_regs *regs) | 633 | static int xmon_iabr_match(struct pt_regs *regs) |
634 | { | 634 | { |
635 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) | 635 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) |
636 | return 0; | 636 | return 0; |
637 | if (iabr == NULL) | 637 | if (iabr == NULL) |
638 | return 0; | 638 | return 0; |
639 | xmon_core(regs, 0); | 639 | xmon_core(regs, 0); |
640 | return 1; | 640 | return 1; |
641 | } | 641 | } |
642 | 642 | ||
/*
 * Debugger-break IPI handler: pull a secondary CPU into xmon if the
 * debugger is active and this CPU hasn't joined yet (SMP only).
 * Always returns 0 (the IPI itself is not "consumed").
 */
static int xmon_ipi(struct pt_regs *regs)
{
#ifdef CONFIG_SMP
	if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon))
		xmon_core(regs, 1);
#endif
	return 0;
}
651 | 651 | ||
652 | static int xmon_fault_handler(struct pt_regs *regs) | 652 | static int xmon_fault_handler(struct pt_regs *regs) |
653 | { | 653 | { |
654 | struct bpt *bp; | 654 | struct bpt *bp; |
655 | unsigned long offset; | 655 | unsigned long offset; |
656 | 656 | ||
657 | if (in_xmon && catch_memory_errors) | 657 | if (in_xmon && catch_memory_errors) |
658 | handle_fault(regs); /* doesn't return */ | 658 | handle_fault(regs); /* doesn't return */ |
659 | 659 | ||
660 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { | 660 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { |
661 | bp = in_breakpoint_table(regs->nip, &offset); | 661 | bp = in_breakpoint_table(regs->nip, &offset); |
662 | if (bp != NULL) { | 662 | if (bp != NULL) { |
663 | regs->nip = bp->address + offset; | 663 | regs->nip = bp->address + offset; |
664 | atomic_dec(&bp->ref_count); | 664 | atomic_dec(&bp->ref_count); |
665 | } | 665 | } |
666 | } | 666 | } |
667 | 667 | ||
668 | return 0; | 668 | return 0; |
669 | } | 669 | } |
670 | 670 | ||
671 | static struct bpt *at_breakpoint(unsigned long pc) | 671 | static struct bpt *at_breakpoint(unsigned long pc) |
672 | { | 672 | { |
673 | int i; | 673 | int i; |
674 | struct bpt *bp; | 674 | struct bpt *bp; |
675 | 675 | ||
676 | bp = bpts; | 676 | bp = bpts; |
677 | for (i = 0; i < NBPTS; ++i, ++bp) | 677 | for (i = 0; i < NBPTS; ++i, ++bp) |
678 | if (bp->enabled && pc == bp->address) | 678 | if (bp->enabled && pc == bp->address) |
679 | return bp; | 679 | return bp; |
680 | return NULL; | 680 | return NULL; |
681 | } | 681 | } |
682 | 682 | ||
683 | static struct bpt *in_breakpoint_table(unsigned long nip, unsigned long *offp) | 683 | static struct bpt *in_breakpoint_table(unsigned long nip, unsigned long *offp) |
684 | { | 684 | { |
685 | unsigned long off; | 685 | unsigned long off; |
686 | 686 | ||
687 | off = nip - (unsigned long) bpts; | 687 | off = nip - (unsigned long) bpts; |
688 | if (off >= sizeof(bpts)) | 688 | if (off >= sizeof(bpts)) |
689 | return NULL; | 689 | return NULL; |
690 | off %= sizeof(struct bpt); | 690 | off %= sizeof(struct bpt); |
691 | if (off != offsetof(struct bpt, instr[0]) | 691 | if (off != offsetof(struct bpt, instr[0]) |
692 | && off != offsetof(struct bpt, instr[1])) | 692 | && off != offsetof(struct bpt, instr[1])) |
693 | return NULL; | 693 | return NULL; |
694 | *offp = off - offsetof(struct bpt, instr[0]); | 694 | *offp = off - offsetof(struct bpt, instr[0]); |
695 | return (struct bpt *) (nip - off); | 695 | return (struct bpt *) (nip - off); |
696 | } | 696 | } |
697 | 697 | ||
698 | static struct bpt *new_breakpoint(unsigned long a) | 698 | static struct bpt *new_breakpoint(unsigned long a) |
699 | { | 699 | { |
700 | struct bpt *bp; | 700 | struct bpt *bp; |
701 | 701 | ||
702 | a &= ~3UL; | 702 | a &= ~3UL; |
703 | bp = at_breakpoint(a); | 703 | bp = at_breakpoint(a); |
704 | if (bp) | 704 | if (bp) |
705 | return bp; | 705 | return bp; |
706 | 706 | ||
707 | for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { | 707 | for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { |
708 | if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { | 708 | if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { |
709 | bp->address = a; | 709 | bp->address = a; |
710 | bp->instr[1] = bpinstr; | 710 | bp->instr[1] = bpinstr; |
711 | store_inst(&bp->instr[1]); | 711 | store_inst(&bp->instr[1]); |
712 | return bp; | 712 | return bp; |
713 | } | 713 | } |
714 | } | 714 | } |
715 | 715 | ||
716 | printf("Sorry, no free breakpoints. Please clear one first.\n"); | 716 | printf("Sorry, no free breakpoints. Please clear one first.\n"); |
717 | return NULL; | 717 | return NULL; |
718 | } | 718 | } |
719 | 719 | ||
720 | static void insert_bpts(void) | 720 | static void insert_bpts(void) |
721 | { | 721 | { |
722 | int i; | 722 | int i; |
723 | struct bpt *bp; | 723 | struct bpt *bp; |
724 | 724 | ||
725 | bp = bpts; | 725 | bp = bpts; |
726 | for (i = 0; i < NBPTS; ++i, ++bp) { | 726 | for (i = 0; i < NBPTS; ++i, ++bp) { |
727 | if ((bp->enabled & (BP_TRAP|BP_IABR)) == 0) | 727 | if ((bp->enabled & (BP_TRAP|BP_IABR)) == 0) |
728 | continue; | 728 | continue; |
729 | if (mread(bp->address, &bp->instr[0], 4) != 4) { | 729 | if (mread(bp->address, &bp->instr[0], 4) != 4) { |
730 | printf("Couldn't read instruction at %lx, " | 730 | printf("Couldn't read instruction at %lx, " |
731 | "disabling breakpoint there\n", bp->address); | 731 | "disabling breakpoint there\n", bp->address); |
732 | bp->enabled = 0; | 732 | bp->enabled = 0; |
733 | continue; | 733 | continue; |
734 | } | 734 | } |
735 | if (IS_MTMSRD(bp->instr[0]) || IS_RFID(bp->instr[0])) { | 735 | if (IS_MTMSRD(bp->instr[0]) || IS_RFID(bp->instr[0])) { |
736 | printf("Breakpoint at %lx is on an mtmsrd or rfid " | 736 | printf("Breakpoint at %lx is on an mtmsrd or rfid " |
737 | "instruction, disabling it\n", bp->address); | 737 | "instruction, disabling it\n", bp->address); |
738 | bp->enabled = 0; | 738 | bp->enabled = 0; |
739 | continue; | 739 | continue; |
740 | } | 740 | } |
741 | store_inst(&bp->instr[0]); | 741 | store_inst(&bp->instr[0]); |
742 | if (bp->enabled & BP_IABR) | 742 | if (bp->enabled & BP_IABR) |
743 | continue; | 743 | continue; |
744 | if (mwrite(bp->address, &bpinstr, 4) != 4) { | 744 | if (mwrite(bp->address, &bpinstr, 4) != 4) { |
745 | printf("Couldn't write instruction at %lx, " | 745 | printf("Couldn't write instruction at %lx, " |
746 | "disabling breakpoint there\n", bp->address); | 746 | "disabling breakpoint there\n", bp->address); |
747 | bp->enabled &= ~BP_TRAP; | 747 | bp->enabled &= ~BP_TRAP; |
748 | continue; | 748 | continue; |
749 | } | 749 | } |
750 | store_inst((void *)bp->address); | 750 | store_inst((void *)bp->address); |
751 | } | 751 | } |
752 | } | 752 | } |
753 | 753 | ||
754 | static void insert_cpu_bpts(void) | 754 | static void insert_cpu_bpts(void) |
755 | { | 755 | { |
756 | struct arch_hw_breakpoint brk; | 756 | struct arch_hw_breakpoint brk; |
757 | 757 | ||
758 | if (dabr.enabled) { | 758 | if (dabr.enabled) { |
759 | brk.address = dabr.address; | 759 | brk.address = dabr.address; |
760 | brk.type = (dabr.enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; | 760 | brk.type = (dabr.enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; |
761 | brk.len = 8; | 761 | brk.len = 8; |
762 | set_breakpoint(&brk); | 762 | __set_breakpoint(&brk); |
763 | } | 763 | } |
764 | if (iabr && cpu_has_feature(CPU_FTR_IABR)) | 764 | if (iabr && cpu_has_feature(CPU_FTR_IABR)) |
765 | mtspr(SPRN_IABR, iabr->address | 765 | mtspr(SPRN_IABR, iabr->address |
766 | | (iabr->enabled & (BP_IABR|BP_IABR_TE))); | 766 | | (iabr->enabled & (BP_IABR|BP_IABR_TE))); |
767 | } | 767 | } |
768 | 768 | ||
769 | static void remove_bpts(void) | 769 | static void remove_bpts(void) |
770 | { | 770 | { |
771 | int i; | 771 | int i; |
772 | struct bpt *bp; | 772 | struct bpt *bp; |
773 | unsigned instr; | 773 | unsigned instr; |
774 | 774 | ||
775 | bp = bpts; | 775 | bp = bpts; |
776 | for (i = 0; i < NBPTS; ++i, ++bp) { | 776 | for (i = 0; i < NBPTS; ++i, ++bp) { |
777 | if ((bp->enabled & (BP_TRAP|BP_IABR)) != BP_TRAP) | 777 | if ((bp->enabled & (BP_TRAP|BP_IABR)) != BP_TRAP) |
778 | continue; | 778 | continue; |
779 | if (mread(bp->address, &instr, 4) == 4 | 779 | if (mread(bp->address, &instr, 4) == 4 |
780 | && instr == bpinstr | 780 | && instr == bpinstr |
781 | && mwrite(bp->address, &bp->instr, 4) != 4) | 781 | && mwrite(bp->address, &bp->instr, 4) != 4) |
782 | printf("Couldn't remove breakpoint at %lx\n", | 782 | printf("Couldn't remove breakpoint at %lx\n", |
783 | bp->address); | 783 | bp->address); |
784 | else | 784 | else |
785 | store_inst((void *)bp->address); | 785 | store_inst((void *)bp->address); |
786 | } | 786 | } |
787 | } | 787 | } |
788 | 788 | ||
789 | static void remove_cpu_bpts(void) | 789 | static void remove_cpu_bpts(void) |
790 | { | 790 | { |
791 | hw_breakpoint_disable(); | 791 | hw_breakpoint_disable(); |
792 | if (cpu_has_feature(CPU_FTR_IABR)) | 792 | if (cpu_has_feature(CPU_FTR_IABR)) |
793 | mtspr(SPRN_IABR, 0); | 793 | mtspr(SPRN_IABR, 0); |
794 | } | 794 | } |
795 | 795 | ||
796 | /* Command interpreting routine */ | 796 | /* Command interpreting routine */ |
797 | static char *last_cmd; | 797 | static char *last_cmd; |
798 | 798 | ||
799 | static int | 799 | static int |
800 | cmds(struct pt_regs *excp) | 800 | cmds(struct pt_regs *excp) |
801 | { | 801 | { |
802 | int cmd = 0; | 802 | int cmd = 0; |
803 | 803 | ||
804 | last_cmd = NULL; | 804 | last_cmd = NULL; |
805 | xmon_regs = excp; | 805 | xmon_regs = excp; |
806 | 806 | ||
807 | if (!xmon_no_auto_backtrace) { | 807 | if (!xmon_no_auto_backtrace) { |
808 | xmon_no_auto_backtrace = 1; | 808 | xmon_no_auto_backtrace = 1; |
809 | xmon_show_stack(excp->gpr[1], excp->link, excp->nip); | 809 | xmon_show_stack(excp->gpr[1], excp->link, excp->nip); |
810 | } | 810 | } |
811 | 811 | ||
812 | for(;;) { | 812 | for(;;) { |
813 | #ifdef CONFIG_SMP | 813 | #ifdef CONFIG_SMP |
814 | printf("%x:", smp_processor_id()); | 814 | printf("%x:", smp_processor_id()); |
815 | #endif /* CONFIG_SMP */ | 815 | #endif /* CONFIG_SMP */ |
816 | printf("mon> "); | 816 | printf("mon> "); |
817 | flush_input(); | 817 | flush_input(); |
818 | termch = 0; | 818 | termch = 0; |
819 | cmd = skipbl(); | 819 | cmd = skipbl(); |
820 | if( cmd == '\n' ) { | 820 | if( cmd == '\n' ) { |
821 | if (last_cmd == NULL) | 821 | if (last_cmd == NULL) |
822 | continue; | 822 | continue; |
823 | take_input(last_cmd); | 823 | take_input(last_cmd); |
824 | last_cmd = NULL; | 824 | last_cmd = NULL; |
825 | cmd = inchar(); | 825 | cmd = inchar(); |
826 | } | 826 | } |
827 | switch (cmd) { | 827 | switch (cmd) { |
828 | case 'm': | 828 | case 'm': |
829 | cmd = inchar(); | 829 | cmd = inchar(); |
830 | switch (cmd) { | 830 | switch (cmd) { |
831 | case 'm': | 831 | case 'm': |
832 | case 's': | 832 | case 's': |
833 | case 'd': | 833 | case 'd': |
834 | memops(cmd); | 834 | memops(cmd); |
835 | break; | 835 | break; |
836 | case 'l': | 836 | case 'l': |
837 | memlocate(); | 837 | memlocate(); |
838 | break; | 838 | break; |
839 | case 'z': | 839 | case 'z': |
840 | memzcan(); | 840 | memzcan(); |
841 | break; | 841 | break; |
842 | case 'i': | 842 | case 'i': |
843 | show_mem(0); | 843 | show_mem(0); |
844 | break; | 844 | break; |
845 | default: | 845 | default: |
846 | termch = cmd; | 846 | termch = cmd; |
847 | memex(); | 847 | memex(); |
848 | } | 848 | } |
849 | break; | 849 | break; |
850 | case 'd': | 850 | case 'd': |
851 | dump(); | 851 | dump(); |
852 | break; | 852 | break; |
853 | case 'l': | 853 | case 'l': |
854 | symbol_lookup(); | 854 | symbol_lookup(); |
855 | break; | 855 | break; |
856 | case 'r': | 856 | case 'r': |
857 | prregs(excp); /* print regs */ | 857 | prregs(excp); /* print regs */ |
858 | break; | 858 | break; |
859 | case 'e': | 859 | case 'e': |
860 | excprint(excp); | 860 | excprint(excp); |
861 | break; | 861 | break; |
862 | case 'S': | 862 | case 'S': |
863 | super_regs(); | 863 | super_regs(); |
864 | break; | 864 | break; |
865 | case 't': | 865 | case 't': |
866 | backtrace(excp); | 866 | backtrace(excp); |
867 | break; | 867 | break; |
868 | case 'f': | 868 | case 'f': |
869 | cacheflush(); | 869 | cacheflush(); |
870 | break; | 870 | break; |
871 | case 's': | 871 | case 's': |
872 | if (do_spu_cmd() == 0) | 872 | if (do_spu_cmd() == 0) |
873 | break; | 873 | break; |
874 | if (do_step(excp)) | 874 | if (do_step(excp)) |
875 | return cmd; | 875 | return cmd; |
876 | break; | 876 | break; |
877 | case 'x': | 877 | case 'x': |
878 | case 'X': | 878 | case 'X': |
879 | return cmd; | 879 | return cmd; |
880 | case EOF: | 880 | case EOF: |
881 | printf(" <no input ...>\n"); | 881 | printf(" <no input ...>\n"); |
882 | mdelay(2000); | 882 | mdelay(2000); |
883 | return cmd; | 883 | return cmd; |
884 | case '?': | 884 | case '?': |
885 | xmon_puts(help_string); | 885 | xmon_puts(help_string); |
886 | break; | 886 | break; |
887 | case 'b': | 887 | case 'b': |
888 | bpt_cmds(); | 888 | bpt_cmds(); |
889 | break; | 889 | break; |
890 | case 'C': | 890 | case 'C': |
891 | csum(); | 891 | csum(); |
892 | break; | 892 | break; |
893 | case 'c': | 893 | case 'c': |
894 | if (cpu_cmd()) | 894 | if (cpu_cmd()) |
895 | return 0; | 895 | return 0; |
896 | break; | 896 | break; |
897 | case 'z': | 897 | case 'z': |
898 | bootcmds(); | 898 | bootcmds(); |
899 | break; | 899 | break; |
900 | case 'p': | 900 | case 'p': |
901 | proccall(); | 901 | proccall(); |
902 | break; | 902 | break; |
903 | #ifdef CONFIG_PPC_STD_MMU | 903 | #ifdef CONFIG_PPC_STD_MMU |
904 | case 'u': | 904 | case 'u': |
905 | dump_segments(); | 905 | dump_segments(); |
906 | break; | 906 | break; |
907 | #elif defined(CONFIG_4xx) | 907 | #elif defined(CONFIG_4xx) |
908 | case 'u': | 908 | case 'u': |
909 | dump_tlb_44x(); | 909 | dump_tlb_44x(); |
910 | break; | 910 | break; |
911 | #elif defined(CONFIG_PPC_BOOK3E) | 911 | #elif defined(CONFIG_PPC_BOOK3E) |
912 | case 'u': | 912 | case 'u': |
913 | dump_tlb_book3e(); | 913 | dump_tlb_book3e(); |
914 | break; | 914 | break; |
915 | #endif | 915 | #endif |
916 | default: | 916 | default: |
917 | printf("Unrecognized command: "); | 917 | printf("Unrecognized command: "); |
918 | do { | 918 | do { |
919 | if (' ' < cmd && cmd <= '~') | 919 | if (' ' < cmd && cmd <= '~') |
920 | putchar(cmd); | 920 | putchar(cmd); |
921 | else | 921 | else |
922 | printf("\\x%x", cmd); | 922 | printf("\\x%x", cmd); |
923 | cmd = inchar(); | 923 | cmd = inchar(); |
924 | } while (cmd != '\n'); | 924 | } while (cmd != '\n'); |
925 | printf(" (type ? for help)\n"); | 925 | printf(" (type ? for help)\n"); |
926 | break; | 926 | break; |
927 | } | 927 | } |
928 | } | 928 | } |
929 | } | 929 | } |
930 | 930 | ||
931 | #ifdef CONFIG_BOOKE | 931 | #ifdef CONFIG_BOOKE |
932 | static int do_step(struct pt_regs *regs) | 932 | static int do_step(struct pt_regs *regs) |
933 | { | 933 | { |
934 | regs->msr |= MSR_DE; | 934 | regs->msr |= MSR_DE; |
935 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); | 935 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); |
936 | return 1; | 936 | return 1; |
937 | } | 937 | } |
938 | #else | 938 | #else |
939 | /* | 939 | /* |
940 | * Step a single instruction. | 940 | * Step a single instruction. |
941 | * Some instructions we emulate, others we execute with MSR_SE set. | 941 | * Some instructions we emulate, others we execute with MSR_SE set. |
942 | */ | 942 | */ |
943 | static int do_step(struct pt_regs *regs) | 943 | static int do_step(struct pt_regs *regs) |
944 | { | 944 | { |
945 | unsigned int instr; | 945 | unsigned int instr; |
946 | int stepped; | 946 | int stepped; |
947 | 947 | ||
948 | /* check we are in 64-bit kernel mode, translation enabled */ | 948 | /* check we are in 64-bit kernel mode, translation enabled */ |
949 | if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) { | 949 | if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) { |
950 | if (mread(regs->nip, &instr, 4) == 4) { | 950 | if (mread(regs->nip, &instr, 4) == 4) { |
951 | stepped = emulate_step(regs, instr); | 951 | stepped = emulate_step(regs, instr); |
952 | if (stepped < 0) { | 952 | if (stepped < 0) { |
953 | printf("Couldn't single-step %s instruction\n", | 953 | printf("Couldn't single-step %s instruction\n", |
954 | (IS_RFID(instr)? "rfid": "mtmsrd")); | 954 | (IS_RFID(instr)? "rfid": "mtmsrd")); |
955 | return 0; | 955 | return 0; |
956 | } | 956 | } |
957 | if (stepped > 0) { | 957 | if (stepped > 0) { |
958 | regs->trap = 0xd00 | (regs->trap & 1); | 958 | regs->trap = 0xd00 | (regs->trap & 1); |
959 | printf("stepped to "); | 959 | printf("stepped to "); |
960 | xmon_print_symbol(regs->nip, " ", "\n"); | 960 | xmon_print_symbol(regs->nip, " ", "\n"); |
961 | ppc_inst_dump(regs->nip, 1, 0); | 961 | ppc_inst_dump(regs->nip, 1, 0); |
962 | return 0; | 962 | return 0; |
963 | } | 963 | } |
964 | } | 964 | } |
965 | } | 965 | } |
966 | regs->msr |= MSR_SE; | 966 | regs->msr |= MSR_SE; |
967 | return 1; | 967 | return 1; |
968 | } | 968 | } |
969 | #endif | 969 | #endif |
970 | 970 | ||
971 | static void bootcmds(void) | 971 | static void bootcmds(void) |
972 | { | 972 | { |
973 | int cmd; | 973 | int cmd; |
974 | 974 | ||
975 | cmd = inchar(); | 975 | cmd = inchar(); |
976 | if (cmd == 'r') | 976 | if (cmd == 'r') |
977 | ppc_md.restart(NULL); | 977 | ppc_md.restart(NULL); |
978 | else if (cmd == 'h') | 978 | else if (cmd == 'h') |
979 | ppc_md.halt(); | 979 | ppc_md.halt(); |
980 | else if (cmd == 'p') | 980 | else if (cmd == 'p') |
981 | ppc_md.power_off(); | 981 | ppc_md.power_off(); |
982 | } | 982 | } |
983 | 983 | ||
/*
 * 'c' command: with no argument, list the CPUs currently stopped in
 * xmon (as compressed ranges); with a hex CPU number, hand control of
 * the debugger to that CPU and wait for it to take over.
 *
 * Returns 1 when control was successfully transferred (caller leaves
 * its command loop), 0 otherwise. On UP kernels this is a no-op.
 */
static int cpu_cmd(void)
{
#ifdef CONFIG_SMP
	unsigned long cpu, first_cpu, last_cpu;
	int timeout;

	if (!scanhex(&cpu)) {
		/* print cpus waiting or in xmon */
		printf("cpus stopped:");
		last_cpu = first_cpu = NR_CPUS;
		for_each_possible_cpu(cpu) {
			if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
				if (cpu == last_cpu + 1) {
					/* Extend the current range. */
					last_cpu = cpu;
				} else {
					if (last_cpu != first_cpu)
						printf("-%lx", last_cpu);
					last_cpu = first_cpu = cpu;
					printf(" %lx", cpu);
				}
			}
		}
		if (last_cpu != first_cpu)
			printf("-%lx", last_cpu);
		printf("\n");
		return 0;
	}
	/* try to switch to cpu specified */
	if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) {
		printf("cpu 0x%x isn't in xmon\n", cpu);
		return 0;
	}
	xmon_taken = 0;
	mb();
	xmon_owner = cpu;
	timeout = 10000000;
	while (!xmon_taken) {
		if (--timeout == 0) {
			if (test_and_set_bit(0, &xmon_taken))
				break;
			/* take control back */
			mb();
			xmon_owner = smp_processor_id();
			printf("cpu %u didn't take control\n", cpu);
			return 0;
		}
		barrier();
	}
	return 1;
#else
	return 0;
#endif /* CONFIG_SMP */
}
1037 | 1037 | ||
1038 | static unsigned short fcstab[256] = { | 1038 | static unsigned short fcstab[256] = { |
1039 | 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, | 1039 | 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, |
1040 | 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, | 1040 | 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, |
1041 | 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, | 1041 | 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, |
1042 | 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, | 1042 | 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, |
1043 | 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, | 1043 | 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, |
1044 | 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, | 1044 | 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, |
1045 | 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, | 1045 | 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, |
1046 | 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, | 1046 | 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, |
1047 | 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, | 1047 | 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, |
1048 | 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, | 1048 | 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, |
1049 | 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, | 1049 | 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, |
1050 | 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, | 1050 | 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, |
1051 | 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, | 1051 | 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, |
1052 | 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, | 1052 | 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, |
1053 | 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, | 1053 | 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, |
1054 | 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, | 1054 | 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, |
1055 | 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, | 1055 | 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, |
1056 | 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, | 1056 | 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, |
1057 | 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, | 1057 | 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, |
1058 | 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, | 1058 | 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, |
1059 | 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, | 1059 | 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, |
1060 | 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, | 1060 | 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, |
1061 | 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, | 1061 | 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, |
1062 | 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, | 1062 | 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, |
1063 | 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, | 1063 | 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, |
1064 | 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, | 1064 | 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, |
1065 | 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, | 1065 | 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, |
1066 | 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, | 1066 | 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, |
1067 | 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, | 1067 | 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, |
1068 | 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, | 1068 | 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, |
1069 | 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, | 1069 | 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, |
1070 | 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 | 1070 | 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 |
1071 | }; | 1071 | }; |
1072 | 1072 | ||
/* One step of the PPP FCS-16 (CRC-16/CCITT) update: fold byte c into fcs
 * using the precomputed fcstab lookup table. */
#define FCS(fcs, c)	(((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff])
1074 | 1074 | ||
1075 | static void | 1075 | static void |
1076 | csum(void) | 1076 | csum(void) |
1077 | { | 1077 | { |
1078 | unsigned int i; | 1078 | unsigned int i; |
1079 | unsigned short fcs; | 1079 | unsigned short fcs; |
1080 | unsigned char v; | 1080 | unsigned char v; |
1081 | 1081 | ||
1082 | if (!scanhex(&adrs)) | 1082 | if (!scanhex(&adrs)) |
1083 | return; | 1083 | return; |
1084 | if (!scanhex(&ncsum)) | 1084 | if (!scanhex(&ncsum)) |
1085 | return; | 1085 | return; |
1086 | fcs = 0xffff; | 1086 | fcs = 0xffff; |
1087 | for (i = 0; i < ncsum; ++i) { | 1087 | for (i = 0; i < ncsum; ++i) { |
1088 | if (mread(adrs+i, &v, 1) == 0) { | 1088 | if (mread(adrs+i, &v, 1) == 0) { |
1089 | printf("csum stopped at %x\n", adrs+i); | 1089 | printf("csum stopped at %x\n", adrs+i); |
1090 | break; | 1090 | break; |
1091 | } | 1091 | } |
1092 | fcs = FCS(fcs, v); | 1092 | fcs = FCS(fcs, v); |
1093 | } | 1093 | } |
1094 | printf("%x\n", fcs); | 1094 | printf("%x\n", fcs); |
1095 | } | 1095 | } |
1096 | 1096 | ||
1097 | /* | 1097 | /* |
1098 | * Check if this is a suitable place to put a breakpoint. | 1098 | * Check if this is a suitable place to put a breakpoint. |
1099 | */ | 1099 | */ |
/*
 * Check if this is a suitable place to put a breakpoint.
 * Returns 1 if a breakpoint may be placed at addr, 0 otherwise
 * (printing the reason).  The address is word-aligned first, since
 * powerpc instructions are 4 bytes.
 */
static long check_bp_loc(unsigned long addr)
{
	unsigned int insn;
	long ok = 1;

	addr &= ~3;
	if (!is_kernel_addr(addr)) {
		printf("Breakpoints may only be placed at kernel addresses\n");
		ok = 0;
	} else if (!mread(addr, &insn, sizeof(insn))) {
		printf("Can't read instruction at address %lx\n", addr);
		ok = 0;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
		/* Trapping these would corrupt the MSR state transition. */
		printf("Breakpoints may not be placed on mtmsrd or rfid "
		       "instructions\n");
		ok = 0;
	}
	return ok;
}
1120 | 1120 | ||
/* Usage text printed in response to the "b?" command. */
static char *breakpoint_help_string =
    "Breakpoint command usage:\n"
    "b show breakpoints\n"
    "b <addr> [cnt] set breakpoint at given instr addr\n"
    "bc clear all breakpoints\n"
    "bc <n/addr> clear breakpoint number n or at addr\n"
    "bi <addr> [cnt] set hardware instr breakpoint (POWER3/RS64 only)\n"
    "bd <addr> [cnt] set hardware data breakpoint\n"
    "";
1130 | 1130 | ||
/*
 * Parse and execute the 'b' family of commands:
 *   bd[r|w] <addr> - set a hardware data breakpoint (DABR)
 *   bi <addr>      - set a hardware instruction breakpoint (IABR)
 *   bc [n|addr]    - clear one breakpoint, or all of them
 *   b <addr>       - set a trap-based software breakpoint
 *   b              - list all breakpoints
 *   b?             - print usage help
 * Unconsumed characters are pushed back via termch for the next parse.
 */
static void
bpt_cmds(void)
{
	int cmd;
	unsigned long a;
	int mode, i;
	struct bpt *bp;
	const char badaddr[] = "Only kernel addresses are permitted "
		"for breakpoints\n";

	cmd = inchar();
	switch (cmd) {
#ifndef CONFIG_8xx
	case 'd':	/* bd - hardware data breakpoint */
		mode = 7;		/* default: break on read and write */
		cmd = inchar();
		if (cmd == 'r')
			mode = 5;	/* break on read only */
		else if (cmd == 'w')
			mode = 6;	/* break on write only */
		else
			termch = cmd;	/* push back the unconsumed char */
		dabr.address = 0;
		dabr.enabled = 0;
		if (scanhex(&dabr.address)) {
			if (!is_kernel_addr(dabr.address)) {
				printf(badaddr);
				break;
			}
			dabr.address &= ~HW_BRK_TYPE_DABR;
			dabr.enabled = mode | BP_DABR;
		}
		break;

	case 'i':	/* bi - hardware instr breakpoint */
		if (!cpu_has_feature(CPU_FTR_IABR)) {
			printf("Hardware instruction breakpoint "
			       "not supported on this cpu\n");
			break;
		}
		if (iabr) {
			/* Disable any previously-set instruction breakpoint. */
			iabr->enabled &= ~(BP_IABR | BP_IABR_TE);
			iabr = NULL;
		}
		if (!scanhex(&a))
			break;
		if (!check_bp_loc(a))
			break;
		bp = new_breakpoint(a);
		if (bp != NULL) {
			bp->enabled |= BP_IABR | BP_IABR_TE;
			iabr = bp;
		}
		break;
#endif

	case 'c':	/* bc - clear breakpoint(s) */
		if (!scanhex(&a)) {
			/* clear all breakpoints */
			for (i = 0; i < NBPTS; ++i)
				bpts[i].enabled = 0;
			iabr = NULL;
			dabr.enabled = 0;
			printf("All breakpoints cleared\n");
			break;
		}

		if (a <= NBPTS && a >= 1) {
			/* assume a breakpoint number */
			bp = &bpts[a-1];	/* bp nums are 1 based */
		} else {
			/* assume a breakpoint address */
			bp = at_breakpoint(a);
			if (bp == NULL) {
				printf("No breakpoint at %x\n", a);
				break;
			}
		}

		printf("Cleared breakpoint %x (", BP_NUM(bp));
		xmon_print_symbol(bp->address, " ", ")\n");
		bp->enabled = 0;
		break;

	default:	/* b - list breakpoints or set a software breakpoint */
		termch = cmd;
		cmd = skipbl();
		if (cmd == '?') {
			printf(breakpoint_help_string);
			break;
		}
		termch = cmd;
		if (!scanhex(&a)) {
			/* print all breakpoints */
			printf(" type address\n");
			if (dabr.enabled) {
				printf(" data "REG" [", dabr.address);
				if (dabr.enabled & 1)
					printf("r");
				if (dabr.enabled & 2)
					printf("w");
				printf("]\n");
			}
			for (bp = bpts; bp < &bpts[NBPTS]; ++bp) {
				if (!bp->enabled)
					continue;
				printf("%2x %s ", BP_NUM(bp),
				       (bp->enabled & BP_IABR)? "inst": "trap");
				xmon_print_symbol(bp->address, " ", "\n");
			}
			break;
		}

		if (!check_bp_loc(a))
			break;
		bp = new_breakpoint(a);
		if (bp != NULL)
			bp->enabled |= BP_TRAP;
		break;
	}
}
1252 | 1252 | ||
/* Very cheap human name for vector lookup. */
static
const char *getvecname(unsigned long vec)
{
	/* Table of known exception vectors and their human-readable names. */
	static const struct {
		unsigned long vec;
		const char *name;
	} vecnames[] = {
		{ 0x100,  "(System Reset)" },
		{ 0x200,  "(Machine Check)" },
		{ 0x300,  "(Data Access)" },
		{ 0x380,  "(Data SLB Access)" },
		{ 0x400,  "(Instruction Access)" },
		{ 0x480,  "(Instruction SLB Access)" },
		{ 0x500,  "(Hardware Interrupt)" },
		{ 0x600,  "(Alignment)" },
		{ 0x700,  "(Program Check)" },
		{ 0x800,  "(FPU Unavailable)" },
		{ 0x900,  "(Decrementer)" },
		{ 0x980,  "(Hypervisor Decrementer)" },
		{ 0xa00,  "(Doorbell)" },
		{ 0xc00,  "(System Call)" },
		{ 0xd00,  "(Single Step)" },
		{ 0xe40,  "(Emulation Assist)" },
		{ 0xe60,  "(HMI)" },
		{ 0xe80,  "(Hypervisor Doorbell)" },
		{ 0xf00,  "(Performance Monitor)" },
		{ 0xf20,  "(Altivec Unavailable)" },
		{ 0x1300, "(Instruction Breakpoint)" },
		{ 0x1500, "(Denormalisation)" },
		{ 0x1700, "(Altivec Assist)" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(vecnames) / sizeof(vecnames[0]); ++i)
		if (vecnames[i].vec == vec)
			return vecnames[i].name;

	/* Unknown vector: empty string, since callers embed the result. */
	return "";
}
1287 | 1287 | ||
/*
 * Look up, via kallsyms, the start and end addresses of the function
 * containing pc.  On failure (or pc == 0) both *startp and *endp are
 * left as 0.  The lookup runs under setjmp fault protection: a machine
 * check while touching memory longjmps back here instead of crashing.
 */
static void get_function_bounds(unsigned long pc, unsigned long *startp,
				unsigned long *endp)
{
	unsigned long size, offset;
	const char *name;

	*startp = *endp = 0;
	if (pc == 0)
		return;
	if (setjmp(bus_error_jmp) == 0) {
		catch_memory_errors = 1;
		sync();
		name = kallsyms_lookup(pc, &size, &offset, NULL, tmpstr);
		if (name != NULL) {
			*startp = pc - offset;		/* function entry */
			*endp = pc - offset + size;	/* one past the end */
		}
		sync();
	}
	catch_memory_errors = 0;	/* reset on both normal and fault paths */
}
1309 | 1309 | ||
/* Byte offsets, within a stack frame, of the saved LR slot and of the
 * "regshere" marker that flags an exception frame. */
#define LRSAVE_OFFSET		(STACK_FRAME_LR_SAVE * sizeof(unsigned long))
#define MARKER_OFFSET		(STACK_FRAME_MARKER * sizeof(unsigned long))
1312 | 1312 | ||
/*
 * Print a backtrace starting at stack pointer sp.  If pc and/or lr are
 * non-zero they describe the interrupted context and are used to judge
 * whether the bottommost saved LR is a reliable caller address.  Frames
 * carrying the "regshere" marker are decoded as exception frames and
 * the trapped pc/lr are picked up from the saved registers and the walk
 * continues through them.  Capped at 64 frames to guard against loops.
 */
static void xmon_show_stack(unsigned long sp, unsigned long lr,
			    unsigned long pc)
{
	int max_to_print = 64;
	unsigned long ip;
	unsigned long newsp;
	unsigned long marker;
	struct pt_regs regs;

	while (max_to_print--) {
		if (sp < PAGE_OFFSET) {
			if (sp != 0)
				printf("SP (%lx) is in userspace\n", sp);
			break;
		}

		if (!mread(sp + LRSAVE_OFFSET, &ip, sizeof(unsigned long))
		    || !mread(sp, &newsp, sizeof(unsigned long))) {
			printf("Couldn't read stack frame at %lx\n", sp);
			break;
		}

		/*
		 * For the first stack frame, try to work out if
		 * LR and/or the saved LR value in the bottommost
		 * stack frame are valid.
		 */
		if ((pc | lr) != 0) {
			unsigned long fnstart, fnend;
			unsigned long nextip;
			int printip = 1;

			get_function_bounds(pc, &fnstart, &fnend);
			nextip = 0;
			if (newsp > sp)
				mread(newsp + LRSAVE_OFFSET, &nextip,
				      sizeof(unsigned long));
			if (lr == ip) {
				/* LR matches the saved slot; only distrust the
				 * slot if LR is bogus or still inside pc's
				 * own function (i.e. not yet saved). */
				if (lr < PAGE_OFFSET
				    || (fnstart <= lr && lr < fnend))
					printip = 0;
			} else if (lr == nextip) {
				/* LR already appears in the next frame. */
				printip = 0;
			} else if (lr >= PAGE_OFFSET
				   && !(fnstart <= lr && lr < fnend)) {
				/* LR looks like a distinct return address:
				 * report it separately. */
				printf("[link register ] ");
				xmon_print_symbol(lr, " ", "\n");
			}
			if (printip) {
				printf("["REG"] ", sp);
				xmon_print_symbol(ip, " ", " (unreliable)\n");
			}
			/* The heuristics apply to the first frame only. */
			pc = lr = 0;

		} else {
			printf("["REG"] ", sp);
			xmon_print_symbol(ip, " ", "\n");
		}

		/* Look for "regshere" marker to see if this is
		   an exception frame. */
		if (mread(sp + MARKER_OFFSET, &marker, sizeof(unsigned long))
		    && marker == STACK_FRAME_REGS_MARKER) {
			if (mread(sp + STACK_FRAME_OVERHEAD, &regs, sizeof(regs))
			    != sizeof(regs)) {
				printf("Couldn't read registers at %lx\n",
				       sp + STACK_FRAME_OVERHEAD);
				break;
			}
			printf("--- Exception: %lx %s at ", regs.trap,
			       getvecname(TRAP(&regs)));
			pc = regs.nip;
			lr = regs.link;
			xmon_print_symbol(pc, " ", "\n");
		}

		if (newsp == 0)
			break;

		sp = newsp;
	}
}
1395 | 1395 | ||
1396 | static void backtrace(struct pt_regs *excp) | 1396 | static void backtrace(struct pt_regs *excp) |
1397 | { | 1397 | { |
1398 | unsigned long sp; | 1398 | unsigned long sp; |
1399 | 1399 | ||
1400 | if (scanhex(&sp)) | 1400 | if (scanhex(&sp)) |
1401 | xmon_show_stack(sp, 0, 0); | 1401 | xmon_show_stack(sp, 0, 0); |
1402 | else | 1402 | else |
1403 | xmon_show_stack(excp->gpr[1], excp->link, excp->nip); | 1403 | xmon_show_stack(excp->gpr[1], excp->link, excp->nip); |
1404 | scannl(); | 1404 | scannl(); |
1405 | } | 1405 | } |
1406 | 1406 | ||
/*
 * For a program-check trap taken in the kernel, see whether the
 * faulting instruction is a registered BUG() site and, if so, print
 * its location.  Warning-level entries (WARN_ON) are skipped.  Compiles
 * to a no-op when CONFIG_BUG is off.
 */
static void print_bug_trap(struct pt_regs *regs)
{
#ifdef CONFIG_BUG
	const struct bug_entry *bug;
	unsigned long addr;

	if (regs->msr & MSR_PR)
		return;		/* not in kernel */
	addr = regs->nip;	/* address of trap instruction */
	if (addr < PAGE_OFFSET)
		return;
	bug = find_bug(regs->nip);
	if (bug == NULL)
		return;		/* not a BUG()/WARN() trap site */
	if (is_warning_bug(bug))
		return;		/* warnings are handled elsewhere */

#ifdef CONFIG_DEBUG_BUGVERBOSE
	/* Verbose bug table entries carry file and line information. */
	printf("kernel BUG at %s:%u!\n",
	       bug->file, bug->line);
#else
	printf("kernel BUG at %p!\n", (void *)bug->bug_addr);
#endif
#endif /* CONFIG_BUG */
}
1432 | 1432 | ||
1433 | static void excprint(struct pt_regs *fp) | 1433 | static void excprint(struct pt_regs *fp) |
1434 | { | 1434 | { |
1435 | unsigned long trap; | 1435 | unsigned long trap; |
1436 | 1436 | ||
1437 | #ifdef CONFIG_SMP | 1437 | #ifdef CONFIG_SMP |
1438 | printf("cpu 0x%x: ", smp_processor_id()); | 1438 | printf("cpu 0x%x: ", smp_processor_id()); |
1439 | #endif /* CONFIG_SMP */ | 1439 | #endif /* CONFIG_SMP */ |
1440 | 1440 | ||
1441 | trap = TRAP(fp); | 1441 | trap = TRAP(fp); |
1442 | printf("Vector: %lx %s at [%lx]\n", fp->trap, getvecname(trap), fp); | 1442 | printf("Vector: %lx %s at [%lx]\n", fp->trap, getvecname(trap), fp); |
1443 | printf(" pc: "); | 1443 | printf(" pc: "); |
1444 | xmon_print_symbol(fp->nip, ": ", "\n"); | 1444 | xmon_print_symbol(fp->nip, ": ", "\n"); |
1445 | 1445 | ||
1446 | printf(" lr: ", fp->link); | 1446 | printf(" lr: ", fp->link); |
1447 | xmon_print_symbol(fp->link, ": ", "\n"); | 1447 | xmon_print_symbol(fp->link, ": ", "\n"); |
1448 | 1448 | ||
1449 | printf(" sp: %lx\n", fp->gpr[1]); | 1449 | printf(" sp: %lx\n", fp->gpr[1]); |
1450 | printf(" msr: %lx\n", fp->msr); | 1450 | printf(" msr: %lx\n", fp->msr); |
1451 | 1451 | ||
1452 | if (trap == 0x300 || trap == 0x380 || trap == 0x600 || trap == 0x200) { | 1452 | if (trap == 0x300 || trap == 0x380 || trap == 0x600 || trap == 0x200) { |
1453 | printf(" dar: %lx\n", fp->dar); | 1453 | printf(" dar: %lx\n", fp->dar); |
1454 | if (trap != 0x380) | 1454 | if (trap != 0x380) |
1455 | printf(" dsisr: %lx\n", fp->dsisr); | 1455 | printf(" dsisr: %lx\n", fp->dsisr); |
1456 | } | 1456 | } |
1457 | 1457 | ||
1458 | printf(" current = 0x%lx\n", current); | 1458 | printf(" current = 0x%lx\n", current); |
1459 | #ifdef CONFIG_PPC64 | 1459 | #ifdef CONFIG_PPC64 |
1460 | printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n", | 1460 | printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n", |
1461 | local_paca, local_paca->soft_enabled, local_paca->irq_happened); | 1461 | local_paca, local_paca->soft_enabled, local_paca->irq_happened); |
1462 | #endif | 1462 | #endif |
1463 | if (current) { | 1463 | if (current) { |
1464 | printf(" pid = %ld, comm = %s\n", | 1464 | printf(" pid = %ld, comm = %s\n", |
1465 | current->pid, current->comm); | 1465 | current->pid, current->comm); |
1466 | } | 1466 | } |
1467 | 1467 | ||
1468 | if (trap == 0x700) | 1468 | if (trap == 0x700) |
1469 | print_bug_trap(fp); | 1469 | print_bug_trap(fp); |
1470 | } | 1470 | } |
1471 | 1471 | ||
/*
 * The "r" command: print the GPRs and key special registers from *fp.
 * With a hex argument, a pt_regs image is instead copied (under setjmp
 * fault protection) from that address, so saved register frames
 * anywhere in memory can be displayed.
 */
static void prregs(struct pt_regs *fp)
{
	int n, trap;
	unsigned long base;
	struct pt_regs regs;

	if (scanhex(&base)) {
		/* Copy under fault protection; a bad address longjmps back. */
		if (setjmp(bus_error_jmp) == 0) {
			catch_memory_errors = 1;
			sync();
			regs = *(struct pt_regs *)base;
			sync();
			/* wait a little while to see if we get a machine check */
			__delay(200);
		} else {
			catch_memory_errors = 0;
			printf("*** Error reading registers from "REG"\n",
			       base);
			return;
		}
		catch_memory_errors = 0;
		fp = &regs;
	}

#ifdef CONFIG_PPC64
	if (FULL_REGS(fp)) {
		/* All 32 GPRs were saved: print them in two columns. */
		for (n = 0; n < 16; ++n)
			printf("R%.2ld = "REG" R%.2ld = "REG"\n",
			       n, fp->gpr[n], n+16, fp->gpr[n+16]);
	} else {
		/* Partial save (fast syscall path): only r0-r13 are valid. */
		for (n = 0; n < 7; ++n)
			printf("R%.2ld = "REG" R%.2ld = "REG"\n",
			       n, fp->gpr[n], n+7, fp->gpr[n+7]);
	}
#else
	for (n = 0; n < 32; ++n) {
		printf("R%.2d = %.8x%s", n, fp->gpr[n],
		       (n & 3) == 3? "\n": " ");
		if (n == 12 && !FULL_REGS(fp)) {
			/* Partial save: stop after the volatile registers. */
			printf("\n");
			break;
		}
	}
#endif
	printf("pc = ");
	xmon_print_symbol(fp->nip, " ", "\n");
	if (TRAP(fp) != 0xc00 && cpu_has_feature(CPU_FTR_CFAR)) {
		/* CFAR is stashed in the orig_gpr3 slot of the frame. */
		printf("cfar= ");
		xmon_print_symbol(fp->orig_gpr3, " ", "\n");
	}
	printf("lr = ");
	xmon_print_symbol(fp->link, " ", "\n");
	printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr);
	printf("ctr = "REG" xer = "REG" trap = %4lx\n",
	       fp->ctr, fp->xer, fp->trap);
	trap = TRAP(fp);
	if (trap == 0x300 || trap == 0x380 || trap == 0x600)
		printf("dar = "REG" dsisr = %.8lx\n", fp->dar, fp->dsisr);
}
1530 | 1530 | ||
/*
 * The "f" command: flush ("f") or invalidate ("fi") cache lines covering
 * a byte range starting at adrs.  Memory is touched under setjmp fault
 * protection so a bad address is recovered via the machine-check
 * longjmp rather than crashing xmon.
 */
static void cacheflush(void)
{
	int cmd;
	unsigned long nflush;

	cmd = inchar();
	if (cmd != 'i')
		termch = cmd;	/* push back: plain flush, not invalidate */
	scanhex((void *)&adrs);
	if (termch != '\n')
		termch = 0;
	nflush = 1;		/* default: flush a single line */
	scanhex(&nflush);
	/* Convert a byte count into a cache-line count, rounding up. */
	nflush = (nflush + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES;
	if (setjmp(bus_error_jmp) == 0) {
		catch_memory_errors = 1;
		sync();

		if (cmd != 'i') {
			for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES)
				cflush((void *) adrs);
		} else {
			for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES)
				cinval((void *) adrs);
		}
		sync();
		/* wait a little while to see if we get a machine check */
		__delay(200);
	}
	catch_memory_errors = 0;	/* reset on both normal and fault paths */
}
1562 | 1562 | ||
1563 | static unsigned long | 1563 | static unsigned long |
1564 | read_spr(int n) | 1564 | read_spr(int n) |
1565 | { | 1565 | { |
1566 | unsigned int instrs[2]; | 1566 | unsigned int instrs[2]; |
1567 | unsigned long (*code)(void); | 1567 | unsigned long (*code)(void); |
1568 | unsigned long ret = -1UL; | 1568 | unsigned long ret = -1UL; |
1569 | #ifdef CONFIG_PPC64 | 1569 | #ifdef CONFIG_PPC64 |
1570 | unsigned long opd[3]; | 1570 | unsigned long opd[3]; |
1571 | 1571 | ||
1572 | opd[0] = (unsigned long)instrs; | 1572 | opd[0] = (unsigned long)instrs; |
1573 | opd[1] = 0; | 1573 | opd[1] = 0; |
1574 | opd[2] = 0; | 1574 | opd[2] = 0; |
1575 | code = (unsigned long (*)(void)) opd; | 1575 | code = (unsigned long (*)(void)) opd; |
1576 | #else | 1576 | #else |
1577 | code = (unsigned long (*)(void)) instrs; | 1577 | code = (unsigned long (*)(void)) instrs; |
1578 | #endif | 1578 | #endif |
1579 | 1579 | ||
1580 | /* mfspr r3,n; blr */ | 1580 | /* mfspr r3,n; blr */ |
1581 | instrs[0] = 0x7c6002a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6); | 1581 | instrs[0] = 0x7c6002a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6); |
1582 | instrs[1] = 0x4e800020; | 1582 | instrs[1] = 0x4e800020; |
1583 | store_inst(instrs); | 1583 | store_inst(instrs); |
1584 | store_inst(instrs+1); | 1584 | store_inst(instrs+1); |
1585 | 1585 | ||
1586 | if (setjmp(bus_error_jmp) == 0) { | 1586 | if (setjmp(bus_error_jmp) == 0) { |
1587 | catch_memory_errors = 1; | 1587 | catch_memory_errors = 1; |
1588 | sync(); | 1588 | sync(); |
1589 | 1589 | ||
1590 | ret = code(); | 1590 | ret = code(); |
1591 | 1591 | ||
1592 | sync(); | 1592 | sync(); |
1593 | /* wait a little while to see if we get a machine check */ | 1593 | /* wait a little while to see if we get a machine check */ |
1594 | __delay(200); | 1594 | __delay(200); |
1595 | n = size; | 1595 | n = size; |
1596 | } | 1596 | } |
1597 | 1597 | ||
1598 | return ret; | 1598 | return ret; |
1599 | } | 1599 | } |
1600 | 1600 | ||
1601 | static void | 1601 | static void |
1602 | write_spr(int n, unsigned long val) | 1602 | write_spr(int n, unsigned long val) |
1603 | { | 1603 | { |
1604 | unsigned int instrs[2]; | 1604 | unsigned int instrs[2]; |
1605 | unsigned long (*code)(unsigned long); | 1605 | unsigned long (*code)(unsigned long); |
1606 | #ifdef CONFIG_PPC64 | 1606 | #ifdef CONFIG_PPC64 |
1607 | unsigned long opd[3]; | 1607 | unsigned long opd[3]; |
1608 | 1608 | ||
1609 | opd[0] = (unsigned long)instrs; | 1609 | opd[0] = (unsigned long)instrs; |
1610 | opd[1] = 0; | 1610 | opd[1] = 0; |
1611 | opd[2] = 0; | 1611 | opd[2] = 0; |
1612 | code = (unsigned long (*)(unsigned long)) opd; | 1612 | code = (unsigned long (*)(unsigned long)) opd; |
1613 | #else | 1613 | #else |
1614 | code = (unsigned long (*)(unsigned long)) instrs; | 1614 | code = (unsigned long (*)(unsigned long)) instrs; |
1615 | #endif | 1615 | #endif |
1616 | 1616 | ||
1617 | instrs[0] = 0x7c6003a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6); | 1617 | instrs[0] = 0x7c6003a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6); |
1618 | instrs[1] = 0x4e800020; | 1618 | instrs[1] = 0x4e800020; |
1619 | store_inst(instrs); | 1619 | store_inst(instrs); |
1620 | store_inst(instrs+1); | 1620 | store_inst(instrs+1); |
1621 | 1621 | ||
1622 | if (setjmp(bus_error_jmp) == 0) { | 1622 | if (setjmp(bus_error_jmp) == 0) { |
1623 | catch_memory_errors = 1; | 1623 | catch_memory_errors = 1; |
1624 | sync(); | 1624 | sync(); |
1625 | 1625 | ||
1626 | code(val); | 1626 | code(val); |
1627 | 1627 | ||
1628 | sync(); | 1628 | sync(); |
1629 | /* wait a little while to see if we get a machine check */ | 1629 | /* wait a little while to see if we get a machine check */ |
1630 | __delay(200); | 1630 | __delay(200); |
1631 | n = size; | 1631 | n = size; |
1632 | } | 1632 | } |
1633 | } | 1633 | } |
1634 | 1634 | ||
/* SPR number most recently used by the 'S' (super_regs) command;
 * kept across invocations so a bare "Sr"/"Sw" reuses it. */
static unsigned long regno;
/* Assembly symbols referenced elsewhere -- NOTE(review): no user is
 * visible in this chunk; presumably low-level exception entry labels. */
extern char exc_prolog;
extern char dec_exc;
1638 | 1638 | ||
/*
 * 'S' command: display supervisor state, or read/write an SPR.
 * Bare "S" prints MSR, PVR, DEC, SPRGs, current stack pointer and TOC.
 * "Sr <n>" reads SPR n; "Sw <n> [val]" writes it (then reads it back
 * via the fall-through into the 'r' case).
 */
static void super_regs(void)
{
	int cmd;
	unsigned long val;

	cmd = skipbl();
	if (cmd == '\n') {
		unsigned long sp, toc;
		/* grab r1 (stack pointer) and r2 (TOC) directly */
		asm("mr %0,1" : "=r" (sp) :);
		asm("mr %0,2" : "=r" (toc) :);

		printf("msr = "REG" sprg0= "REG"\n",
		       mfmsr(), mfspr(SPRN_SPRG0));
		printf("pvr = "REG" sprg1= "REG"\n",
		       mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
		printf("dec = "REG" sprg2= "REG"\n",
		       mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
		printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
		printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));

		return;
	}

	scanhex(&regno);	/* regno is sticky across commands */
	switch (cmd) {
	case 'w':
		val = read_spr(regno);
		scanhex(&val);	/* new value defaults to the current one */
		write_spr(regno, val);
		/* fall through */
	case 'r':
		printf("spr %lx = %lx\n", regno, read_spr(regno));
		break;
	}
	scannl();
}
1675 | 1675 | ||
1676 | /* | 1676 | /* |
1677 | * Stuff for reading and writing memory safely | 1677 | * Stuff for reading and writing memory safely |
1678 | */ | 1678 | */ |
/*
 * Safely read size bytes from address adrs into buf.  Faults are caught
 * via the bus_error_jmp setjmp fence, so reading a bad address returns
 * the partial count instead of crashing xmon.  Returns the number of
 * bytes successfully read (size on success, less -- possibly 0 -- if a
 * fault occurred).
 */
static int
mread(unsigned long adrs, void *buf, int size)
{
	/* volatile: n must survive the longjmp taken on a fault */
	volatile int n;
	char *p, *q;

	n = 0;
	if (setjmp(bus_error_jmp) == 0) {
		catch_memory_errors = 1;
		sync();
		p = (char *)adrs;
		q = (char *)buf;
		/* use a single naturally-sized access where possible, so
		 * device registers are read with the right width */
		switch (size) {
		case 2:
			*(u16 *)q = *(u16 *)p;
			break;
		case 4:
			*(u32 *)q = *(u32 *)p;
			break;
		case 8:
			*(u64 *)q = *(u64 *)p;
			break;
		default:
			for( ; n < size; ++n) {
				*q++ = *p++;
				sync();	/* force each access to complete */
			}
		}
		sync();
		/* wait a little while to see if we get a machine check */
		__delay(200);
		n = size;	/* no fault: full count */
	}
	catch_memory_errors = 0;
	return n;
}
1715 | 1715 | ||
1716 | static int | 1716 | static int |
1717 | mwrite(unsigned long adrs, void *buf, int size) | 1717 | mwrite(unsigned long adrs, void *buf, int size) |
1718 | { | 1718 | { |
1719 | volatile int n; | 1719 | volatile int n; |
1720 | char *p, *q; | 1720 | char *p, *q; |
1721 | 1721 | ||
1722 | n = 0; | 1722 | n = 0; |
1723 | if (setjmp(bus_error_jmp) == 0) { | 1723 | if (setjmp(bus_error_jmp) == 0) { |
1724 | catch_memory_errors = 1; | 1724 | catch_memory_errors = 1; |
1725 | sync(); | 1725 | sync(); |
1726 | p = (char *) adrs; | 1726 | p = (char *) adrs; |
1727 | q = (char *) buf; | 1727 | q = (char *) buf; |
1728 | switch (size) { | 1728 | switch (size) { |
1729 | case 2: | 1729 | case 2: |
1730 | *(u16 *)p = *(u16 *)q; | 1730 | *(u16 *)p = *(u16 *)q; |
1731 | break; | 1731 | break; |
1732 | case 4: | 1732 | case 4: |
1733 | *(u32 *)p = *(u32 *)q; | 1733 | *(u32 *)p = *(u32 *)q; |
1734 | break; | 1734 | break; |
1735 | case 8: | 1735 | case 8: |
1736 | *(u64 *)p = *(u64 *)q; | 1736 | *(u64 *)p = *(u64 *)q; |
1737 | break; | 1737 | break; |
1738 | default: | 1738 | default: |
1739 | for ( ; n < size; ++n) { | 1739 | for ( ; n < size; ++n) { |
1740 | *p++ = *q++; | 1740 | *p++ = *q++; |
1741 | sync(); | 1741 | sync(); |
1742 | } | 1742 | } |
1743 | } | 1743 | } |
1744 | sync(); | 1744 | sync(); |
1745 | /* wait a little while to see if we get a machine check */ | 1745 | /* wait a little while to see if we get a machine check */ |
1746 | __delay(200); | 1746 | __delay(200); |
1747 | n = size; | 1747 | n = size; |
1748 | } else { | 1748 | } else { |
1749 | printf("*** Error writing address %x\n", adrs + n); | 1749 | printf("*** Error writing address %x\n", adrs + n); |
1750 | } | 1750 | } |
1751 | catch_memory_errors = 0; | 1751 | catch_memory_errors = 0; |
1752 | return n; | 1752 | return n; |
1753 | } | 1753 | } |
1754 | 1754 | ||
/* Classification of the last caught fault, set by handle_fault(). */
static int fault_type;			/* index into fault_chars[] */
static int fault_except;		/* raw trap number of the fault */
static char *fault_chars[] = { "--", "**", "##" };	/* printed per unreadable byte */
1758 | 1758 | ||
1759 | static int handle_fault(struct pt_regs *regs) | 1759 | static int handle_fault(struct pt_regs *regs) |
1760 | { | 1760 | { |
1761 | fault_except = TRAP(regs); | 1761 | fault_except = TRAP(regs); |
1762 | switch (TRAP(regs)) { | 1762 | switch (TRAP(regs)) { |
1763 | case 0x200: | 1763 | case 0x200: |
1764 | fault_type = 0; | 1764 | fault_type = 0; |
1765 | break; | 1765 | break; |
1766 | case 0x300: | 1766 | case 0x300: |
1767 | case 0x380: | 1767 | case 0x380: |
1768 | fault_type = 1; | 1768 | fault_type = 1; |
1769 | break; | 1769 | break; |
1770 | default: | 1770 | default: |
1771 | fault_type = 2; | 1771 | fault_type = 2; |
1772 | } | 1772 | } |
1773 | 1773 | ||
1774 | longjmp(bus_error_jmp, 1); | 1774 | longjmp(bus_error_jmp, 1); |
1775 | 1775 | ||
1776 | return 0; | 1776 | return 0; |
1777 | } | 1777 | } |
1778 | 1778 | ||
#define SWAP(a, b, t)	((t) = (a), (a) = (b), (b) = (t))

/*
 * Reverse the byte order of a 2-, 4- or 8-byte value in place.
 * Any other size is left untouched, matching the sizes the memory
 * commands support.
 */
static void
byterev(unsigned char *val, int size)
{
	int lo, hi;
	unsigned char tmp;

	if (size != 2 && size != 4 && size != 8)
		return;
	for (lo = 0, hi = size - 1; lo < hi; ++lo, --hi) {
		tmp = val[lo];
		val[lo] = val[hi];
		val[hi] = tmp;
	}
}
1802 | 1802 | ||
static int brev;	/* memory-examine: byte-reverse displayed/written values */
static int mnoread;	/* memory-examine: don't read memory (for I/O spaces) */
1805 | 1805 | ||
/* Help text printed for "m ?" (top-level memory-examine usage). */
static char *memex_help_string =
    "Memory examine command usage:\n"
    "m [addr] [flags] examine/change memory\n"
    " addr is optional. will start where left off.\n"
    " flags may include chars from this set:\n"
    " b modify by bytes (default)\n"
    " w modify by words (2 byte)\n"
    " l modify by longs (4 byte)\n"
    " d modify by doubleword (8 byte)\n"
    " r toggle reverse byte order mode\n"
    " n do not read memory (for i/o spaces)\n"
    " . ok to read (default)\n"
    "NOTE: flags are saved as defaults\n"
    "";
1820 | 1820 | ||
/* Help text printed for "?" inside the memory-examine interactive loop. */
static char *memex_subcmd_help_string =
    "Memory examine subcommands:\n"
    " hexval write this val to current location\n"
    " 'string' write chars from string to this location\n"
    " ' increment address\n"
    " ^ decrement address\n"
    " / increment addr by 0x10. //=0x100, ///=0x1000, etc\n"
    " \\ decrement addr by 0x10. \\\\=0x100, \\\\\\=0x1000, etc\n"
    " ` clear no-read flag\n"
    " ; stay at this addr\n"
    " v change to byte mode\n"
    " w change to word (2 byte) mode\n"
    " l change to long (4 byte) mode\n"
    " u change to doubleword (8 byte) mode\n"
    " m addr change current addr\n"
    " n toggle no-read flag\n"
    " r toggle byte reverse flag\n"
    " < count back up count bytes\n"
    " > count skip forward count bytes\n"
    " x exit this mode\n"
    "";
1842 | 1842 | ||
/*
 * 'm' command: interactive memory examine/modify loop.
 * Parses an optional start address and mode flags, then repeatedly
 * shows the value at adrs and accepts subcommands (see
 * memex_subcmd_help_string).  Uses the file-scope adrs/size/termch
 * state so it resumes where it left off.
 */
static void
memex(void)
{
	int cmd, inc, i, nslash;
	unsigned long n;
	unsigned char val[16];

	scanhex((void *)&adrs);
	cmd = skipbl();
	if (cmd == '?') {
		printf(memex_help_string);
		return;
	} else {
		termch = cmd;	/* push char back for the flag loop below */
	}
	last_cmd = "m\n";
	/* consume mode flags up to end of line */
	while ((cmd = skipbl()) != '\n') {
		switch( cmd ){
		case 'b':	size = 1;	break;
		case 'w':	size = 2;	break;
		case 'l':	size = 4;	break;
		case 'd':	size = 8;	break;
		case 'r':	brev = !brev;	break;
		case 'n':	mnoread = 1;	break;
		case '.':	mnoread = 0;	break;
		}
	}
	/* clamp size to the supported access widths */
	if( size <= 0 )
		size = 1;
	else if( size > 8 )
		size = 8;
	for(;;){
		/* display current location (unless no-read mode) */
		if (!mnoread)
			n = mread(adrs, val, size);
		printf(REG"%c", adrs, brev? 'r': ' ');
		if (!mnoread) {
			if (brev)
				byterev(val, size);
			putchar(' ');
			for (i = 0; i < n; ++i)
				printf("%.2x", val[i]);
			/* bytes mread couldn't fetch: show fault markers */
			for (; i < size; ++i)
				printf("%s", fault_chars[fault_type]);
		}
		putchar(' ');
		inc = size;	/* default step after this line */
		nslash = 0;
		for(;;){
			/* a hex value means: write it here */
			if( scanhex(&n) ){
				for (i = 0; i < size; ++i)
					val[i] = n >> (i * 8);
				if (!brev)
					byterev(val, size);	/* to memory order */
				mwrite(adrs, val, size);
				inc = size;
			}
			cmd = skipbl();
			if (cmd == '\n')
				break;
			inc = 0;	/* subcommands set their own stepping */
			switch (cmd) {
			case '\'':
				/* write a literal string, char by char */
				for(;;){
					n = inchar();
					if( n == '\\' )
						n = bsesc();
					else if( n == '\'' )
						break;
					for (i = 0; i < size; ++i)
						val[i] = n >> (i * 8);
					if (!brev)
						byterev(val, size);
					mwrite(adrs, val, size);
					adrs += size;
				}
				adrs -= size;	/* undo the final advance */
				inc = size;
				break;
			case ',':
				adrs += size;
				break;
			case '.':
				mnoread = 0;
				break;
			case ';':
				break;	/* stay at this address */
			case 'x':
			case EOF:
				scannl();
				return;
			case 'b':
			case 'v':
				size = 1;
				break;
			case 'w':
				size = 2;
				break;
			case 'l':
				size = 4;
				break;
			case 'u':
				size = 8;
				break;
			case '^':
				adrs -= size;
				break;
			/* NOTE(review): this bare break is dead code (the
			 * case above already broke); harmless leftover. */
			break;
			case '/':
				/* each extra '/' multiplies the step by 16 */
				if (nslash > 0)
					adrs -= 1 << nslash;
				else
					nslash = 0;
				nslash += 4;
				adrs += 1 << nslash;
				break;
			case '\\':
				/* each extra '\' multiplies the step by 16 */
				if (nslash < 0)
					adrs += 1 << -nslash;
				else
					nslash = 0;
				nslash -= 4;
				adrs -= 1 << -nslash;
				break;
			case 'm':
				scanhex((void *)&adrs);
				break;
			case 'n':
				mnoread = 1;
				break;
			case 'r':
				brev = !brev;
				break;
			case '<':
				n = size;
				scanhex(&n);
				adrs -= n;
				break;
			case '>':
				n = size;
				scanhex(&n);
				adrs += n;
				break;
			case '?':
				printf(memex_subcmd_help_string);
				break;
			}
		}
		adrs += inc;
	}
}
1993 | 1993 | ||
/*
 * Read the character following a backslash in string input and
 * translate the usual C escapes (n, r, b, t); anything else is
 * returned unchanged.
 */
static int
bsesc(void)
{
	int c = inchar();

	if (c == 'n')
		return '\n';
	if (c == 'r')
		return '\r';
	if (c == 'b')
		return '\b';
	if (c == 't')
		return '\t';
	return c;
}
2008 | 2008 | ||
2009 | static void xmon_rawdump (unsigned long adrs, long ndump) | 2009 | static void xmon_rawdump (unsigned long adrs, long ndump) |
2010 | { | 2010 | { |
2011 | long n, m, r, nr; | 2011 | long n, m, r, nr; |
2012 | unsigned char temp[16]; | 2012 | unsigned char temp[16]; |
2013 | 2013 | ||
2014 | for (n = ndump; n > 0;) { | 2014 | for (n = ndump; n > 0;) { |
2015 | r = n < 16? n: 16; | 2015 | r = n < 16? n: 16; |
2016 | nr = mread(adrs, temp, r); | 2016 | nr = mread(adrs, temp, r); |
2017 | adrs += nr; | 2017 | adrs += nr; |
2018 | for (m = 0; m < r; ++m) { | 2018 | for (m = 0; m < r; ++m) { |
2019 | if (m < nr) | 2019 | if (m < nr) |
2020 | printf("%.2x", temp[m]); | 2020 | printf("%.2x", temp[m]); |
2021 | else | 2021 | else |
2022 | printf("%s", fault_chars[fault_type]); | 2022 | printf("%s", fault_chars[fault_type]); |
2023 | } | 2023 | } |
2024 | n -= r; | 2024 | n -= r; |
2025 | if (nr < r) | 2025 | if (nr < r) |
2026 | break; | 2026 | break; |
2027 | } | 2027 | } |
2028 | printf("\n"); | 2028 | printf("\n"); |
2029 | } | 2029 | } |
2030 | 2030 | ||
2031 | #ifdef CONFIG_PPC64 | 2031 | #ifdef CONFIG_PPC64 |
/*
 * Print the interesting fields of one CPU's paca, each with its value
 * and its byte offset within struct paca_struct.  Access is fenced
 * with setjmp/catch_memory_errors so a bad paca pointer doesn't kill
 * xmon.
 */
static void dump_one_paca(int cpu)
{
	struct paca_struct *p;

	if (setjmp(bus_error_jmp) != 0) {
		printf("*** Error dumping paca for cpu 0x%x!\n", cpu);
		return;
	}

	catch_memory_errors = 1;
	sync();

	p = &paca[cpu];

	printf("paca for cpu 0x%x @ %p:\n", cpu, p);

	printf(" %-*s = %s\n", 16, "possible", cpu_possible(cpu) ? "yes" : "no");
	printf(" %-*s = %s\n", 16, "present", cpu_present(cpu) ? "yes" : "no");
	printf(" %-*s = %s\n", 16, "online", cpu_online(cpu) ? "yes" : "no");

/* Print one field: name, value (in the given printf format) and its
 * offset inside struct paca_struct. */
#define DUMP(paca, name, format) \
	printf(" %-*s = %#-*"format"\t(0x%lx)\n", 16, #name, 18, paca->name, \
		offsetof(struct paca_struct, name));

	DUMP(p, lock_token, "x");
	DUMP(p, paca_index, "x");
	DUMP(p, kernel_toc, "lx");
	DUMP(p, kernelbase, "lx");
	DUMP(p, kernel_msr, "lx");
#ifdef CONFIG_PPC_STD_MMU_64
	DUMP(p, stab_real, "lx");
	DUMP(p, stab_addr, "lx");
#endif
	DUMP(p, emergency_sp, "p");
#ifdef CONFIG_PPC_BOOK3S_64
	DUMP(p, mc_emergency_sp, "p");
	DUMP(p, in_mce, "x");
#endif
	DUMP(p, data_offset, "lx");
	DUMP(p, hw_cpu_id, "x");
	DUMP(p, cpu_start, "x");
	DUMP(p, kexec_state, "x");
	DUMP(p, __current, "p");
	DUMP(p, kstack, "lx");
	DUMP(p, stab_rr, "lx");
	DUMP(p, saved_r1, "lx");
	DUMP(p, trap_save, "x");
	DUMP(p, soft_enabled, "x");
	DUMP(p, irq_happened, "x");
	DUMP(p, io_sync, "x");
	DUMP(p, irq_work_pending, "x");
	DUMP(p, nap_state_lost, "x");

#undef DUMP

	catch_memory_errors = 0;
	sync();
}
2090 | 2090 | ||
2091 | static void dump_all_pacas(void) | 2091 | static void dump_all_pacas(void) |
2092 | { | 2092 | { |
2093 | int cpu; | 2093 | int cpu; |
2094 | 2094 | ||
2095 | if (num_possible_cpus() == 0) { | 2095 | if (num_possible_cpus() == 0) { |
2096 | printf("No possible cpus, use 'dp #' to dump individual cpus\n"); | 2096 | printf("No possible cpus, use 'dp #' to dump individual cpus\n"); |
2097 | return; | 2097 | return; |
2098 | } | 2098 | } |
2099 | 2099 | ||
2100 | for_each_possible_cpu(cpu) | 2100 | for_each_possible_cpu(cpu) |
2101 | dump_one_paca(cpu); | 2101 | dump_one_paca(cpu); |
2102 | } | 2102 | } |
2103 | 2103 | ||
2104 | static void dump_pacas(void) | 2104 | static void dump_pacas(void) |
2105 | { | 2105 | { |
2106 | unsigned long num; | 2106 | unsigned long num; |
2107 | int c; | 2107 | int c; |
2108 | 2108 | ||
2109 | c = inchar(); | 2109 | c = inchar(); |
2110 | if (c == 'a') { | 2110 | if (c == 'a') { |
2111 | dump_all_pacas(); | 2111 | dump_all_pacas(); |
2112 | return; | 2112 | return; |
2113 | } | 2113 | } |
2114 | 2114 | ||
2115 | termch = c; /* Put c back, it wasn't 'a' */ | 2115 | termch = c; /* Put c back, it wasn't 'a' */ |
2116 | 2116 | ||
2117 | if (scanhex(&num)) | 2117 | if (scanhex(&num)) |
2118 | dump_one_paca(num); | 2118 | dump_one_paca(num); |
2119 | else | 2119 | else |
2120 | dump_one_paca(xmon_owner); | 2120 | dump_one_paca(xmon_owner); |
2121 | } | 2121 | } |
2122 | #endif | 2122 | #endif |
2123 | 2123 | ||
/* Local hex-digit test: xmon avoids depending on <linux/ctype.h> here. */
#define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
			 || ('a' <= (c) && (c) <= 'f') \
			 || ('A' <= (c) && (c) <= 'F'))
/*
 * 'd' command dispatcher: "dp" dumps pacas (64-bit only), "di" dumps
 * instructions, "dl" dumps the kernel log buffer, "dr" does a raw hex
 * dump, and plain "d" does a hex+ASCII dump.  The address and count
 * are parsed from the rest of the line; adrs advances so repeating
 * the command continues the dump.
 */
static void
dump(void)
{
	int c;

	c = inchar();

#ifdef CONFIG_PPC64
	if (c == 'p') {
		dump_pacas();
		return;
	}
#endif

	/* If c is a hex digit (but not the 'f'/'d' subcommand letters) or
	 * a newline, it belongs to the address: push it back for scanhex. */
	if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n')
		termch = c;
	scanhex((void *)&adrs);
	if (termch != '\n')
		termch = 0;
	if (c == 'i') {
		/* 'di': disassemble nidump instructions */
		scanhex(&nidump);
		if (nidump == 0)
			nidump = 16;
		else if (nidump > MAX_DUMP)
			nidump = MAX_DUMP;
		adrs += ppc_inst_dump(adrs, nidump, 1);
		last_cmd = "di\n";
	} else if (c == 'l') {
		dump_log_buf();
	} else if (c == 'r') {
		/* 'dr': raw hex dump of ndump bytes */
		scanhex(&ndump);
		if (ndump == 0)
			ndump = 64;
		xmon_rawdump(adrs, ndump);
		adrs += ndump;
		last_cmd = "dr\n";
	} else {
		/* plain 'd': hex + ASCII dump of ndump bytes */
		scanhex(&ndump);
		if (ndump == 0)
			ndump = 64;
		else if (ndump > MAX_DUMP)
			ndump = MAX_DUMP;
		prdump(adrs, ndump);
		adrs += ndump;
		last_cmd = "d\n";
	}
}
2174 | 2174 | ||
/*
 * Hex + ASCII dump of ndump bytes starting at adrs, 16 bytes per line:
 * address, hex bytes grouped by machine word, then printable ASCII
 * between '|' bars.  Unreadable bytes show the fault marker; the dump
 * stops after the line containing the first short read.
 */
static void
prdump(unsigned long adrs, long ndump)
{
	long n, m, c, r, nr;
	unsigned char temp[16];

	for (n = ndump; n > 0;) {
		printf(REG, adrs);
		putchar(' ');
		r = n < 16? n: 16;	/* bytes on this line */
		nr = mread(adrs, temp, r);	/* bytes actually readable */
		adrs += nr;
		for (m = 0; m < r; ++m) {
			/* extra space between machine-word groups */
			if ((m & (sizeof(long) - 1)) == 0 && m > 0)
				putchar(' ');
			if (m < nr)
				printf("%.2x", temp[m]);
			else
				printf("%s", fault_chars[fault_type]);
		}
		/* pad a short final line so the ASCII column lines up */
		for (; m < 16; ++m) {
			if ((m & (sizeof(long) - 1)) == 0)
				putchar(' ');
			printf(" ");
		}
		printf(" |");
		for (m = 0; m < r; ++m) {
			if (m < nr) {
				c = temp[m];
				putchar(' ' <= c && c <= '~'? c: '.');
			} else
				putchar(' ');
		}
		n -= r;
		for (; m < 16; ++m)
			putchar(' ');
		printf("|\n");
		if (nr < r)	/* fault: stop dumping */
			break;
	}
}
2216 | 2216 | ||
/* Callback that disassembles one instruction word at addr. */
typedef int (*instruction_dump_func)(unsigned long inst, unsigned long addr);

/*
 * Core of the instruction-dump commands: read and print count 4-byte
 * instructions starting at adr, disassembling each via dump_func.
 * Consecutive identical instructions are collapsed into a single
 * "..." line.  Returns the number of bytes consumed.
 */
static int
generic_inst_dump(unsigned long adr, long count, int praddr,
			instruction_dump_func dump_func)
{
	int nr, dotted;
	unsigned long first_adr;
	unsigned long inst, last_inst = 0;
	unsigned char val[4];

	dotted = 0;
	for (first_adr = adr; count > 0; --count, adr += 4) {
		nr = mread(adr, val, 4);
		if (nr == 0) {
			/* unreadable: show fault markers (when addresses
			 * were requested) and stop the dump */
			if (praddr) {
				const char *x = fault_chars[fault_type];
				printf(REG" %s%s%s%s\n", adr, x, x, x, x);
			}
			break;
		}
		inst = GETWORD(val);
		/* collapse a run of repeated instructions into "..." */
		if (adr > first_adr && inst == last_inst) {
			if (!dotted) {
				printf(" ...\n");
				dotted = 1;
			}
			continue;
		}
		dotted = 0;
		last_inst = inst;
		if (praddr)
			printf(REG" %.8x", adr, inst);
		printf("\t");
		dump_func(inst, adr);
		printf("\n");
	}
	return adr - first_adr;
}
2256 | 2256 | ||
/*
 * Disassemble count powerpc instructions starting at adr; praddr
 * selects whether each line is prefixed with its address.  Returns
 * the number of bytes consumed.
 */
static int
ppc_inst_dump(unsigned long adr, long count, int praddr)
{
	return generic_inst_dump(adr, count, praddr, print_insn_powerpc);
}
2262 | 2262 | ||
/* Print addr in symbolic form as a trailing "# symbol+off" comment. */
void
print_address(unsigned long addr)
{
	xmon_print_symbol(addr, "\t# ", "");
}
2268 | 2268 | ||
2269 | void | 2269 | void |
2270 | dump_log_buf(void) | 2270 | dump_log_buf(void) |
2271 | { | 2271 | { |
2272 | struct kmsg_dumper dumper = { .active = 1 }; | 2272 | struct kmsg_dumper dumper = { .active = 1 }; |
2273 | unsigned char buf[128]; | 2273 | unsigned char buf[128]; |
2274 | size_t len; | 2274 | size_t len; |
2275 | 2275 | ||
2276 | if (setjmp(bus_error_jmp) != 0) { | 2276 | if (setjmp(bus_error_jmp) != 0) { |
2277 | printf("Error dumping printk buffer!\n"); | 2277 | printf("Error dumping printk buffer!\n"); |
2278 | return; | 2278 | return; |
2279 | } | 2279 | } |
2280 | 2280 | ||
2281 | catch_memory_errors = 1; | 2281 | catch_memory_errors = 1; |
2282 | sync(); | 2282 | sync(); |
2283 | 2283 | ||
2284 | kmsg_dump_rewind_nolock(&dumper); | 2284 | kmsg_dump_rewind_nolock(&dumper); |
2285 | while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) { | 2285 | while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) { |
2286 | buf[len] = '\0'; | 2286 | buf[len] = '\0'; |
2287 | printf("%s", buf); | 2287 | printf("%s", buf); |
2288 | } | 2288 | } |
2289 | 2289 | ||
2290 | sync(); | 2290 | sync(); |
2291 | /* wait a little while to see if we get a machine check */ | 2291 | /* wait a little while to see if we get a machine check */ |
2292 | __delay(200); | 2292 | __delay(200); |
2293 | catch_memory_errors = 0; | 2293 | catch_memory_errors = 0; |
2294 | } | 2294 | } |
2295 | 2295 | ||
2296 | /* | 2296 | /* |
2297 | * Memory operations - move, set, print differences | 2297 | * Memory operations - move, set, print differences |
2298 | */ | 2298 | */ |
/* Sticky parameters shared by the memory commands (memops, memdiffs,
 * memlocate); they persist so a repeated command reuses prior values. */
static unsigned long mdest;		/* destination address */
static unsigned long msrc;		/* source address */
static unsigned long mval;		/* byte value to set memory to */
static unsigned long mcount;		/* # bytes to affect */
static unsigned long mdiffs;		/* max # differences to print */
2304 | 2304 | ||
2305 | static void | 2305 | static void |
2306 | memops(int cmd) | 2306 | memops(int cmd) |
2307 | { | 2307 | { |
2308 | scanhex((void *)&mdest); | 2308 | scanhex((void *)&mdest); |
2309 | if( termch != '\n' ) | 2309 | if( termch != '\n' ) |
2310 | termch = 0; | 2310 | termch = 0; |
2311 | scanhex((void *)(cmd == 's'? &mval: &msrc)); | 2311 | scanhex((void *)(cmd == 's'? &mval: &msrc)); |
2312 | if( termch != '\n' ) | 2312 | if( termch != '\n' ) |
2313 | termch = 0; | 2313 | termch = 0; |
2314 | scanhex((void *)&mcount); | 2314 | scanhex((void *)&mcount); |
2315 | switch( cmd ){ | 2315 | switch( cmd ){ |
2316 | case 'm': | 2316 | case 'm': |
2317 | memmove((void *)mdest, (void *)msrc, mcount); | 2317 | memmove((void *)mdest, (void *)msrc, mcount); |
2318 | break; | 2318 | break; |
2319 | case 's': | 2319 | case 's': |
2320 | memset((void *)mdest, mval, mcount); | 2320 | memset((void *)mdest, mval, mcount); |
2321 | break; | 2321 | break; |
2322 | case 'd': | 2322 | case 'd': |
2323 | if( termch != '\n' ) | 2323 | if( termch != '\n' ) |
2324 | termch = 0; | 2324 | termch = 0; |
2325 | scanhex((void *)&mdiffs); | 2325 | scanhex((void *)&mdiffs); |
2326 | memdiffs((unsigned char *)mdest, (unsigned char *)msrc, mcount, mdiffs); | 2326 | memdiffs((unsigned char *)mdest, (unsigned char *)msrc, mcount, mdiffs); |
2327 | break; | 2327 | break; |
2328 | } | 2328 | } |
2329 | } | 2329 | } |
2330 | 2330 | ||
/*
 * Compare nb bytes at p1 and p2, printing the first maxpr differing
 * byte pairs and, if more were found, a total count.
 */
static void
memdiffs(unsigned char *p1, unsigned char *p2, unsigned nb, unsigned maxpr)
{
	unsigned remaining, prt = 0;

	for (remaining = nb; remaining > 0; --remaining) {
		/* advance both pointers every iteration, match or not */
		int differ = (*p1++ != *p2++);

		if (differ && ++prt <= maxpr) {
			printf("%.16x %.2x # %.16x %.2x\n", p1 - 1,
			       p1[-1], p2 - 1, p2[-1]);
		}
	}
	if (prt > maxpr)
		printf("Total of %d differences\n", prt);
}
2345 | 2345 | ||
static unsigned mend;	/* memlocate: end address of the scan range */
static unsigned mask;	/* memlocate: bitmask applied before comparing */
2348 | 2348 | ||
/*
 * 'ml' command: scan memory words from mdest up to mend looking for
 * 32-bit values equal to mval under mask; print at most the first
 * 10 matches.  Arguments: start, end, value, [mask].
 */
static void
memlocate(void)
{
	unsigned a, n;
	unsigned char val[4];

	last_cmd = "ml";
	scanhex((void *)&mdest);
	if (termch != '\n') {
		termch = 0;
		scanhex((void *)&mend);
		if (termch != '\n') {
			termch = 0;
			scanhex((void *)&mval);
			mask = ~0;	/* default: compare all bits */
			if (termch != '\n') termch = 0;
			scanhex((void *)&mask);
		}
	}
	n = 0;
	for (a = mdest; a < mend; a += 4) {
		/* only readable words participate in the match */
		if (mread(a, val, 4) == 4
		    && ((GETWORD(val) ^ mval) & mask) == 0) {
			printf("%.16x: %.16x\n", a, GETWORD(val));
			if (++n >= 10)	/* cap output at 10 hits */
				break;
		}
	}
}
2378 | 2378 | ||
static unsigned long mskip = 0x1000;	/* memzcan: probe stride (default one page) */
static unsigned long mlim = 0xffffffff;	/* memzcan: upper scan limit */
2381 | 2381 | ||
/*
 * 'mz' command: probe one byte every mskip bytes from mdest up to
 * mlim and print the ranges of addresses that were readable, as
 * "start .. end" pairs (transitions of the ok/ook state).
 */
static void
memzcan(void)
{
	unsigned char v;
	unsigned a;
	int ok, ook;	/* ok: this probe readable; ook: previous probe */

	scanhex(&mdest);
	if (termch != '\n') termch = 0;
	scanhex(&mskip);
	if (termch != '\n') termch = 0;
	scanhex(&mlim);
	ook = 0;
	for (a = mdest; a < mlim; a += mskip) {
		ok = mread(a, &v, 1);
		if (ok && !ook) {
			/* start of a readable range */
			printf("%.8x .. ", a);
		} else if (!ok && ook)
			/* end of a readable range */
			printf("%.8x\n", a - mskip);
		ook = ok;
		if (a + mskip < a)	/* address wrapped around: stop */
			break;
	}
	if (ook)	/* range still open at the limit: close it */
		printf("%.8x\n", a - mskip);
}
2408 | 2408 | ||
2409 | static void proccall(void) | 2409 | static void proccall(void) |
2410 | { | 2410 | { |
2411 | unsigned long args[8]; | 2411 | unsigned long args[8]; |
2412 | unsigned long ret; | 2412 | unsigned long ret; |
2413 | int i; | 2413 | int i; |
2414 | typedef unsigned long (*callfunc_t)(unsigned long, unsigned long, | 2414 | typedef unsigned long (*callfunc_t)(unsigned long, unsigned long, |
2415 | unsigned long, unsigned long, unsigned long, | 2415 | unsigned long, unsigned long, unsigned long, |
2416 | unsigned long, unsigned long, unsigned long); | 2416 | unsigned long, unsigned long, unsigned long); |
2417 | callfunc_t func; | 2417 | callfunc_t func; |
2418 | 2418 | ||
2419 | if (!scanhex(&adrs)) | 2419 | if (!scanhex(&adrs)) |
2420 | return; | 2420 | return; |
2421 | if (termch != '\n') | 2421 | if (termch != '\n') |
2422 | termch = 0; | 2422 | termch = 0; |
2423 | for (i = 0; i < 8; ++i) | 2423 | for (i = 0; i < 8; ++i) |
2424 | args[i] = 0; | 2424 | args[i] = 0; |
2425 | for (i = 0; i < 8; ++i) { | 2425 | for (i = 0; i < 8; ++i) { |
2426 | if (!scanhex(&args[i]) || termch == '\n') | 2426 | if (!scanhex(&args[i]) || termch == '\n') |
2427 | break; | 2427 | break; |
2428 | termch = 0; | 2428 | termch = 0; |
2429 | } | 2429 | } |
2430 | func = (callfunc_t) adrs; | 2430 | func = (callfunc_t) adrs; |
2431 | ret = 0; | 2431 | ret = 0; |
2432 | if (setjmp(bus_error_jmp) == 0) { | 2432 | if (setjmp(bus_error_jmp) == 0) { |
2433 | catch_memory_errors = 1; | 2433 | catch_memory_errors = 1; |
2434 | sync(); | 2434 | sync(); |
2435 | ret = func(args[0], args[1], args[2], args[3], | 2435 | ret = func(args[0], args[1], args[2], args[3], |
2436 | args[4], args[5], args[6], args[7]); | 2436 | args[4], args[5], args[6], args[7]); |
2437 | sync(); | 2437 | sync(); |
2438 | printf("return value is %x\n", ret); | 2438 | printf("return value is %x\n", ret); |
2439 | } else { | 2439 | } else { |
2440 | printf("*** %x exception occurred\n", fault_except); | 2440 | printf("*** %x exception occurred\n", fault_except); |
2441 | } | 2441 | } |
2442 | catch_memory_errors = 0; | 2442 | catch_memory_errors = 0; |
2443 | } | 2443 | } |
2444 | 2444 | ||
2445 | /* Input scanning routines */ | 2445 | /* Input scanning routines */ |
2446 | int | 2446 | int |
2447 | skipbl(void) | 2447 | skipbl(void) |
2448 | { | 2448 | { |
2449 | int c; | 2449 | int c; |
2450 | 2450 | ||
2451 | if( termch != 0 ){ | 2451 | if( termch != 0 ){ |
2452 | c = termch; | 2452 | c = termch; |
2453 | termch = 0; | 2453 | termch = 0; |
2454 | } else | 2454 | } else |
2455 | c = inchar(); | 2455 | c = inchar(); |
2456 | while( c == ' ' || c == '\t' ) | 2456 | while( c == ' ' || c == '\t' ) |
2457 | c = inchar(); | 2457 | c = inchar(); |
2458 | return c; | 2458 | return c; |
2459 | } | 2459 | } |
2460 | 2460 | ||
/*
 * Register names accepted by scanhex()'s "%name" syntax.  The table
 * order must match the word layout of struct pt_regs, since scanhex()
 * indexes xmon_regs as an array of longs using the position found here.
 */
#define N_PTREGS 44
static char *regnames[N_PTREGS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
	"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
	"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
	"pc", "msr", "or3", "ctr", "lr", "xer", "ccr",
#ifdef CONFIG_PPC64
	"softe",
#else
	"mq",
#endif
	"trap", "dar", "dsisr", "res"
};
2475 | 2475 | ||
/*
 * Parse one value from the command line into *vp.  Accepts:
 *   %regname  - value of a register from xmon_regs (see regnames[])
 *   $symbol   - address of a kernel symbol via kallsyms
 *   hex digits, with an optional leading "0x"
 * Returns 1 on success, 0 if nothing parseable was found.  The first
 * non-value character is pushed back into termch for the caller.
 */
int
scanhex(unsigned long *vp)
{
	int c, d;
	unsigned long v;

	c = skipbl();
	if (c == '%') {
		/* parse register name */
		char regname[8];
		int i;

		for (i = 0; i < sizeof(regname) - 1; ++i) {
			c = inchar();
			if (!isalnum(c)) {
				termch = c;	/* push back terminator */
				break;
			}
			regname[i] = c;
		}
		regname[i] = 0;
		for (i = 0; i < N_PTREGS; ++i) {
			if (strcmp(regnames[i], regname) == 0) {
				if (xmon_regs == NULL) {
					printf("regs not available\n");
					return 0;
				}
				/* index pt_regs as an array of longs;
				 * regnames[] order matches that layout */
				*vp = ((unsigned long *)xmon_regs)[i];
				return 1;
			}
		}
		printf("invalid register name '%%%s'\n", regname);
		return 0;
	}

	/* skip leading "0x" if any */

	if (c == '0') {
		c = inchar();
		if (c == 'x') {
			c = inchar();
		} else {
			d = hexdigit(c);
			if (d == EOF) {
				/* a lone "0" is itself a valid value */
				termch = c;
				*vp = 0;
				return 1;
			}
		}
	} else if (c == '$') {
		/* symbol name: read up to whitespace, then look it up
		 * with machine-check catching enabled */
		int i;
		for (i=0; i<63; i++) {
			c = inchar();
			if (isspace(c)) {
				termch = c;
				break;
			}
			tmpstr[i] = c;
		}
		tmpstr[i++] = 0;
		*vp = 0;
		if (setjmp(bus_error_jmp) == 0) {
			catch_memory_errors = 1;
			sync();
			*vp = kallsyms_lookup_name(tmpstr);
			sync();
		}
		catch_memory_errors = 0;
		if (!(*vp)) {
			printf("unknown symbol '%s'\n", tmpstr);
			return 0;
		}
		return 1;
	}

	/* accumulate hex digits until a non-digit terminates the value */
	d = hexdigit(c);
	if (d == EOF) {
		termch = c;
		return 0;
	}
	v = 0;
	do {
		v = (v << 4) + d;
		c = inchar();
		d = hexdigit(c);
	} while (d != EOF);
	termch = c;
	*vp = v;
	return 1;
}
2566 | 2566 | ||
2567 | static void | 2567 | static void |
2568 | scannl(void) | 2568 | scannl(void) |
2569 | { | 2569 | { |
2570 | int c; | 2570 | int c; |
2571 | 2571 | ||
2572 | c = termch; | 2572 | c = termch; |
2573 | termch = 0; | 2573 | termch = 0; |
2574 | while( c != '\n' ) | 2574 | while( c != '\n' ) |
2575 | c = inchar(); | 2575 | c = inchar(); |
2576 | } | 2576 | } |
2577 | 2577 | ||
/*
 * Convert a hex digit character to its value 0..15, accepting both
 * cases; return EOF for anything that is not a hex digit.
 */
static int hexdigit(int c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return EOF;
}
2588 | 2588 | ||
2589 | void | 2589 | void |
2590 | getstring(char *s, int size) | 2590 | getstring(char *s, int size) |
2591 | { | 2591 | { |
2592 | int c; | 2592 | int c; |
2593 | 2593 | ||
2594 | c = skipbl(); | 2594 | c = skipbl(); |
2595 | do { | 2595 | do { |
2596 | if( size > 1 ){ | 2596 | if( size > 1 ){ |
2597 | *s++ = c; | 2597 | *s++ = c; |
2598 | --size; | 2598 | --size; |
2599 | } | 2599 | } |
2600 | c = inchar(); | 2600 | c = inchar(); |
2601 | } while( c != ' ' && c != '\t' && c != '\n' ); | 2601 | } while( c != ' ' && c != '\t' && c != '\n' ); |
2602 | termch = c; | 2602 | termch = c; |
2603 | *s = 0; | 2603 | *s = 0; |
2604 | } | 2604 | } |
2605 | 2605 | ||
static char line[256];	/* current input line buffer for inchar() */
static char *lineptr;	/* next unread character in line, NULL = empty */
2608 | 2608 | ||
/* Discard buffered input so the next inchar() reads a fresh line. */
static void
flush_input(void)
{
	lineptr = NULL;
}
2614 | 2614 | ||
/*
 * Return the next input character, refilling the line buffer via
 * xmon_gets() when it is exhausted.  Returns EOF when no more input
 * is available.
 */
static int
inchar(void)
{
	if (lineptr == NULL || *lineptr == 0) {
		if (xmon_gets(line, sizeof(line)) == NULL) {
			lineptr = NULL;
			return EOF;
		}
		lineptr = line;
	}
	return *lineptr++;
}
2627 | 2627 | ||
/* Make subsequent inchar() calls consume characters from str
 * (used to replay a stored command such as last_cmd). */
static void
take_input(char *str)
{
	lineptr = str;
}
2633 | 2633 | ||
2634 | 2634 | ||
/*
 * 'la'/'ls' commands: translate between addresses and symbols.
 *   'a' - print the symbol (plus offset) for a given address
 *   's' - print the address of a named symbol
 * The kallsyms lookup runs with machine-check catching enabled.
 */
static void
symbol_lookup(void)
{
	int type = inchar();
	unsigned long addr;
	static char tmp[64];

	switch (type) {
	case 'a':
		if (scanhex(&addr))
			xmon_print_symbol(addr, ": ", "\n");
		termch = 0;
		break;
	case 's':
		getstring(tmp, 64);
		if (setjmp(bus_error_jmp) == 0) {
			catch_memory_errors = 1;
			sync();
			addr = kallsyms_lookup_name(tmp);
			if (addr)
				printf("%s: %lx\n", tmp, addr);
			else
				printf("Symbol '%s' not found.\n", tmp);
			sync();
		}
		catch_memory_errors = 0;
		termch = 0;
		break;
	}
}
2665 | 2665 | ||
2666 | 2666 | ||
/* Print an address in numeric and symbolic form (if possible) */
static void xmon_print_symbol(unsigned long address, const char *mid,
			      const char *after)
{
	char *modname;
	const char *name = NULL;
	unsigned long offset, size;

	printf(REG, address);	/* numeric form first */
	if (setjmp(bus_error_jmp) == 0) {
		/* kallsyms data may be unmapped; catch machine checks */
		catch_memory_errors = 1;
		sync();
		name = kallsyms_lookup(address, &size, &offset, &modname,
				       tmpstr);
		sync();
		/* wait a little while to see if we get a machine check */
		__delay(200);
	}

	catch_memory_errors = 0;

	/* symbolic form only if the lookup succeeded */
	if (name) {
		printf("%s%s+%#lx/%#lx", mid, name, offset, size);
		if (modname)
			printf(" [%s]", modname);
	}
	printf("%s", after);
}
2695 | 2695 | ||
2696 | #ifdef CONFIG_PPC_BOOK3S_64 | 2696 | #ifdef CONFIG_PPC_BOOK3S_64 |
/*
 * Dump the SLB (segment lookaside buffer) of the current CPU by
 * reading each entry back with slbmfee/slbmfev.  Valid entries are
 * decoded into ESID/VSID/LLP fields for both 256M and 1T segments;
 * all-zero entries are skipped.
 */
static void dump_slb(void)
{
	int i;
	unsigned long esid,vsid,valid;
	unsigned long llp;

	printf("SLB contents of cpu %x\n", smp_processor_id());

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee  %0,%1" : "=r" (esid) : "r" (i));
		asm volatile("slbmfev  %0,%1" : "=r" (vsid) : "r" (i));
		valid = (esid & SLB_ESID_V);
		/* print raw words if any bit is set, decode when valid */
		if (valid | esid | vsid) {
			printf("%02d %016lx %016lx", i, esid, vsid);
			if (valid) {
				llp = vsid & SLB_VSID_LLP;
				if (vsid & SLB_VSID_B_1T) {
					printf("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx \n",
						GET_ESID_1T(esid),
						(vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T,
						llp);
				} else {
					printf(" 256M ESID=%9lx  VSID=%13lx LLP:%3lx \n",
						GET_ESID(esid),
						(vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT,
						llp);
				}
			} else
				printf("\n");
		}
	}
}
2729 | 2729 | ||
/*
 * Dump the software segment table of the current CPU (pre-SLB hash
 * MMUs).  Entries are 16 bytes (two longs); all-zero entries are
 * skipped.  The table address comes from the per-CPU paca.
 */
static void dump_stab(void)
{
	int i;
	unsigned long *tmp = (unsigned long *)local_paca->stab_addr;

	printf("Segment table contents of cpu %x\n", smp_processor_id());

	for (i = 0; i < PAGE_SIZE/16; i++) {
		unsigned long a, b;

		a = *tmp++;
		b = *tmp++;

		if (a || b) {
			printf("%03d %016lx ", i, a);
			printf("%016lx\n", b);
		}
	}
}
2749 | 2749 | ||
/* 'S' command (64-bit hash MMU): dump SLB or segment table
 * depending on which the hardware supports. */
void dump_segments(void)
{
	if (mmu_has_feature(MMU_FTR_SLB))
		dump_slb();
	else
		dump_stab();
}
2757 | #endif | 2757 | #endif |
2758 | 2758 | ||
2759 | #ifdef CONFIG_PPC_STD_MMU_32 | 2759 | #ifdef CONFIG_PPC_STD_MMU_32 |
/* 'S' command (32-bit hash MMU): print all 16 segment registers. */
void dump_segments(void)
{
	int i;

	printf("sr0-15 =");
	for (i = 0; i < 16; ++i)
		printf(" %x", mfsrin(i));
	printf("\n");
}
2769 | #endif | 2769 | #endif |
2770 | 2770 | ||
2771 | #ifdef CONFIG_44x | 2771 | #ifdef CONFIG_44x |
/*
 * Dump the 44x software-managed TLB: read each entry's three words
 * back with tlbre and, for valid entries, decode the EPN->RPN mapping
 * and the WIMGE attribute bits (uppercase letter = bit set).
 */
static void dump_tlb_44x(void)
{
	int i;

	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		unsigned long w0,w1,w2;
		asm volatile("tlbre  %0,%1,0" : "=r" (w0) : "r" (i));
		asm volatile("tlbre  %0,%1,1" : "=r" (w1) : "r" (i));
		asm volatile("tlbre  %0,%1,2" : "=r" (w2) : "r" (i));
		printf("[%02x] %08x %08x %08x ", i, w0, w1, w2);
		if (w0 & PPC44x_TLB_VALID) {
			printf("V %08x -> %01x%08x %c%c%c%c%c",
			       w0 & PPC44x_TLB_EPN_MASK,
			       w1 & PPC44x_TLB_ERPN_MASK,
			       w1 & PPC44x_TLB_RPN_MASK,
			       (w2 & PPC44x_TLB_W) ? 'W' : 'w',
			       (w2 & PPC44x_TLB_I) ? 'I' : 'i',
			       (w2 & PPC44x_TLB_M) ? 'M' : 'm',
			       (w2 & PPC44x_TLB_G) ? 'G' : 'g',
			       (w2 & PPC44x_TLB_E) ? 'E' : 'e');
		}
		printf("\n");
	}
}
2796 | #endif /* CONFIG_44x */ | 2796 | #endif /* CONFIG_44x */ |
2797 | 2797 | ||
#ifdef CONFIG_PPC_BOOK3E
/*
 * Dump the Book3E MMU TLB arrays.  MMUCFG is decoded first for the MMU
 * architecture version, number of TLB arrays and PID/LPID/real-address
 * field widths; then every entry of every array is read back through
 * the MAS registers with tlbre and its EPN, PID, page size, WIMGE and
 * permission bits are printed.  Valid entries only.
 */
static void dump_tlb_book3e(void)
{
	u32 mmucfg, pidmask, lpidmask;
	u64 ramask;
	int i, tlb, ntlbs, pidsz, lpidsz, rasz, lrat = 0;
	int mmu_version;
	/* Page-size names, indexed by the TSIZE field of MAS1 */
	static const char *pgsz_names[] = {
		"  1K",
		"  2K",
		"  4K",
		"  8K",
		" 16K",
		" 32K",
		" 64K",
		"128K",
		"256K",
		"512K",
		"  1M",
		"  2M",
		"  4M",
		"  8M",
		" 16M",
		" 32M",
		" 64M",
		"128M",
		"256M",
		"512M",
		"  1G",
		"  2G",
		"  4G",
		"  8G",
		" 16G",
		" 32G",
		" 64G",
		"128G",
		"256G",
		"512G",
		"  1T",
		"  2T",
	};

	/* Gather some infos about the MMU */
	mmucfg = mfspr(SPRN_MMUCFG);
	mmu_version = (mmucfg & 3) + 1;
	ntlbs = ((mmucfg >> 2) & 3) + 1;
	pidsz = ((mmucfg >> 6) & 0x1f) + 1;
	lpidsz = (mmucfg >> 24) & 0xf;
	rasz = (mmucfg >> 16) & 0x7f;
	/* MAV > 1.0 parts may implement an LRAT (bit 16 of MMUCFG) */
	if ((mmu_version > 1) && (mmucfg & 0x10000))
		lrat = 1;
	printf("Book3E MMU MAV=%d.0,%d TLBs,%d-bit PID,%d-bit LPID,%d-bit RA\n",
	       mmu_version, ntlbs, pidsz, lpidsz, rasz);
	pidmask = (1ul << pidsz) - 1;
	lpidmask = (1ul << lpidsz) - 1;
	ramask = (1ull << rasz) - 1;

	for (tlb = 0; tlb < ntlbs; tlb++) {
		u32 tlbcfg;
		int nent, assoc, new_cc = 1;

		printf("TLB %d:\n------\n", tlb);
		/* Each array has its own TLBnCFG SPR */
		switch(tlb) {
		case 0:
			tlbcfg = mfspr(SPRN_TLB0CFG);
			break;
		case 1:
			tlbcfg = mfspr(SPRN_TLB1CFG);
			break;
		case 2:
			tlbcfg = mfspr(SPRN_TLB2CFG);
			break;
		case 3:
			tlbcfg = mfspr(SPRN_TLB3CFG);
			break;
		default:
			printf("Unsupported TLB number !\n");
			continue;
		}
		nent = tlbcfg & 0xfff;
		assoc = (tlbcfg >> 24) & 0xff;
		for (i = 0; i < nent; i++) {
			u32 mas0 = MAS0_TLBSEL(tlb);
			u32 mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K);
			u64 mas2 = 0;
			u64 mas7_mas3;
			int esel = i, cc = i;

			/*
			 * For a set-associative array, walk each set's
			 * ways; MAS2 selects the congruence class, MAS0
			 * the entry (way) within it.
			 */
			if (assoc != 0) {
				cc = i / assoc;
				esel = i % assoc;
				mas2 = cc * 0x1000;
			}

			/* Program the MAS registers, then read the entry */
			mas0 |= MAS0_ESEL(esel);
			mtspr(SPRN_MAS0, mas0);
			mtspr(SPRN_MAS1, mas1);
			mtspr(SPRN_MAS2, mas2);
			asm volatile("tlbre  0,0,0" : : : "memory");
			mas1 = mfspr(SPRN_MAS1);
			mas2 = mfspr(SPRN_MAS2);
			mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
			if (assoc && (i % assoc) == 0)
				new_cc = 1;
			if (!(mas1 & MAS1_VALID))
				continue;
			/* Label the line: set number once, then one way per line */
			if (assoc == 0)
				printf("%04x- ", i);
			else if (new_cc)
				printf("%04x-%c", cc, 'A' + esel);
			else
				printf("    |%c", 'A' + esel);
			new_cc = 0;
			printf(" %016llx %04x %s %c%c AS%c",
			       mas2 & ~0x3ffull,
			       (mas1 >> 16) & 0x3fff,
			       pgsz_names[(mas1 >> 7) & 0x1f],
			       mas1 & MAS1_IND ? 'I' : ' ',
			       mas1 & MAS1_IPROT ? 'P' : ' ',
			       mas1 & MAS1_TS ? '1' : '0');
			printf(" %c%c%c%c%c%c%c",
			       mas2 & MAS2_X0 ? 'a' : ' ',
			       mas2 & MAS2_X1 ? 'v' : ' ',
			       mas2 & MAS2_W  ? 'w' : ' ',
			       mas2 & MAS2_I  ? 'i' : ' ',
			       mas2 & MAS2_M  ? 'm' : ' ',
			       mas2 & MAS2_G  ? 'g' : ' ',
			       mas2 & MAS2_E  ? 'e' : ' ');
			printf(" %016llx", mas7_mas3 & ramask & ~0x7ffull);
			/* Indirect entries encode a sub-page size instead of perms */
			if (mas1 & MAS1_IND)
				printf(" %s\n",
				       pgsz_names[(mas7_mas3 >> 1) & 0x1f]);
			else
				printf(" U%c%c%c S%c%c%c\n",
				       mas7_mas3 & MAS3_UX ? 'x' : ' ',
				       mas7_mas3 & MAS3_UW ? 'w' : ' ',
				       mas7_mas3 & MAS3_UR ? 'r' : ' ',
				       mas7_mas3 & MAS3_SX ? 'x' : ' ',
				       mas7_mas3 & MAS3_SW ? 'w' : ' ',
				       mas7_mas3 & MAS3_SR ? 'r' : ' ');
		}
	}
}
#endif /* CONFIG_PPC_BOOK3E */
2941 | 2941 | ||
2942 | static void xmon_init(int enable) | 2942 | static void xmon_init(int enable) |
2943 | { | 2943 | { |
2944 | if (enable) { | 2944 | if (enable) { |
2945 | __debugger = xmon; | 2945 | __debugger = xmon; |
2946 | __debugger_ipi = xmon_ipi; | 2946 | __debugger_ipi = xmon_ipi; |
2947 | __debugger_bpt = xmon_bpt; | 2947 | __debugger_bpt = xmon_bpt; |
2948 | __debugger_sstep = xmon_sstep; | 2948 | __debugger_sstep = xmon_sstep; |
2949 | __debugger_iabr_match = xmon_iabr_match; | 2949 | __debugger_iabr_match = xmon_iabr_match; |
2950 | __debugger_break_match = xmon_break_match; | 2950 | __debugger_break_match = xmon_break_match; |
2951 | __debugger_fault_handler = xmon_fault_handler; | 2951 | __debugger_fault_handler = xmon_fault_handler; |
2952 | } else { | 2952 | } else { |
2953 | __debugger = NULL; | 2953 | __debugger = NULL; |
2954 | __debugger_ipi = NULL; | 2954 | __debugger_ipi = NULL; |
2955 | __debugger_bpt = NULL; | 2955 | __debugger_bpt = NULL; |
2956 | __debugger_sstep = NULL; | 2956 | __debugger_sstep = NULL; |
2957 | __debugger_iabr_match = NULL; | 2957 | __debugger_iabr_match = NULL; |
2958 | __debugger_break_match = NULL; | 2958 | __debugger_break_match = NULL; |
2959 | __debugger_fault_handler = NULL; | 2959 | __debugger_fault_handler = NULL; |
2960 | } | 2960 | } |
2961 | } | 2961 | } |
2962 | 2962 | ||
#ifdef CONFIG_MAGIC_SYSRQ
/* SysRq 'x' handler: force-enable xmon and enter it on this CPU. */
static void sysrq_handle_xmon(int key)
{
	/* ensure xmon is enabled */
	xmon_init(1);
	debugger(get_irq_regs());
}

static struct sysrq_key_op sysrq_xmon_op = {
	.handler =	sysrq_handle_xmon,
	.help_msg =	"xmon(x)",
	.action_msg =	"Entering xmon",
};

/* Register the SysRq 'x' key at boot. */
static int __init setup_xmon_sysrq(void)
{
	register_sysrq_key('x', &sysrq_xmon_op);
	return 0;
}
__initcall(setup_xmon_sysrq);
#endif /* CONFIG_MAGIC_SYSRQ */
2984 | 2984 | ||
2985 | static int __initdata xmon_early, xmon_off; | 2985 | static int __initdata xmon_early, xmon_off; |
2986 | 2986 | ||
2987 | static int __init early_parse_xmon(char *p) | 2987 | static int __init early_parse_xmon(char *p) |
2988 | { | 2988 | { |
2989 | if (!p || strncmp(p, "early", 5) == 0) { | 2989 | if (!p || strncmp(p, "early", 5) == 0) { |
2990 | /* just "xmon" is equivalent to "xmon=early" */ | 2990 | /* just "xmon" is equivalent to "xmon=early" */ |
2991 | xmon_init(1); | 2991 | xmon_init(1); |
2992 | xmon_early = 1; | 2992 | xmon_early = 1; |
2993 | } else if (strncmp(p, "on", 2) == 0) | 2993 | } else if (strncmp(p, "on", 2) == 0) |
2994 | xmon_init(1); | 2994 | xmon_init(1); |
2995 | else if (strncmp(p, "off", 3) == 0) | 2995 | else if (strncmp(p, "off", 3) == 0) |
2996 | xmon_off = 1; | 2996 | xmon_off = 1; |
2997 | else if (strncmp(p, "nobt", 4) == 0) | 2997 | else if (strncmp(p, "nobt", 4) == 0) |
2998 | xmon_no_auto_backtrace = 1; | 2998 | xmon_no_auto_backtrace = 1; |
2999 | else | 2999 | else |
3000 | return 1; | 3000 | return 1; |
3001 | 3001 | ||
3002 | return 0; | 3002 | return 0; |
3003 | } | 3003 | } |
3004 | early_param("xmon", early_parse_xmon); | 3004 | early_param("xmon", early_parse_xmon); |
3005 | 3005 | ||
/*
 * Late boot setup: enable xmon if it is configured default-on (and not
 * overridden by "xmon=off"), and enter the debugger immediately when
 * "xmon=early" was given on the command line.
 */
void __init xmon_setup(void)
{
#ifdef CONFIG_XMON_DEFAULT
	if (!xmon_off)
		xmon_init(1);
#endif
	if (xmon_early)
		debugger(NULL);
}
3015 | 3015 | ||
#ifdef CONFIG_SPU_BASE

/* Per-SPU state xmon tracks so stopped SPUs can be restarted later. */
struct spu_info {
	struct spu *spu;		/* NULL when this slot is unused */
	u64 saved_mfc_sr1_RW;		/* MFC_SR1 value to restore on restart */
	u32 saved_spu_runcntl_RW;	/* problem-state run control to restore */
	unsigned long dump_addr;	/* next local-store address to dump */
	u8 stopped_ok;			/* set once the SPU was safely stopped */
};

#define XMON_NUM_SPUS	16	/* Enough for current hardware */

static struct spu_info spu_info[XMON_NUM_SPUS];
3029 | 3029 | ||
3030 | void xmon_register_spus(struct list_head *list) | 3030 | void xmon_register_spus(struct list_head *list) |
3031 | { | 3031 | { |
3032 | struct spu *spu; | 3032 | struct spu *spu; |
3033 | 3033 | ||
3034 | list_for_each_entry(spu, list, full_list) { | 3034 | list_for_each_entry(spu, list, full_list) { |
3035 | if (spu->number >= XMON_NUM_SPUS) { | 3035 | if (spu->number >= XMON_NUM_SPUS) { |
3036 | WARN_ON(1); | 3036 | WARN_ON(1); |
3037 | continue; | 3037 | continue; |
3038 | } | 3038 | } |
3039 | 3039 | ||
3040 | spu_info[spu->number].spu = spu; | 3040 | spu_info[spu->number].spu = spu; |
3041 | spu_info[spu->number].stopped_ok = 0; | 3041 | spu_info[spu->number].stopped_ok = 0; |
3042 | spu_info[spu->number].dump_addr = (unsigned long) | 3042 | spu_info[spu->number].dump_addr = (unsigned long) |
3043 | spu_info[spu->number].spu->local_store; | 3043 | spu_info[spu->number].spu->local_store; |
3044 | } | 3044 | } |
3045 | } | 3045 | } |
3046 | 3046 | ||
/*
 * Stop every registered SPU: save its problem-state run control and
 * MFC_SR1, then clear the master run-control bit in MFC_SR1 to halt it.
 * All hardware accesses run under setjmp(bus_error_jmp) so a faulting
 * access is reported instead of crashing the debugger.  stopped_ok is
 * only set for SPUs that were stopped successfully, which gates the
 * later restart.
 */
static void stop_spus(void)
{
	struct spu *spu;
	int i;
	u64 tmp;

	for (i = 0; i < XMON_NUM_SPUS; i++) {
		if (!spu_info[i].spu)
			continue;

		if (setjmp(bus_error_jmp) == 0) {
			catch_memory_errors = 1;
			sync();

			spu = spu_info[i].spu;

			/* Remember run state so restart_spus() can restore it */
			spu_info[i].saved_spu_runcntl_RW =
				in_be32(&spu->problem->spu_runcntl_RW);

			tmp = spu_mfc_sr1_get(spu);
			spu_info[i].saved_mfc_sr1_RW = tmp;

			/* Clearing the master run-control bit stops the SPU */
			tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
			spu_mfc_sr1_set(spu, tmp);

			sync();
			__delay(200);

			spu_info[i].stopped_ok = 1;

			printf("Stopped spu %.2d (was %s)\n", i,
					spu_info[i].saved_spu_runcntl_RW ?
					"running" : "stopped");
		} else {
			catch_memory_errors = 0;
			printf("*** Error stopping spu %.2d\n", i);
		}
		catch_memory_errors = 0;
	}
}
3087 | 3087 | ||
/*
 * Restart the SPUs stopped by stop_spus(): restore the saved MFC_SR1
 * and problem-state run-control values.  SPUs whose stop failed
 * (stopped_ok == 0) are skipped, since their saved state is invalid.
 * Hardware accesses are again guarded by setjmp(bus_error_jmp).
 */
static void restart_spus(void)
{
	struct spu *spu;
	int i;

	for (i = 0; i < XMON_NUM_SPUS; i++) {
		if (!spu_info[i].spu)
			continue;

		if (!spu_info[i].stopped_ok) {
			printf("*** Error, spu %d was not successfully stopped"
					", not restarting\n", i);
			continue;
		}

		if (setjmp(bus_error_jmp) == 0) {
			catch_memory_errors = 1;
			sync();

			spu = spu_info[i].spu;
			spu_mfc_sr1_set(spu, spu_info[i].saved_mfc_sr1_RW);
			out_be32(&spu->problem->spu_runcntl_RW,
					spu_info[i].saved_spu_runcntl_RW);

			sync();
			__delay(200);

			printf("Restarted spu %.2d\n", i);
		} else {
			catch_memory_errors = 0;
			printf("*** Error restarting spu %.2d\n", i);
		}
		catch_memory_errors = 0;
	}
}
3123 | 3123 | ||
/* Field-name column width for the dump macros below. */
#define DUMP_WIDTH	23

/*
 * Print " <field> = <value>" using printf conversion @format.  The
 * evaluation of @value (typically a hardware register read) is guarded
 * by setjmp(bus_error_jmp) so a faulting access prints an error line
 * instead of killing xmon.
 */
#define DUMP_VALUE(format, field, value)			\
do {								\
	if (setjmp(bus_error_jmp) == 0) {			\
		catch_memory_errors = 1;			\
		sync();						\
		printf(" %-*s = "format"\n", DUMP_WIDTH,	\
				#field, value);			\
		sync();						\
		__delay(200);					\
	} else {						\
		catch_memory_errors = 0;			\
		printf(" %-*s = *** Error reading field.\n",	\
					DUMP_WIDTH, #field);	\
	}							\
	catch_memory_errors = 0;				\
} while (0)

/* Dump a struct member: DUMP_FIELD(spu, "0x%x", number). */
#define DUMP_FIELD(obj, format, field)	\
	DUMP_VALUE(format, field, obj->field)
3144 | 3144 | ||
/*
 * xmon "sf" command helper: print the interesting members of a struct
 * spu, plus a live read-back of the problem-state run control, status
 * and NPC registers.  Each line is bus-error protected via DUMP_VALUE.
 */
static void dump_spu_fields(struct spu *spu)
{
	printf("Dumping spu fields at address %p:\n", spu);

	DUMP_FIELD(spu, "0x%x", number);
	DUMP_FIELD(spu, "%s", name);
	DUMP_FIELD(spu, "0x%lx", local_store_phys);
	DUMP_FIELD(spu, "0x%p", local_store);
	DUMP_FIELD(spu, "0x%lx", ls_size);
	DUMP_FIELD(spu, "0x%x", node);
	DUMP_FIELD(spu, "0x%lx", flags);
	DUMP_FIELD(spu, "%d", class_0_pending);
	DUMP_FIELD(spu, "0x%lx", class_0_dar);
	DUMP_FIELD(spu, "0x%lx", class_1_dar);
	DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
	DUMP_FIELD(spu, "0x%lx", irqs[0]);
	DUMP_FIELD(spu, "0x%lx", irqs[1]);
	DUMP_FIELD(spu, "0x%lx", irqs[2]);
	DUMP_FIELD(spu, "0x%x", slb_replace);
	DUMP_FIELD(spu, "%d", pid);
	DUMP_FIELD(spu, "0x%p", mm);
	DUMP_FIELD(spu, "0x%p", ctx);
	DUMP_FIELD(spu, "0x%p", rq);
	DUMP_FIELD(spu, "0x%p", timestamp);
	DUMP_FIELD(spu, "0x%lx", problem_phys);
	DUMP_FIELD(spu, "0x%p", problem);
	/* Live MMIO reads of the problem-state registers */
	DUMP_VALUE("0x%x", problem->spu_runcntl_RW,
			in_be32(&spu->problem->spu_runcntl_RW));
	DUMP_VALUE("0x%x", problem->spu_status_R,
			in_be32(&spu->problem->spu_status_R));
	DUMP_VALUE("0x%x", problem->spu_npc_RW,
			in_be32(&spu->problem->spu_npc_RW));
	DUMP_FIELD(spu, "0x%p", priv2);
	DUMP_FIELD(spu, "0x%p", pdata);
}
3180 | 3180 | ||
/*
 * Disassemble @count SPU instructions starting at @adr using the SPU
 * disassembler; @praddr non-zero also prints each address.  Returns the
 * number of bytes consumed (so callers can advance their dump pointer).
 */
int
spu_inst_dump(unsigned long adr, long count, int praddr)
{
	return generic_inst_dump(adr, count, praddr, print_insn_spu);
}
3186 | 3186 | ||
3187 | static void dump_spu_ls(unsigned long num, int subcmd) | 3187 | static void dump_spu_ls(unsigned long num, int subcmd) |
3188 | { | 3188 | { |
3189 | unsigned long offset, addr, ls_addr; | 3189 | unsigned long offset, addr, ls_addr; |
3190 | 3190 | ||
3191 | if (setjmp(bus_error_jmp) == 0) { | 3191 | if (setjmp(bus_error_jmp) == 0) { |
3192 | catch_memory_errors = 1; | 3192 | catch_memory_errors = 1; |
3193 | sync(); | 3193 | sync(); |
3194 | ls_addr = (unsigned long)spu_info[num].spu->local_store; | 3194 | ls_addr = (unsigned long)spu_info[num].spu->local_store; |
3195 | sync(); | 3195 | sync(); |
3196 | __delay(200); | 3196 | __delay(200); |
3197 | } else { | 3197 | } else { |
3198 | catch_memory_errors = 0; | 3198 | catch_memory_errors = 0; |
3199 | printf("*** Error: accessing spu info for spu %d\n", num); | 3199 | printf("*** Error: accessing spu info for spu %d\n", num); |
3200 | return; | 3200 | return; |
3201 | } | 3201 | } |
3202 | catch_memory_errors = 0; | 3202 | catch_memory_errors = 0; |
3203 | 3203 | ||
3204 | if (scanhex(&offset)) | 3204 | if (scanhex(&offset)) |
3205 | addr = ls_addr + offset; | 3205 | addr = ls_addr + offset; |
3206 | else | 3206 | else |
3207 | addr = spu_info[num].dump_addr; | 3207 | addr = spu_info[num].dump_addr; |
3208 | 3208 | ||
3209 | if (addr >= ls_addr + LS_SIZE) { | 3209 | if (addr >= ls_addr + LS_SIZE) { |
3210 | printf("*** Error: address outside of local store\n"); | 3210 | printf("*** Error: address outside of local store\n"); |
3211 | return; | 3211 | return; |
3212 | } | 3212 | } |
3213 | 3213 | ||
3214 | switch (subcmd) { | 3214 | switch (subcmd) { |
3215 | case 'i': | 3215 | case 'i': |
3216 | addr += spu_inst_dump(addr, 16, 1); | 3216 | addr += spu_inst_dump(addr, 16, 1); |
3217 | last_cmd = "sdi\n"; | 3217 | last_cmd = "sdi\n"; |
3218 | break; | 3218 | break; |
3219 | default: | 3219 | default: |
3220 | prdump(addr, 64); | 3220 | prdump(addr, 64); |
3221 | addr += 64; | 3221 | addr += 64; |
3222 | last_cmd = "sd\n"; | 3222 | last_cmd = "sd\n"; |
3223 | break; | 3223 | break; |
3224 | } | 3224 | } |
3225 | 3225 | ||
3226 | spu_info[num].dump_addr = addr; | 3226 | spu_info[num].dump_addr = addr; |
3227 | } | 3227 | } |
3228 | 3228 | ||
/*
 * Dispatch the xmon SPU command family:
 *   ss         - stop all SPUs
 *   sr         - restart previously stopped SPUs
 *   sf <n>     - dump the fields of SPU n
 *   sd[i] <n>  - dump SPU n's local store (data, or instructions with 'i')
 * The SPU number is sticky across invocations (static @num).
 * Returns 0 on success, -1 for an unrecognized subcommand.
 */
static int do_spu_cmd(void)
{
	static unsigned long num = 0;
	int cmd, subcmd = 0;

	cmd = inchar();
	switch (cmd) {
	case 's':
		stop_spus();
		break;
	case 'r':
		restart_spus();
		break;
	case 'd':
		subcmd = inchar();
		/* push back a digit or newline so scanhex() below sees it */
		if (isxdigit(subcmd) || subcmd == '\n')
			termch = subcmd;
		/* fall through - 'd' also needs the SPU number */
	case 'f':
		scanhex(&num);
		if (num >= XMON_NUM_SPUS || !spu_info[num].spu) {
			printf("*** Error: invalid spu number\n");
			return 0;
		}

		switch (cmd) {
		case 'f':
			dump_spu_fields(spu_info[num].spu);
			break;
		default:
			dump_spu_ls(num, subcmd);
			break;
		}

		break;
	default:
		return -1;
	}

	return 0;
}
#else /* ! CONFIG_SPU_BASE */
/* No SPU support built in: report the command as unrecognized. */
static int do_spu_cmd(void)
{
	return -1;
}
#endif
3275 | 3275 |