Commit 7c3078b637882303b1dcf6a16229d0e35f6b60a5
Committed by
Ingo Molnar
1 parent
d359752407
Exists in
master
and in
20 other branches
kgdb: clocksource watchdog
In order to not trip the clocksource watchdog, kgdb must touch the clocksource watchdog on the return to normal system run state. Signed-off-by: Jason Wessel <jason.wessel@windriver.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 3 changed files with 17 additions and 0 deletions Inline Diff
include/linux/clocksource.h
1 | /* linux/include/linux/clocksource.h | 1 | /* linux/include/linux/clocksource.h |
2 | * | 2 | * |
3 | * This file contains the structure definitions for clocksources. | 3 | * This file contains the structure definitions for clocksources. |
4 | * | 4 | * |
5 | * If you are not a clocksource, or timekeeping code, you should | 5 | * If you are not a clocksource, or timekeeping code, you should |
6 | * not be including this file! | 6 | * not be including this file! |
7 | */ | 7 | */ |
8 | #ifndef _LINUX_CLOCKSOURCE_H | 8 | #ifndef _LINUX_CLOCKSOURCE_H |
9 | #define _LINUX_CLOCKSOURCE_H | 9 | #define _LINUX_CLOCKSOURCE_H |
10 | 10 | ||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/timex.h> | 12 | #include <linux/timex.h> |
13 | #include <linux/time.h> | 13 | #include <linux/time.h> |
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | #include <linux/cache.h> | 15 | #include <linux/cache.h> |
16 | #include <linux/timer.h> | 16 | #include <linux/timer.h> |
17 | #include <asm/div64.h> | 17 | #include <asm/div64.h> |
18 | #include <asm/io.h> | 18 | #include <asm/io.h> |
19 | 19 | ||
20 | /* clocksource cycle base type */ | 20 | /* clocksource cycle base type */ |
21 | typedef u64 cycle_t; | 21 | typedef u64 cycle_t; |
22 | struct clocksource; | 22 | struct clocksource; |
23 | 23 | ||
/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 * @flags:		flags describing special properties
 * @vread:		vsyscall based read
 * @resume:		resume function for the clocksource, if necessary
 * @cycle_interval:	Used internally by timekeeping core, please ignore.
 * @xtime_interval:	Used internally by timekeeping core, please ignore.
 * @cycle_last:		Used internally by timekeeping core, please ignore.
 *			Written at each timer interrupt, hence kept in its
 *			own cache line.
 * @xtime_nsec:		Used internally by timekeeping core, please ignore.
 * @error:		Used internally by timekeeping core, please ignore.
 * @wd_list:		Used internally by the clocksource watchdog
 *			framework, please ignore.
 * @wd_last:		Used internally by the clocksource watchdog
 *			framework, please ignore.
 */
struct clocksource {
	/*
	 * First part of structure is read mostly
	 */
	char *name;
	struct list_head list;
	int rating;
	cycle_t (*read)(void);
	cycle_t mask;
	u32 mult;
	u32 shift;
	unsigned long flags;
	cycle_t (*vread)(void);
	void (*resume)(void);
#ifdef CONFIG_IA64
	void *fsys_mmio;        /* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)      ((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)      do { } while (0)
#endif

	/* timekeeping specific data, ignore */
	cycle_t cycle_interval;
	u64 xtime_interval;
	/*
	 * Second part is written at each timer interrupt
	 * Keep it in a different cache line to dirty no
	 * more than one cache line.
	 */
	cycle_t cycle_last ____cacheline_aligned_in_smp;
	u64 xtime_nsec;
	s64 error;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t wd_last;
#endif
};
95 | 95 | ||
96 | /* | 96 | /* |
97 | * Clock source flags bits: | 97 | * Clock source flags bits: |
98 | */ | 98 | */ |
99 | #define CLOCK_SOURCE_IS_CONTINUOUS 0x01 | 99 | #define CLOCK_SOURCE_IS_CONTINUOUS 0x01 |
100 | #define CLOCK_SOURCE_MUST_VERIFY 0x02 | 100 | #define CLOCK_SOURCE_MUST_VERIFY 0x02 |
101 | 101 | ||
102 | #define CLOCK_SOURCE_WATCHDOG 0x10 | 102 | #define CLOCK_SOURCE_WATCHDOG 0x10 |
103 | #define CLOCK_SOURCE_VALID_FOR_HRES 0x20 | 103 | #define CLOCK_SOURCE_VALID_FOR_HRES 0x20 |
104 | 104 | ||
105 | /* simplify initialization of mask field */ | 105 | /* simplify initialization of mask field */ |
106 | #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) | 106 | #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) |
107 | 107 | ||
/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a clocksource
 * multiplier, given the clocksource shift value.
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/* khz = cyc/(Million ns)
	 * mult/2^shift  = ns/cyc
	 * mult = ns/cyc * 2^shift
	 * mult = 1Million/khz * 2^shift
	 * mult = 1000000 * 2^shift / khz
	 * mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
132 | 132 | ||
/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value.
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/* hz = cyc/(Billion ns)
	 * mult/2^shift = ns/cyc
	 * mult = ns/cyc * 2^shift
	 * mult = 1Billion/hz * 2^shift
	 * mult = 1000000000 * 2^shift / hz
	 * mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}
158 | 158 | ||
159 | /** | 159 | /** |
160 | * clocksource_read: - Access the clocksource's current cycle value | 160 | * clocksource_read: - Access the clocksource's current cycle value |
161 | * @cs: pointer to clocksource being read | 161 | * @cs: pointer to clocksource being read |
162 | * | 162 | * |
163 | * Uses the clocksource to return the current cycle_t value | 163 | * Uses the clocksource to return the current cycle_t value |
164 | */ | 164 | */ |
165 | static inline cycle_t clocksource_read(struct clocksource *cs) | 165 | static inline cycle_t clocksource_read(struct clocksource *cs) |
166 | { | 166 | { |
167 | return cs->read(); | 167 | return cs->read(); |
168 | } | 168 | } |
169 | 169 | ||
170 | /** | 170 | /** |
171 | * cyc2ns - converts clocksource cycles to nanoseconds | 171 | * cyc2ns - converts clocksource cycles to nanoseconds |
172 | * @cs: Pointer to clocksource | 172 | * @cs: Pointer to clocksource |
173 | * @cycles: Cycles | 173 | * @cycles: Cycles |
174 | * | 174 | * |
175 | * Uses the clocksource and ntp ajdustment to convert cycle_ts to nanoseconds. | 175 | * Uses the clocksource and ntp ajdustment to convert cycle_ts to nanoseconds. |
176 | * | 176 | * |
177 | * XXX - This could use some mult_lxl_ll() asm optimization | 177 | * XXX - This could use some mult_lxl_ll() asm optimization |
178 | */ | 178 | */ |
179 | static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles) | 179 | static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles) |
180 | { | 180 | { |
181 | u64 ret = (u64)cycles; | 181 | u64 ret = (u64)cycles; |
182 | ret = (ret * cs->mult) >> cs->shift; | 182 | ret = (ret * cs->mult) >> cs->shift; |
183 | return ret; | 183 | return ret; |
184 | } | 184 | } |
185 | 185 | ||
186 | /** | 186 | /** |
187 | * clocksource_calculate_interval - Calculates a clocksource interval struct | 187 | * clocksource_calculate_interval - Calculates a clocksource interval struct |
188 | * | 188 | * |
189 | * @c: Pointer to clocksource. | 189 | * @c: Pointer to clocksource. |
190 | * @length_nsec: Desired interval length in nanoseconds. | 190 | * @length_nsec: Desired interval length in nanoseconds. |
191 | * | 191 | * |
192 | * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment | 192 | * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment |
193 | * pair and interval request. | 193 | * pair and interval request. |
194 | * | 194 | * |
195 | * Unless you're the timekeeping code, you should not be using this! | 195 | * Unless you're the timekeeping code, you should not be using this! |
196 | */ | 196 | */ |
197 | static inline void clocksource_calculate_interval(struct clocksource *c, | 197 | static inline void clocksource_calculate_interval(struct clocksource *c, |
198 | unsigned long length_nsec) | 198 | unsigned long length_nsec) |
199 | { | 199 | { |
200 | u64 tmp; | 200 | u64 tmp; |
201 | 201 | ||
202 | /* XXX - All of this could use a whole lot of optimization */ | 202 | /* XXX - All of this could use a whole lot of optimization */ |
203 | tmp = length_nsec; | 203 | tmp = length_nsec; |
204 | tmp <<= c->shift; | 204 | tmp <<= c->shift; |
205 | tmp += c->mult/2; | 205 | tmp += c->mult/2; |
206 | do_div(tmp, c->mult); | 206 | do_div(tmp, c->mult); |
207 | 207 | ||
208 | c->cycle_interval = (cycle_t)tmp; | 208 | c->cycle_interval = (cycle_t)tmp; |
209 | if (c->cycle_interval == 0) | 209 | if (c->cycle_interval == 0) |
210 | c->cycle_interval = 1; | 210 | c->cycle_interval = 1; |
211 | 211 | ||
212 | c->xtime_interval = (u64)c->cycle_interval * c->mult; | 212 | c->xtime_interval = (u64)c->cycle_interval * c->mult; |
213 | } | 213 | } |
214 | 214 | ||
215 | 215 | ||
/* used to install a new clocksource */
extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
/* Re-arm the watchdog after a long stop (e.g. return from a kgdb session),
 * so the stalled interval is not misread as clocksource instability. */
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_resume(void);

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
extern void update_vsyscall_tz(void);
#else
/* Without vsyscall support these updates are no-ops. */
static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
{
}

static inline void update_vsyscall_tz(void)
{
}
#endif

#endif /* _LINUX_CLOCKSOURCE_H */
237 | 238 |
kernel/kgdb.c
1 | /* | 1 | /* |
2 | * KGDB stub. | 2 | * KGDB stub. |
3 | * | 3 | * |
4 | * Maintainer: Jason Wessel <jason.wessel@windriver.com> | 4 | * Maintainer: Jason Wessel <jason.wessel@windriver.com> |
5 | * | 5 | * |
6 | * Copyright (C) 2000-2001 VERITAS Software Corporation. | 6 | * Copyright (C) 2000-2001 VERITAS Software Corporation. |
7 | * Copyright (C) 2002-2004 Timesys Corporation | 7 | * Copyright (C) 2002-2004 Timesys Corporation |
8 | * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com> | 8 | * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com> |
9 | * Copyright (C) 2004 Pavel Machek <pavel@suse.cz> | 9 | * Copyright (C) 2004 Pavel Machek <pavel@suse.cz> |
10 | * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org> | 10 | * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org> |
11 | * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd. | 11 | * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd. |
12 | * Copyright (C) 2005-2008 Wind River Systems, Inc. | 12 | * Copyright (C) 2005-2008 Wind River Systems, Inc. |
13 | * Copyright (C) 2007 MontaVista Software, Inc. | 13 | * Copyright (C) 2007 MontaVista Software, Inc. |
14 | * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 14 | * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
15 | * | 15 | * |
16 | * Contributors at various stages not listed above: | 16 | * Contributors at various stages not listed above: |
17 | * Jason Wessel ( jason.wessel@windriver.com ) | 17 | * Jason Wessel ( jason.wessel@windriver.com ) |
18 | * George Anzinger <george@mvista.com> | 18 | * George Anzinger <george@mvista.com> |
19 | * Anurekh Saxena (anurekh.saxena@timesys.com) | 19 | * Anurekh Saxena (anurekh.saxena@timesys.com) |
20 | * Lake Stevens Instrument Division (Glenn Engel) | 20 | * Lake Stevens Instrument Division (Glenn Engel) |
21 | * Jim Kingdon, Cygnus Support. | 21 | * Jim Kingdon, Cygnus Support. |
22 | * | 22 | * |
23 | * Original KGDB stub: David Grothe <dave@gcom.com>, | 23 | * Original KGDB stub: David Grothe <dave@gcom.com>, |
24 | * Tigran Aivazian <tigran@sco.com> | 24 | * Tigran Aivazian <tigran@sco.com> |
25 | * | 25 | * |
26 | * This file is licensed under the terms of the GNU General Public License | 26 | * This file is licensed under the terms of the GNU General Public License |
27 | * version 2. This program is licensed "as is" without any warranty of any | 27 | * version 2. This program is licensed "as is" without any warranty of any |
28 | * kind, whether express or implied. | 28 | * kind, whether express or implied. |
29 | */ | 29 | */ |
30 | #include <linux/pid_namespace.h> | 30 | #include <linux/pid_namespace.h> |
31 | #include <linux/clocksource.h> | ||
31 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
32 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
33 | #include <linux/console.h> | 34 | #include <linux/console.h> |
34 | #include <linux/threads.h> | 35 | #include <linux/threads.h> |
35 | #include <linux/uaccess.h> | 36 | #include <linux/uaccess.h> |
36 | #include <linux/kernel.h> | 37 | #include <linux/kernel.h> |
37 | #include <linux/module.h> | 38 | #include <linux/module.h> |
38 | #include <linux/ptrace.h> | 39 | #include <linux/ptrace.h> |
39 | #include <linux/reboot.h> | 40 | #include <linux/reboot.h> |
40 | #include <linux/string.h> | 41 | #include <linux/string.h> |
41 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
42 | #include <linux/sched.h> | 43 | #include <linux/sched.h> |
43 | #include <linux/sysrq.h> | 44 | #include <linux/sysrq.h> |
44 | #include <linux/init.h> | 45 | #include <linux/init.h> |
45 | #include <linux/kgdb.h> | 46 | #include <linux/kgdb.h> |
46 | #include <linux/pid.h> | 47 | #include <linux/pid.h> |
47 | #include <linux/smp.h> | 48 | #include <linux/smp.h> |
48 | #include <linux/mm.h> | 49 | #include <linux/mm.h> |
49 | 50 | ||
50 | #include <asm/cacheflush.h> | 51 | #include <asm/cacheflush.h> |
51 | #include <asm/byteorder.h> | 52 | #include <asm/byteorder.h> |
52 | #include <asm/atomic.h> | 53 | #include <asm/atomic.h> |
53 | #include <asm/system.h> | 54 | #include <asm/system.h> |
54 | 55 | ||
/* Set when a breakpoint should be installed as early as possible at boot. */
static int kgdb_break_asap;

/*
 * Snapshot of the exception being handled, passed between the trap
 * entry point and the gdb protocol state machine.
 */
struct kgdb_state {
	int ex_vector;
	int signo;
	int err_code;
	int cpu;
	int pass_exception;
	long threadid;
	long kgdb_usethreadid;
	struct pt_regs *linux_regs;
};

/* Per-CPU debugger context: saved arch info and the stopped task. */
static struct debuggerinfo_struct {
	void *debuggerinfo;
	struct task_struct *task;
} kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
static int kgdb_io_module_registered;

/* Guard for recursive entry */
static int exception_level;

/* Currently registered I/O driver and the lock serializing registration. */
static struct kgdb_io *kgdb_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
92 | 93 | ||
/* "kgdbcon" boot parameter: route console output through kgdb. */
static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;
	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

module_param(kgdb_use_con, int, 0644);
102 | 103 | ||
/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t kgdb_active = ATOMIC_INIT(-1);

/*
 * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t passive_cpu_wait[NR_CPUS];
static atomic_t cpu_in_kgdb[NR_CPUS];
atomic_t kgdb_setting_breakpoint;

/* Threads selected by the debugger; set by packet handlers elsewhere. */
struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;

int kgdb_single_step;

/* Our I/O buffers. */
static char remcom_in_buffer[BUFMAX];
static char remcom_out_buffer[BUFMAX];

/* Storage for the registers, in GDB format. */
static unsigned long gdb_regs[(NUMREGBYTES +
			       sizeof(unsigned long) - 1) /
			       sizeof(unsigned long)];

/* to keep track of the CPU which is doing the single stepping */
atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
int kgdb_do_roundup = 1;
149 | 150 | ||
/* "nokgdbroundup" boot parameter: do not round up the other CPUs on entry. */
static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);
158 | 159 | ||
159 | /* | 160 | /* |
160 | * Finally, some KGDB code :-) | 161 | * Finally, some KGDB code :-) |
161 | */ | 162 | */ |
162 | 163 | ||
163 | /* | 164 | /* |
164 | * Weak aliases for breakpoint management, | 165 | * Weak aliases for breakpoint management, |
165 | * can be overriden by architectures when needed: | 166 | * can be overriden by architectures when needed: |
166 | */ | 167 | */ |
167 | int __weak kgdb_validate_break_address(unsigned long addr) | 168 | int __weak kgdb_validate_break_address(unsigned long addr) |
168 | { | 169 | { |
169 | char tmp_variable[BREAK_INSTR_SIZE]; | 170 | char tmp_variable[BREAK_INSTR_SIZE]; |
170 | 171 | ||
171 | return probe_kernel_read(tmp_variable, (char *)addr, BREAK_INSTR_SIZE); | 172 | return probe_kernel_read(tmp_variable, (char *)addr, BREAK_INSTR_SIZE); |
172 | } | 173 | } |
173 | 174 | ||
174 | int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) | 175 | int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) |
175 | { | 176 | { |
176 | int err; | 177 | int err; |
177 | 178 | ||
178 | err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE); | 179 | err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE); |
179 | if (err) | 180 | if (err) |
180 | return err; | 181 | return err; |
181 | 182 | ||
182 | return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr, | 183 | return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr, |
183 | BREAK_INSTR_SIZE); | 184 | BREAK_INSTR_SIZE); |
184 | } | 185 | } |
185 | 186 | ||
186 | int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle) | 187 | int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle) |
187 | { | 188 | { |
188 | return probe_kernel_write((char *)addr, | 189 | return probe_kernel_write((char *)addr, |
189 | (char *)bundle, BREAK_INSTR_SIZE); | 190 | (char *)bundle, BREAK_INSTR_SIZE); |
190 | } | 191 | } |
191 | 192 | ||
/* Default PC extraction: the instruction pointer from the saved registers. */
unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
196 | 197 | ||
/* Default arch init: nothing to do; architectures may override. */
int __weak kgdb_arch_init(void)
{
	return 0;
}
201 | 202 | ||
/**
 * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
 * @regs: Current &struct pt_regs.
 *
 * This function will be called if the particular architecture must
 * disable hardware debugging while it is processing gdb packets or
 * handling exception.
 */
void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
{
	/* default: nothing to disable; architectures override as needed */
}
213 | 214 | ||
/*
 * GDB remote protocol parser:
 */

static const char hexchars[] = "0123456789abcdef";

/* Decode one hex digit; returns 0..15, or -1 if @ch is not a hex digit. */
static int hex(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return 10 + (ch - 'a');
	if (ch >= 'A' && ch <= 'F')
		return 10 + (ch - 'A');
	return -1;
}
230 | 231 | ||
/*
 * Scan the I/O stream for the sequence $<data>#<checksum>.
 *
 * The payload between '$' and '#' is copied into @buffer and
 * NUL-terminated; the running 8-bit sum of its bytes is compared
 * against the two hex checksum digits following '#'.  A good packet
 * is ACKed with '+', a bad one is NAKed with '-' and the whole
 * exchange is retried until a packet with a valid checksum arrives.
 */
static void get_packet(char *buffer)
{
	unsigned char checksum;
	unsigned char xmitcsum;
	int count;
	char ch;

	do {
		/*
		 * Spin and wait around for the start character, ignore all
		 * other characters:
		 */
		while ((ch = (kgdb_io_ops->read_char())) != '$')
			/* nothing */;

		/* Seeing traffic means a debugger is on the other end. */
		kgdb_connected = 1;
		checksum = 0;
		xmitcsum = -1;

		count = 0;

		/*
		 * now, read until a # or end of buffer is found:
		 */
		while (count < (BUFMAX - 1)) {
			ch = kgdb_io_ops->read_char();
			if (ch == '#')
				break;
			checksum = checksum + ch;
			buffer[count] = ch;
			count = count + 1;
		}
		/* Always NUL-terminate, even on an overlong packet. */
		buffer[count] = 0;

		if (ch == '#') {
			/* Two hex digits follow '#': high nibble first. */
			xmitcsum = hex(kgdb_io_ops->read_char()) << 4;
			xmitcsum += hex(kgdb_io_ops->read_char());

			if (checksum != xmitcsum)
				/* failed checksum */
				kgdb_io_ops->write_char('-');
			else
				/* successful transfer */
				kgdb_io_ops->write_char('+');
			if (kgdb_io_ops->flush)
				kgdb_io_ops->flush();
		}
	} while (checksum != xmitcsum);
}
281 | 282 | ||
/*
 * Send the packet in buffer, framed as $<packet info>#<checksum>,
 * where the checksum is the 8-bit sum of the payload bytes rendered
 * as two hex digits.  The packet is retransmitted until the remote
 * side ACKs it with '+', or until the remote starts a new packet
 * ('$'), which is treated as a reconnect and NAKed.
 */
static void put_packet(char *buffer)
{
	unsigned char checksum;
	int count;
	char ch;

	/*
	 * $<packet info>#<checksum>.
	 */
	while (1) {
		kgdb_io_ops->write_char('$');
		checksum = 0;
		count = 0;

		/* Emit the payload while accumulating the checksum. */
		while ((ch = buffer[count])) {
			kgdb_io_ops->write_char(ch);
			checksum += ch;
			count++;
		}

		kgdb_io_ops->write_char('#');
		kgdb_io_ops->write_char(hexchars[checksum >> 4]);
		kgdb_io_ops->write_char(hexchars[checksum & 0xf]);
		if (kgdb_io_ops->flush)
			kgdb_io_ops->flush();

		/* Now see what we get in reply. */
		ch = kgdb_io_ops->read_char();

		/*
		 * A byte of value 3 (ASCII ETX, i.e. control-C) is
		 * presumably a queued interrupt request from gdb —
		 * skip it and read the real response.
		 */
		if (ch == 3)
			ch = kgdb_io_ops->read_char();

		/* If we get an ACK, we are done. */
		if (ch == '+')
			return;

		/*
		 * If we get the start of another packet, this means
		 * that GDB is attempting to reconnect.  We will NAK
		 * the packet being sent, and stop trying to send this
		 * packet.
		 */
		if (ch == '$') {
			kgdb_io_ops->write_char('-');
			if (kgdb_io_ops->flush)
				kgdb_io_ops->flush();
			return;
		}
	}
}
336 | 337 | ||
337 | static char *pack_hex_byte(char *pkt, u8 byte) | 338 | static char *pack_hex_byte(char *pkt, u8 byte) |
338 | { | 339 | { |
339 | *pkt++ = hexchars[byte >> 4]; | 340 | *pkt++ = hexchars[byte >> 4]; |
340 | *pkt++ = hexchars[byte & 0xf]; | 341 | *pkt++ = hexchars[byte & 0xf]; |
341 | 342 | ||
342 | return pkt; | 343 | return pkt; |
343 | } | 344 | } |
344 | 345 | ||
/*
 * Convert @count bytes of kernel memory at @mem into hex in @buf,
 * NUL-terminating the result.  The upper half of @buf is used as a
 * scratch area for the raw copy, so @buf must hold at least
 * 2 * @count + 1 bytes.  Returns 0 on success or the negative error
 * from the guarded memory read.
 */
int kgdb_mem2hex(char *mem, char *buf, int count)
{
	/* Raw copy lands in the upper half; hex overwrites from the start. */
	char *raw = buf + count;
	int err;

	err = probe_kernel_read(raw, mem, count);
	if (err)
		return err;

	while (count-- > 0)
		buf = pack_hex_byte(buf, *raw++);
	*buf = 0;

	return 0;
}
373 | 374 | ||
/*
 * Copy the escaped binary array at @buf into kernel memory at @mem,
 * undoing the gdb escaping: a 0x7d byte marks an escape, and the
 * following byte XORed with 0x20 yields the original value (this is
 * how $, # and 0x7d themselves travel on the wire).  Returns 0 on
 * success or the negative error from the guarded memory write.
 */
static int kgdb_ebin2mem(char *buf, char *mem, int count)
{
	int err = 0;

	while (count-- > 0) {
		char ch = *buf++;

		if (ch == 0x7d)
			ch = *buf++ ^ 0x20;

		err = probe_kernel_write(mem++, &ch, 1);
		if (err)
			break;
	}

	return err;
}
398 | 399 | ||
/*
 * Convert the 2 * @count hex characters at @buf into @count raw bytes
 * and write them to kernel memory at @mem.  The decode is done in
 * place into the upper half of @buf, walking backwards so no hex
 * character is overwritten before it has been read.  Returns 0 on
 * success or the negative error from the guarded memory write.
 */
int kgdb_hex2mem(char *buf, char *mem, int count)
{
	/* Decoded bytes end up occupying [buf + count, buf + 2*count). */
	char *raw = buf + count;
	int i;

	for (i = count - 1; i >= 0; i--)
		raw[i] = (hex(buf[2 * i]) << 4) | hex(buf[2 * i + 1]);

	return probe_kernel_write(mem, raw, count);
}
424 | 425 | ||
/*
 * Parse a run of hex digits at *@ptr into *@long_val, advancing *@ptr
 * past the digits consumed.  Parsing stops at the first character that
 * is not a hex digit (including the terminating NUL).
 * Returns the number of characters consumed (0 if none).
 */
int kgdb_hex2long(char **ptr, long *long_val)
{
	int nchars = 0;

	*long_val = 0;

	for (;;) {
		char c = **ptr;
		int digit;

		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = c - 'a' + 10;
		else if (c >= 'A' && c <= 'F')
			digit = c - 'A' + 10;
		else
			break;

		*long_val = (*long_val << 4) | digit;
		nchars++;
		(*ptr)++;
	}

	return nchars;
}
448 | 449 | ||
/*
 * Write memory due to an 'M' or 'X' packet.
 *
 * Packet layout (already in remcom_in_buffer, past the command byte):
 *   <addr>,<length>:<data>
 * where <data> is hex for 'M' (@binary == 0) and escaped binary for
 * 'X' (@binary != 0).  On success the icache is flushed over the
 * written range when the architecture says that is safe.
 * Returns 0 on success, a negative write error, or -EINVAL on a
 * malformed packet.
 *
 * NOTE(review): &addr / &length are unsigned long * but kgdb_hex2long
 * takes long * — relies on an implicit-compatible pointer conversion;
 * confirm this matches the tree-wide prototype.
 */
static int write_mem_msg(int binary)
{
	char *ptr = &remcom_in_buffer[1];
	unsigned long addr;
	unsigned long length;
	int err;

	if (kgdb_hex2long(&ptr, &addr) > 0 && *(ptr++) == ',' &&
	    kgdb_hex2long(&ptr, &length) > 0 && *(ptr++) == ':') {
		if (binary)
			err = kgdb_ebin2mem(ptr, (char *)addr, length);
		else
			err = kgdb_hex2mem(ptr, (char *)addr, length);
		if (err)
			return err;
		if (CACHE_FLUSH_IS_SAFE)
			flush_icache_range(addr, addr + length + 1);
		return 0;
	}

	return -EINVAL;
}
472 | 473 | ||
473 | static void error_packet(char *pkt, int error) | 474 | static void error_packet(char *pkt, int error) |
474 | { | 475 | { |
475 | error = -error; | 476 | error = -error; |
476 | pkt[0] = 'E'; | 477 | pkt[0] = 'E'; |
477 | pkt[1] = hexchars[(error / 10)]; | 478 | pkt[1] = hexchars[(error / 10)]; |
478 | pkt[2] = hexchars[(error % 10)]; | 479 | pkt[2] = hexchars[(error % 10)]; |
479 | pkt[3] = '\0'; | 480 | pkt[3] = '\0'; |
480 | } | 481 | } |
481 | 482 | ||
482 | /* | 483 | /* |
483 | * Thread ID accessors. We represent a flat TID space to GDB, where | 484 | * Thread ID accessors. We represent a flat TID space to GDB, where |
484 | * the per CPU idle threads (which under Linux all have PID 0) are | 485 | * the per CPU idle threads (which under Linux all have PID 0) are |
485 | * remapped to negative TIDs. | 486 | * remapped to negative TIDs. |
486 | */ | 487 | */ |
487 | 488 | ||
#define BUF_THREAD_ID_SIZE 16

/*
 * Emit BUF_THREAD_ID_SIZE hex characters (i.e. 8 thread-id bytes)
 * from @id into @pkt; returns the advanced output pointer.
 */
static char *pack_threadid(char *pkt, unsigned char *id)
{
	char *end = pkt + BUF_THREAD_ID_SIZE;

	for (; pkt < end; id++)
		pkt = pack_hex_byte(pkt, *id);

	return pkt;
}
500 | 501 | ||
/*
 * Build an 8-byte big-endian thread reference from @value: the first
 * four bytes are zero, the last four hold @value most-significant
 * byte first.
 */
static void int_to_threadref(unsigned char *id, int value)
{
	int i;

	for (i = 0; i < 4; i++)
		id[i] = 0;
	id[4] = (value >> 24) & 0xff;
	id[5] = (value >> 16) & 0xff;
	id[6] = (value >> 8) & 0xff;
	id[7] = value & 0xff;
}
514 | 515 | ||
/*
 * Map a gdb thread id back to a task_struct.  @regs is currently
 * unused.  Returns NULL-or-task per find_task_by_pid_ns() for
 * positive TIDs.
 */
static struct task_struct *getthread(struct pt_regs *regs, int tid)
{
	/*
	 * Non-positive TIDs are remapped idle tasks: TID -n is the
	 * idle task of CPU n.
	 */
	if (tid <= 0)
		return idle_task(-tid);

	/*
	 * find_task_by_pid_ns() does not take the tasklist lock anymore
	 * but is nicely RCU locked - hence is a pretty resilient
	 * thing to use:
	 */
	return find_task_by_pid_ns(tid, &init_pid_ns);
}
530 | 531 | ||
531 | /* | 532 | /* |
532 | * CPU debug state control: | 533 | * CPU debug state control: |
533 | */ | 534 | */ |
534 | 535 | ||
#ifdef CONFIG_SMP
/*
 * Park a secondary CPU while the primary (debug master) CPU owns the
 * debugger.  Publishes this CPU's registers/task for the master,
 * spins with interrupts off until released, then restores hardware
 * breakpoints locally before resuming.
 */
static void kgdb_wait(struct pt_regs *regs)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	/*
	 * Make sure the above info reaches the primary CPU before
	 * our cpu_in_kgdb[] flag setting does:
	 */
	smp_wmb();
	atomic_set(&cpu_in_kgdb[cpu], 1);

	/*
	 * The primary CPU must be active to enter here, but this is
	 * guard in case the primary CPU had not been selected if
	 * this was an entry via nmi.
	 */
	while (atomic_read(&kgdb_active) == -1)
		cpu_relax();

	/* Wait till primary CPU goes completely into the debugger. */
	while (!atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)]))
		cpu_relax();

	/* Wait till primary CPU is done with debugging */
	while (atomic_read(&passive_cpu_wait[cpu]))
		cpu_relax();

	/* Released: stop advertising our saved state. */
	kgdb_info[cpu].debuggerinfo = NULL;
	kgdb_info[cpu].task = NULL;

	/* fix up hardware debug registers on local cpu */
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();

	/* Signal the primary CPU that we are done: */
	atomic_set(&cpu_in_kgdb[cpu], 0);
	/*
	 * The long stop in the debugger would otherwise look like a
	 * clocksource anomaly; touch the watchdog before resuming so
	 * it is not tripped on the return to normal run state.
	 */
	clocksource_touch_watchdog();
	local_irq_restore(flags);
}
#endif
580 | 582 | ||
/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint: flush BREAK_INSTR_SIZE bytes at @addr so the modified
 * instruction is visible to instruction fetch.
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	/* Architectures may declare flushing unsafe in this context. */
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm) {
		/*
		 * NOTE(review): mmap_cache is the most-recently-used
		 * vma, which may not actually contain addr — confirm
		 * this is the intended approximation here.
		 */
		flush_cache_range(current->mm->mmap_cache,
				  addr, addr + BREAK_INSTR_SIZE);
	} else {
		flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
	}
}
597 | 599 | ||
598 | /* | 600 | /* |
599 | * SW breakpoint management: | 601 | * SW breakpoint management: |
600 | */ | 602 | */ |
601 | static int kgdb_activate_sw_breakpoints(void) | 603 | static int kgdb_activate_sw_breakpoints(void) |
602 | { | 604 | { |
603 | unsigned long addr; | 605 | unsigned long addr; |
604 | int error = 0; | 606 | int error = 0; |
605 | int i; | 607 | int i; |
606 | 608 | ||
607 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | 609 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { |
608 | if (kgdb_break[i].state != BP_SET) | 610 | if (kgdb_break[i].state != BP_SET) |
609 | continue; | 611 | continue; |
610 | 612 | ||
611 | addr = kgdb_break[i].bpt_addr; | 613 | addr = kgdb_break[i].bpt_addr; |
612 | error = kgdb_arch_set_breakpoint(addr, | 614 | error = kgdb_arch_set_breakpoint(addr, |
613 | kgdb_break[i].saved_instr); | 615 | kgdb_break[i].saved_instr); |
614 | if (error) | 616 | if (error) |
615 | return error; | 617 | return error; |
616 | 618 | ||
617 | kgdb_flush_swbreak_addr(addr); | 619 | kgdb_flush_swbreak_addr(addr); |
618 | kgdb_break[i].state = BP_ACTIVE; | 620 | kgdb_break[i].state = BP_ACTIVE; |
619 | } | 621 | } |
620 | return 0; | 622 | return 0; |
621 | } | 623 | } |
622 | 624 | ||
623 | static int kgdb_set_sw_break(unsigned long addr) | 625 | static int kgdb_set_sw_break(unsigned long addr) |
624 | { | 626 | { |
625 | int err = kgdb_validate_break_address(addr); | 627 | int err = kgdb_validate_break_address(addr); |
626 | int breakno = -1; | 628 | int breakno = -1; |
627 | int i; | 629 | int i; |
628 | 630 | ||
629 | if (err) | 631 | if (err) |
630 | return err; | 632 | return err; |
631 | 633 | ||
632 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | 634 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { |
633 | if ((kgdb_break[i].state == BP_SET) && | 635 | if ((kgdb_break[i].state == BP_SET) && |
634 | (kgdb_break[i].bpt_addr == addr)) | 636 | (kgdb_break[i].bpt_addr == addr)) |
635 | return -EEXIST; | 637 | return -EEXIST; |
636 | } | 638 | } |
637 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | 639 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { |
638 | if (kgdb_break[i].state == BP_REMOVED && | 640 | if (kgdb_break[i].state == BP_REMOVED && |
639 | kgdb_break[i].bpt_addr == addr) { | 641 | kgdb_break[i].bpt_addr == addr) { |
640 | breakno = i; | 642 | breakno = i; |
641 | break; | 643 | break; |
642 | } | 644 | } |
643 | } | 645 | } |
644 | 646 | ||
645 | if (breakno == -1) { | 647 | if (breakno == -1) { |
646 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | 648 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { |
647 | if (kgdb_break[i].state == BP_UNDEFINED) { | 649 | if (kgdb_break[i].state == BP_UNDEFINED) { |
648 | breakno = i; | 650 | breakno = i; |
649 | break; | 651 | break; |
650 | } | 652 | } |
651 | } | 653 | } |
652 | } | 654 | } |
653 | 655 | ||
654 | if (breakno == -1) | 656 | if (breakno == -1) |
655 | return -E2BIG; | 657 | return -E2BIG; |
656 | 658 | ||
657 | kgdb_break[breakno].state = BP_SET; | 659 | kgdb_break[breakno].state = BP_SET; |
658 | kgdb_break[breakno].type = BP_BREAKPOINT; | 660 | kgdb_break[breakno].type = BP_BREAKPOINT; |
659 | kgdb_break[breakno].bpt_addr = addr; | 661 | kgdb_break[breakno].bpt_addr = addr; |
660 | 662 | ||
661 | return 0; | 663 | return 0; |
662 | } | 664 | } |
663 | 665 | ||
664 | static int kgdb_deactivate_sw_breakpoints(void) | 666 | static int kgdb_deactivate_sw_breakpoints(void) |
665 | { | 667 | { |
666 | unsigned long addr; | 668 | unsigned long addr; |
667 | int error = 0; | 669 | int error = 0; |
668 | int i; | 670 | int i; |
669 | 671 | ||
670 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | 672 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { |
671 | if (kgdb_break[i].state != BP_ACTIVE) | 673 | if (kgdb_break[i].state != BP_ACTIVE) |
672 | continue; | 674 | continue; |
673 | addr = kgdb_break[i].bpt_addr; | 675 | addr = kgdb_break[i].bpt_addr; |
674 | error = kgdb_arch_remove_breakpoint(addr, | 676 | error = kgdb_arch_remove_breakpoint(addr, |
675 | kgdb_break[i].saved_instr); | 677 | kgdb_break[i].saved_instr); |
676 | if (error) | 678 | if (error) |
677 | return error; | 679 | return error; |
678 | 680 | ||
679 | kgdb_flush_swbreak_addr(addr); | 681 | kgdb_flush_swbreak_addr(addr); |
680 | kgdb_break[i].state = BP_SET; | 682 | kgdb_break[i].state = BP_SET; |
681 | } | 683 | } |
682 | return 0; | 684 | return 0; |
683 | } | 685 | } |
684 | 686 | ||
685 | static int kgdb_remove_sw_break(unsigned long addr) | 687 | static int kgdb_remove_sw_break(unsigned long addr) |
686 | { | 688 | { |
687 | int i; | 689 | int i; |
688 | 690 | ||
689 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | 691 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { |
690 | if ((kgdb_break[i].state == BP_SET) && | 692 | if ((kgdb_break[i].state == BP_SET) && |
691 | (kgdb_break[i].bpt_addr == addr)) { | 693 | (kgdb_break[i].bpt_addr == addr)) { |
692 | kgdb_break[i].state = BP_REMOVED; | 694 | kgdb_break[i].state = BP_REMOVED; |
693 | return 0; | 695 | return 0; |
694 | } | 696 | } |
695 | } | 697 | } |
696 | return -ENOENT; | 698 | return -ENOENT; |
697 | } | 699 | } |
698 | 700 | ||
699 | int kgdb_isremovedbreak(unsigned long addr) | 701 | int kgdb_isremovedbreak(unsigned long addr) |
700 | { | 702 | { |
701 | int i; | 703 | int i; |
702 | 704 | ||
703 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | 705 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { |
704 | if ((kgdb_break[i].state == BP_REMOVED) && | 706 | if ((kgdb_break[i].state == BP_REMOVED) && |
705 | (kgdb_break[i].bpt_addr == addr)) | 707 | (kgdb_break[i].bpt_addr == addr)) |
706 | return 1; | 708 | return 1; |
707 | } | 709 | } |
708 | return 0; | 710 | return 0; |
709 | } | 711 | } |
710 | 712 | ||
711 | int remove_all_break(void) | 713 | int remove_all_break(void) |
712 | { | 714 | { |
713 | unsigned long addr; | 715 | unsigned long addr; |
714 | int error; | 716 | int error; |
715 | int i; | 717 | int i; |
716 | 718 | ||
717 | /* Clear memory breakpoints. */ | 719 | /* Clear memory breakpoints. */ |
718 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | 720 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { |
719 | if (kgdb_break[i].state != BP_SET) | 721 | if (kgdb_break[i].state != BP_SET) |
720 | continue; | 722 | continue; |
721 | addr = kgdb_break[i].bpt_addr; | 723 | addr = kgdb_break[i].bpt_addr; |
722 | error = kgdb_arch_remove_breakpoint(addr, | 724 | error = kgdb_arch_remove_breakpoint(addr, |
723 | kgdb_break[i].saved_instr); | 725 | kgdb_break[i].saved_instr); |
724 | if (error) | 726 | if (error) |
725 | return error; | 727 | return error; |
726 | kgdb_break[i].state = BP_REMOVED; | 728 | kgdb_break[i].state = BP_REMOVED; |
727 | } | 729 | } |
728 | 730 | ||
729 | /* Clear hardware breakpoints. */ | 731 | /* Clear hardware breakpoints. */ |
730 | if (arch_kgdb_ops.remove_all_hw_break) | 732 | if (arch_kgdb_ops.remove_all_hw_break) |
731 | arch_kgdb_ops.remove_all_hw_break(); | 733 | arch_kgdb_ops.remove_all_hw_break(); |
732 | 734 | ||
733 | return 0; | 735 | return 0; |
734 | } | 736 | } |
735 | 737 | ||
/*
 * Remap normal tasks to their real PID; idle tasks (PID 0) are mapped
 * to -1 ... -NR_CPUs based on the CPU they run on.
 */
static inline int shadow_pid(int realpid)
{
	return realpid ? realpid : -1 - raw_smp_processor_id();
}
746 | 748 | ||
/* Scratch packet buffer for console-output ('O') packets. */
static char gdbmsgbuf[BUFMAX + 1];

/*
 * Send @len bytes of console text at @s to gdb as one or more 'O'
 * packets, each carrying up to (BUFMAX - 2) / 2 bytes hex-encoded.
 */
static void kgdb_msg_write(const char *s, int len)
{
	char *bufptr;
	int wcount;
	int i;

	/* 'O'utput */
	gdbmsgbuf[0] = 'O';

	/* Fill and send buffers... */
	while (len > 0) {
		bufptr = gdbmsgbuf + 1;

		/* Calculate how many this time (2 hex chars per byte) */
		if ((len << 1) > (BUFMAX - 2))
			wcount = (BUFMAX - 2) >> 1;
		else
			wcount = len;

		/* Pack in hex chars */
		for (i = 0; i < wcount; i++)
			bufptr = pack_hex_byte(bufptr, s[i]);
		*bufptr = '\0';

		/* Move up */
		s += wcount;
		len -= wcount;

		/* Write packet */
		put_packet(gdbmsgbuf);
	}
}
781 | 783 | ||
/*
 * Return true if there is a valid kgdb I/O module.  Also if no
 * debugger is attached a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument is only to be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 *
 * Note: once an I/O driver is registered this always returns 1 —
 * an unattached debugger only changes whether the wait message is
 * printed, not the result.
 */
static int kgdb_io_ready(int print_wait)
{
	/* No I/O driver registered: cannot talk to a debugger at all. */
	if (!kgdb_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	/* A breakpoint is being installed on purpose; proceed silently. */
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait)
		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
	return 1;
}
803 | 805 | ||
804 | /* | 806 | /* |
805 | * All the functions that start with gdb_cmd are the various | 807 | * All the functions that start with gdb_cmd are the various |
806 | * operations to implement the handlers for the gdbserial protocol | 808 | * operations to implement the handlers for the gdbserial protocol |
807 | * where KGDB is communicating with an external debugger | 809 | * where KGDB is communicating with an external debugger |
808 | */ | 810 | */ |
809 | 811 | ||
/* Handle the '?' status packets: reply "Sxx" with the stop signal. */
static void gdb_cmd_status(struct kgdb_state *ks)
{
	/*
	 * We know that this packet is only sent
	 * during initial connect. So to be safe,
	 * we clear out our breakpoints now in case
	 * GDB is reconnecting.
	 */
	remove_all_break();

	/* Reply 'S' plus the two-hex-digit signal number. */
	remcom_out_buffer[0] = 'S';
	pack_hex_byte(&remcom_out_buffer[1], ks->signo);
}
824 | 826 | ||
/*
 * Handle the 'g' get registers request: hex-encode the register set
 * of the selected thread into remcom_out_buffer.
 */
static void gdb_cmd_getregs(struct kgdb_state *ks)
{
	struct task_struct *thread;
	void *local_debuggerinfo;
	int i;

	thread = kgdb_usethread;
	if (!thread) {
		/* No explicit thread selected: use the trapping CPU's. */
		thread = kgdb_info[ks->cpu].task;
		local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
	} else {
		local_debuggerinfo = NULL;
		for (i = 0; i < NR_CPUS; i++) {
			/*
			 * Try to find the task on some other
			 * or possibly this node if we do not
			 * find the matching task then we try
			 * to approximate the results.
			 */
			if (thread == kgdb_info[i].task)
				local_debuggerinfo = kgdb_info[i].debuggerinfo;
		}
	}

	/*
	 * All threads that don't have debuggerinfo should be
	 * in __schedule() sleeping, since all other CPUs
	 * are in kgdb_wait, and thus have debuggerinfo.
	 */
	if (local_debuggerinfo) {
		/* Trapped thread: convert its saved pt_regs. */
		pt_regs_to_gdb_regs(gdb_regs, local_debuggerinfo);
	} else {
		/*
		 * Pull stuff saved during switch_to; nothing
		 * else is accessible (or even particularly
		 * relevant).
		 *
		 * This should be enough for a stack trace.
		 */
		sleeping_thread_to_gdb_regs(gdb_regs, thread);
	}
	kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES);
}
869 | 871 | ||
870 | /* Handle the 'G' set registers request */ | 872 | /* Handle the 'G' set registers request */ |
871 | static void gdb_cmd_setregs(struct kgdb_state *ks) | 873 | static void gdb_cmd_setregs(struct kgdb_state *ks) |
872 | { | 874 | { |
873 | kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES); | 875 | kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES); |
874 | 876 | ||
875 | if (kgdb_usethread && kgdb_usethread != current) { | 877 | if (kgdb_usethread && kgdb_usethread != current) { |
876 | error_packet(remcom_out_buffer, -EINVAL); | 878 | error_packet(remcom_out_buffer, -EINVAL); |
877 | } else { | 879 | } else { |
878 | gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs); | 880 | gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs); |
879 | strcpy(remcom_out_buffer, "OK"); | 881 | strcpy(remcom_out_buffer, "OK"); |
880 | } | 882 | } |
881 | } | 883 | } |
882 | 884 | ||
883 | /* Handle the 'm' memory read bytes */ | 885 | /* Handle the 'm' memory read bytes */ |
884 | static void gdb_cmd_memread(struct kgdb_state *ks) | 886 | static void gdb_cmd_memread(struct kgdb_state *ks) |
885 | { | 887 | { |
886 | char *ptr = &remcom_in_buffer[1]; | 888 | char *ptr = &remcom_in_buffer[1]; |
887 | unsigned long length; | 889 | unsigned long length; |
888 | unsigned long addr; | 890 | unsigned long addr; |
889 | int err; | 891 | int err; |
890 | 892 | ||
891 | if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' && | 893 | if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' && |
892 | kgdb_hex2long(&ptr, &length) > 0) { | 894 | kgdb_hex2long(&ptr, &length) > 0) { |
893 | err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length); | 895 | err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length); |
894 | if (err) | 896 | if (err) |
895 | error_packet(remcom_out_buffer, err); | 897 | error_packet(remcom_out_buffer, err); |
896 | } else { | 898 | } else { |
897 | error_packet(remcom_out_buffer, -EINVAL); | 899 | error_packet(remcom_out_buffer, -EINVAL); |
898 | } | 900 | } |
899 | } | 901 | } |
900 | 902 | ||
901 | /* Handle the 'M' memory write bytes */ | 903 | /* Handle the 'M' memory write bytes */ |
902 | static void gdb_cmd_memwrite(struct kgdb_state *ks) | 904 | static void gdb_cmd_memwrite(struct kgdb_state *ks) |
903 | { | 905 | { |
904 | int err = write_mem_msg(0); | 906 | int err = write_mem_msg(0); |
905 | 907 | ||
906 | if (err) | 908 | if (err) |
907 | error_packet(remcom_out_buffer, err); | 909 | error_packet(remcom_out_buffer, err); |
908 | else | 910 | else |
909 | strcpy(remcom_out_buffer, "OK"); | 911 | strcpy(remcom_out_buffer, "OK"); |
910 | } | 912 | } |
911 | 913 | ||
912 | /* Handle the 'X' memory binary write bytes */ | 914 | /* Handle the 'X' memory binary write bytes */ |
913 | static void gdb_cmd_binwrite(struct kgdb_state *ks) | 915 | static void gdb_cmd_binwrite(struct kgdb_state *ks) |
914 | { | 916 | { |
915 | int err = write_mem_msg(1); | 917 | int err = write_mem_msg(1); |
916 | 918 | ||
917 | if (err) | 919 | if (err) |
918 | error_packet(remcom_out_buffer, err); | 920 | error_packet(remcom_out_buffer, err); |
919 | else | 921 | else |
920 | strcpy(remcom_out_buffer, "OK"); | 922 | strcpy(remcom_out_buffer, "OK"); |
921 | } | 923 | } |
922 | 924 | ||
923 | /* Handle the 'D' or 'k', detach or kill packets */ | 925 | /* Handle the 'D' or 'k', detach or kill packets */ |
924 | static void gdb_cmd_detachkill(struct kgdb_state *ks) | 926 | static void gdb_cmd_detachkill(struct kgdb_state *ks) |
925 | { | 927 | { |
926 | int error; | 928 | int error; |
927 | 929 | ||
928 | /* The detach case */ | 930 | /* The detach case */ |
929 | if (remcom_in_buffer[0] == 'D') { | 931 | if (remcom_in_buffer[0] == 'D') { |
930 | error = remove_all_break(); | 932 | error = remove_all_break(); |
931 | if (error < 0) { | 933 | if (error < 0) { |
932 | error_packet(remcom_out_buffer, error); | 934 | error_packet(remcom_out_buffer, error); |
933 | } else { | 935 | } else { |
934 | strcpy(remcom_out_buffer, "OK"); | 936 | strcpy(remcom_out_buffer, "OK"); |
935 | kgdb_connected = 0; | 937 | kgdb_connected = 0; |
936 | } | 938 | } |
937 | put_packet(remcom_out_buffer); | 939 | put_packet(remcom_out_buffer); |
938 | } else { | 940 | } else { |
939 | /* | 941 | /* |
940 | * Assume the kill case, with no exit code checking, | 942 | * Assume the kill case, with no exit code checking, |
941 | * trying to force detach the debugger: | 943 | * trying to force detach the debugger: |
942 | */ | 944 | */ |
943 | remove_all_break(); | 945 | remove_all_break(); |
944 | kgdb_connected = 0; | 946 | kgdb_connected = 0; |
945 | } | 947 | } |
946 | } | 948 | } |
947 | 949 | ||
948 | /* Handle the 'R' reboot packets */ | 950 | /* Handle the 'R' reboot packets */ |
949 | static int gdb_cmd_reboot(struct kgdb_state *ks) | 951 | static int gdb_cmd_reboot(struct kgdb_state *ks) |
950 | { | 952 | { |
951 | /* For now, only honor R0 */ | 953 | /* For now, only honor R0 */ |
952 | if (strcmp(remcom_in_buffer, "R0") == 0) { | 954 | if (strcmp(remcom_in_buffer, "R0") == 0) { |
953 | printk(KERN_CRIT "Executing emergency reboot\n"); | 955 | printk(KERN_CRIT "Executing emergency reboot\n"); |
954 | strcpy(remcom_out_buffer, "OK"); | 956 | strcpy(remcom_out_buffer, "OK"); |
955 | put_packet(remcom_out_buffer); | 957 | put_packet(remcom_out_buffer); |
956 | 958 | ||
957 | /* | 959 | /* |
958 | * Execution should not return from | 960 | * Execution should not return from |
959 | * machine_emergency_restart() | 961 | * machine_emergency_restart() |
960 | */ | 962 | */ |
961 | machine_emergency_restart(); | 963 | machine_emergency_restart(); |
962 | kgdb_connected = 0; | 964 | kgdb_connected = 0; |
963 | 965 | ||
964 | return 1; | 966 | return 1; |
965 | } | 967 | } |
966 | return 0; | 968 | return 0; |
967 | } | 969 | } |
968 | 970 | ||
/*
 * Handle the 'q' query packets:
 *   qfThreadInfo / qsThreadInfo - first/subsequent thread-list chunk
 *   qC                          - current thread id
 *   qThreadExtraInfo,<id>       - printable description of a thread
 */
static void gdb_cmd_query(struct kgdb_state *ks)
{
	struct task_struct *thread;
	unsigned char thref[8];
	char *ptr;
	int i;

	switch (remcom_in_buffer[1]) {
	case 's':
	case 'f':
		if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) {
			error_packet(remcom_out_buffer, -EINVAL);
			break;
		}

		/* 'f' starts a fresh enumeration; 's' resumes from the
		 * threadid left over from the previous chunk. */
		if (remcom_in_buffer[1] == 'f')
			ks->threadid = 1;

		remcom_out_buffer[0] = 'm';
		ptr = remcom_out_buffer + 1;

		/*
		 * Pack up to 17 thread ids per reply packet; i only
		 * advances when getthread() finds a live task for the
		 * candidate id.
		 * NOTE(review): nothing visible here bounds ks->threadid
		 * if fewer than 17 further threads exist — presumably
		 * getthread() terminates the scan; verify.
		 */
		for (i = 0; i < 17; ks->threadid++) {
			thread = getthread(ks->linux_regs, ks->threadid);
			if (thread) {
				int_to_threadref(thref, ks->threadid);
				pack_threadid(ptr, thref);
				ptr += BUF_THREAD_ID_SIZE;
				*(ptr++) = ',';
				i++;
			}
		}
		/* Overwrite the trailing ',' with the terminator. */
		*(--ptr) = '\0';
		break;

	case 'C':
		/* Current thread id */
		strcpy(remcom_out_buffer, "QC");
		ks->threadid = shadow_pid(current->pid);
		int_to_threadref(thref, ks->threadid);
		pack_threadid(remcom_out_buffer + 2, thref);
		break;
	case 'T':
		if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) {
			error_packet(remcom_out_buffer, -EINVAL);
			break;
		}
		ks->threadid = 0;
		/* Hex thread id follows "qThreadExtraInfo," (17 chars). */
		ptr = remcom_in_buffer + 17;
		kgdb_hex2long(&ptr, &ks->threadid);
		if (!getthread(ks->linux_regs, ks->threadid)) {
			error_packet(remcom_out_buffer, -EINVAL);
			break;
		}
		if (ks->threadid > 0) {
			/* Real task: reply with its comm name, hex-encoded. */
			kgdb_mem2hex(getthread(ks->linux_regs,
					ks->threadid)->comm,
					remcom_out_buffer, 16);
		} else {
			/* Negative ids are per-CPU shadow threads. */
			static char tmpstr[23 + BUF_THREAD_ID_SIZE];

			sprintf(tmpstr, "Shadow task %d for pid 0",
					(int)(-ks->threadid-1));
			kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
		}
		break;
	}
}
1037 | 1039 | ||
1038 | /* Handle the 'H' task query packets */ | 1040 | /* Handle the 'H' task query packets */ |
1039 | static void gdb_cmd_task(struct kgdb_state *ks) | 1041 | static void gdb_cmd_task(struct kgdb_state *ks) |
1040 | { | 1042 | { |
1041 | struct task_struct *thread; | 1043 | struct task_struct *thread; |
1042 | char *ptr; | 1044 | char *ptr; |
1043 | 1045 | ||
1044 | switch (remcom_in_buffer[1]) { | 1046 | switch (remcom_in_buffer[1]) { |
1045 | case 'g': | 1047 | case 'g': |
1046 | ptr = &remcom_in_buffer[2]; | 1048 | ptr = &remcom_in_buffer[2]; |
1047 | kgdb_hex2long(&ptr, &ks->threadid); | 1049 | kgdb_hex2long(&ptr, &ks->threadid); |
1048 | thread = getthread(ks->linux_regs, ks->threadid); | 1050 | thread = getthread(ks->linux_regs, ks->threadid); |
1049 | if (!thread && ks->threadid > 0) { | 1051 | if (!thread && ks->threadid > 0) { |
1050 | error_packet(remcom_out_buffer, -EINVAL); | 1052 | error_packet(remcom_out_buffer, -EINVAL); |
1051 | break; | 1053 | break; |
1052 | } | 1054 | } |
1053 | kgdb_usethread = thread; | 1055 | kgdb_usethread = thread; |
1054 | ks->kgdb_usethreadid = ks->threadid; | 1056 | ks->kgdb_usethreadid = ks->threadid; |
1055 | strcpy(remcom_out_buffer, "OK"); | 1057 | strcpy(remcom_out_buffer, "OK"); |
1056 | break; | 1058 | break; |
1057 | case 'c': | 1059 | case 'c': |
1058 | ptr = &remcom_in_buffer[2]; | 1060 | ptr = &remcom_in_buffer[2]; |
1059 | kgdb_hex2long(&ptr, &ks->threadid); | 1061 | kgdb_hex2long(&ptr, &ks->threadid); |
1060 | if (!ks->threadid) { | 1062 | if (!ks->threadid) { |
1061 | kgdb_contthread = NULL; | 1063 | kgdb_contthread = NULL; |
1062 | } else { | 1064 | } else { |
1063 | thread = getthread(ks->linux_regs, ks->threadid); | 1065 | thread = getthread(ks->linux_regs, ks->threadid); |
1064 | if (!thread && ks->threadid > 0) { | 1066 | if (!thread && ks->threadid > 0) { |
1065 | error_packet(remcom_out_buffer, -EINVAL); | 1067 | error_packet(remcom_out_buffer, -EINVAL); |
1066 | break; | 1068 | break; |
1067 | } | 1069 | } |
1068 | kgdb_contthread = thread; | 1070 | kgdb_contthread = thread; |
1069 | } | 1071 | } |
1070 | strcpy(remcom_out_buffer, "OK"); | 1072 | strcpy(remcom_out_buffer, "OK"); |
1071 | break; | 1073 | break; |
1072 | } | 1074 | } |
1073 | } | 1075 | } |
1074 | 1076 | ||
1075 | /* Handle the 'T' thread query packets */ | 1077 | /* Handle the 'T' thread query packets */ |
1076 | static void gdb_cmd_thread(struct kgdb_state *ks) | 1078 | static void gdb_cmd_thread(struct kgdb_state *ks) |
1077 | { | 1079 | { |
1078 | char *ptr = &remcom_in_buffer[1]; | 1080 | char *ptr = &remcom_in_buffer[1]; |
1079 | struct task_struct *thread; | 1081 | struct task_struct *thread; |
1080 | 1082 | ||
1081 | kgdb_hex2long(&ptr, &ks->threadid); | 1083 | kgdb_hex2long(&ptr, &ks->threadid); |
1082 | thread = getthread(ks->linux_regs, ks->threadid); | 1084 | thread = getthread(ks->linux_regs, ks->threadid); |
1083 | if (thread) | 1085 | if (thread) |
1084 | strcpy(remcom_out_buffer, "OK"); | 1086 | strcpy(remcom_out_buffer, "OK"); |
1085 | else | 1087 | else |
1086 | error_packet(remcom_out_buffer, -EINVAL); | 1088 | error_packet(remcom_out_buffer, -EINVAL); |
1087 | } | 1089 | } |
1088 | 1090 | ||
/*
 * Handle the 'z' (remove) or 'Z' (set) breakpoint packets.
 * Packet format: Z<type>,<addr>,<length> / z<type>,<addr>,<length>
 */
static void gdb_cmd_break(struct kgdb_state *ks)
{
	/*
	 * Since GDB-5.3, it's been drafted that '0' is a software
	 * breakpoint, '1' is a hardware breakpoint, so let's do that.
	 */
	char *bpt_type = &remcom_in_buffer[1];
	char *ptr = &remcom_in_buffer[2];
	unsigned long addr;
	unsigned long length;
	int error = 0;

	if (arch_kgdb_ops.set_hw_breakpoint && *bpt_type >= '1') {
		/* Types above '4' (watchpoint variants) are unsupported. */
		if (*bpt_type > '4')
			return;
	} else {
		/* Without arch hw support only sw ('0'/'1') are valid. */
		if (*bpt_type != '0' && *bpt_type != '1')
			/* Unsupported. */
			return;
	}

	/*
	 * Test if this is a hardware breakpoint, and
	 * if we support it:
	 */
	if (*bpt_type == '1' && !(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT))
		/* Unsupported. */
		return;

	/* Parse ",<addr>,<length>"; any malformed field is an error. */
	if (*(ptr++) != ',') {
		error_packet(remcom_out_buffer, -EINVAL);
		return;
	}
	if (!kgdb_hex2long(&ptr, &addr)) {
		error_packet(remcom_out_buffer, -EINVAL);
		return;
	}
	if (*(ptr++) != ',' ||
		!kgdb_hex2long(&ptr, &length)) {
		error_packet(remcom_out_buffer, -EINVAL);
		return;
	}

	/* Type '0' goes to the generic sw-break list; everything else
	 * is delegated to the architecture's hw-breakpoint hooks. */
	if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0')
		error = kgdb_set_sw_break(addr);
	else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0')
		error = kgdb_remove_sw_break(addr);
	else if (remcom_in_buffer[0] == 'Z')
		error = arch_kgdb_ops.set_hw_breakpoint(addr,
			(int)length, *bpt_type);
	else if (remcom_in_buffer[0] == 'z')
		error = arch_kgdb_ops.remove_hw_breakpoint(addr,
			(int) length, *bpt_type);

	if (error == 0)
		strcpy(remcom_out_buffer, "OK");
	else
		error_packet(remcom_out_buffer, error);
}
1150 | 1152 | ||
1151 | /* Handle the 'C' signal / exception passing packets */ | 1153 | /* Handle the 'C' signal / exception passing packets */ |
1152 | static int gdb_cmd_exception_pass(struct kgdb_state *ks) | 1154 | static int gdb_cmd_exception_pass(struct kgdb_state *ks) |
1153 | { | 1155 | { |
1154 | /* C09 == pass exception | 1156 | /* C09 == pass exception |
1155 | * C15 == detach kgdb, pass exception | 1157 | * C15 == detach kgdb, pass exception |
1156 | */ | 1158 | */ |
1157 | if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') { | 1159 | if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') { |
1158 | 1160 | ||
1159 | ks->pass_exception = 1; | 1161 | ks->pass_exception = 1; |
1160 | remcom_in_buffer[0] = 'c'; | 1162 | remcom_in_buffer[0] = 'c'; |
1161 | 1163 | ||
1162 | } else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') { | 1164 | } else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') { |
1163 | 1165 | ||
1164 | ks->pass_exception = 1; | 1166 | ks->pass_exception = 1; |
1165 | remcom_in_buffer[0] = 'D'; | 1167 | remcom_in_buffer[0] = 'D'; |
1166 | remove_all_break(); | 1168 | remove_all_break(); |
1167 | kgdb_connected = 0; | 1169 | kgdb_connected = 0; |
1168 | return 1; | 1170 | return 1; |
1169 | 1171 | ||
1170 | } else { | 1172 | } else { |
1171 | error_packet(remcom_out_buffer, -EINVAL); | 1173 | error_packet(remcom_out_buffer, -EINVAL); |
1172 | return 0; | 1174 | return 0; |
1173 | } | 1175 | } |
1174 | 1176 | ||
1175 | /* Indicate fall through */ | 1177 | /* Indicate fall through */ |
1176 | return -1; | 1178 | return -1; |
1177 | } | 1179 | } |
1178 | 1180 | ||
/*
 * gdb_serial_stub - main gdbserial command processing loop
 *
 * Announces the stop to an already-connected debugger with a 'T'
 * stop-reply packet, then reads and dispatches packets until one of
 * them hands control back to the kernel (continue, single step,
 * detach, kill, or an arch-handled packet).  Returns non-zero when
 * the trapping exception should be passed on to the kernel's normal
 * exception handling.
 */
static int gdb_serial_stub(struct kgdb_state *ks)
{
	int error = 0;
	int tmp;

	/* Clear the out buffer. */
	memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));

	if (kgdb_connected) {
		unsigned char thref[8];
		char *ptr;

		/* Reply to host that an exception has occurred:
		 * "T<signo>thread:<id>;" */
		ptr = remcom_out_buffer;
		*ptr++ = 'T';
		ptr = pack_hex_byte(ptr, ks->signo);
		ptr += strlen(strcpy(ptr, "thread:"));
		int_to_threadref(thref, shadow_pid(current->pid));
		ptr = pack_threadid(ptr, thref);
		*ptr++ = ';';
		put_packet(remcom_out_buffer);
	}

	/* Default the debug focus to the task on the trapping CPU. */
	kgdb_usethread = kgdb_info[ks->cpu].task;
	ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
	ks->pass_exception = 0;

	while (1) {
		error = 0;

		/* Clear the out buffer. */
		memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));

		get_packet(remcom_in_buffer);

		/* Dispatch on the first character of the packet. */
		switch (remcom_in_buffer[0]) {
		case '?': /* gdbserial status */
			gdb_cmd_status(ks);
			break;
		case 'g': /* return the value of the CPU registers */
			gdb_cmd_getregs(ks);
			break;
		case 'G': /* set the value of the CPU registers - return OK */
			gdb_cmd_setregs(ks);
			break;
		case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
			gdb_cmd_memread(ks);
			break;
		case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */
			gdb_cmd_memwrite(ks);
			break;
		case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */
			gdb_cmd_binwrite(ks);
			break;
		/* kill or detach. KGDB should treat this like a
		 * continue.
		 */
		case 'D': /* Debugger detach */
		case 'k': /* Debugger detach via kill */
			gdb_cmd_detachkill(ks);
			goto default_handle;
		case 'R': /* Reboot */
			if (gdb_cmd_reboot(ks))
				goto default_handle;
			break;
		case 'q': /* query command */
			gdb_cmd_query(ks);
			break;
		case 'H': /* task related */
			gdb_cmd_task(ks);
			break;
		case 'T': /* Query thread status */
			gdb_cmd_thread(ks);
			break;
		case 'z': /* Break point remove */
		case 'Z': /* Break point set */
			gdb_cmd_break(ks);
			break;
		case 'C': /* Exception passing */
			tmp = gdb_cmd_exception_pass(ks);
			if (tmp > 0)
				goto default_handle;
			if (tmp == 0)
				break;
			/* Fall through on tmp < 0 */
		case 'c': /* Continue packet */
		case 's': /* Single step packet */
			if (kgdb_contthread && kgdb_contthread != current) {
				/* Can't switch threads in kgdb */
				error_packet(remcom_out_buffer, -EINVAL);
				break;
			}
			/* Re-arm sw breakpoints before resuming. */
			kgdb_activate_sw_breakpoints();
			/* Fall through to default processing */
		default:
default_handle:
			error = kgdb_arch_handle_exception(ks->ex_vector,
						ks->signo,
						ks->err_code,
						remcom_in_buffer,
						remcom_out_buffer,
						ks->linux_regs);
			/*
			 * Leave cmd processing on error, detach,
			 * kill, continue, or single step.
			 */
			if (error >= 0 || remcom_in_buffer[0] == 'D' ||
			    remcom_in_buffer[0] == 'k') {
				error = 0;
				goto kgdb_exit;
			}

		}

		/* reply to the request */
		put_packet(remcom_out_buffer);
	}

kgdb_exit:
	/* An exception being passed on forces a non-zero return. */
	if (ks->pass_exception)
		error = 1;
	return error;
}
1305 | 1307 | ||
/*
 * kgdb_reenter_check - detect and handle recursive debugger entry
 *
 * Returns 0 when this is a normal (first) entry on this CPU.  If the
 * debugger is already active here, attempt to recover: when the new
 * trap came from a user-planted breakpoint at a spot KGDB itself
 * needs, remove that one breakpoint, warn, and return 1 so the caller
 * can bail out.  Otherwise kill all breakpoints and panic.
 */
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	/* Not a re-entry unless this CPU already owns the kgdb lock. */
	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	kgdb_deactivate_sw_breakpoints();

	/*
	 * If the break point removed ok at the place exception
	 * occurred, try to recover and print a warning to the end
	 * user because the user planted a breakpoint in a place that
	 * KGDB needs in order to function.
	 */
	if (kgdb_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		kgdb_activate_sw_breakpoints();
		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed\n");
		WARN_ON_ONCE(1);

		return 1;
	}
	/* Unrecoverable: drop every breakpoint and skip the trap. */
	remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	/* A second-level recursion means recovery already failed once. */
	if (exception_level > 1) {
		dump_stack();
		panic("Recursive entry to debugger");
	}

	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
	dump_stack();
	panic("Recursive entry to debugger");

	/* Not reached: panic() does not return. */
	return 1;
}
1347 | 1349 | ||
1348 | /* | 1350 | /* |
1349 | * kgdb_handle_exception() - main entry point from a kernel exception | 1351 | * kgdb_handle_exception() - main entry point from a kernel exception |
1350 | * | 1352 | * |
1351 | * Locking hierarchy: | 1353 | * Locking hierarchy: |
1352 | * interface locks, if any (begin_session) | 1354 | * interface locks, if any (begin_session) |
1353 | * kgdb lock (kgdb_active) | 1355 | * kgdb lock (kgdb_active) |
1354 | */ | 1356 | */ |
1355 | int | 1357 | int |
1356 | kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) | 1358 | kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) |
1357 | { | 1359 | { |
1358 | struct kgdb_state kgdb_var; | 1360 | struct kgdb_state kgdb_var; |
1359 | struct kgdb_state *ks = &kgdb_var; | 1361 | struct kgdb_state *ks = &kgdb_var; |
1360 | unsigned long flags; | 1362 | unsigned long flags; |
1361 | int error = 0; | 1363 | int error = 0; |
1362 | int i, cpu; | 1364 | int i, cpu; |
1363 | 1365 | ||
1364 | ks->cpu = raw_smp_processor_id(); | 1366 | ks->cpu = raw_smp_processor_id(); |
1365 | ks->ex_vector = evector; | 1367 | ks->ex_vector = evector; |
1366 | ks->signo = signo; | 1368 | ks->signo = signo; |
1367 | ks->ex_vector = evector; | 1369 | ks->ex_vector = evector; |
1368 | ks->err_code = ecode; | 1370 | ks->err_code = ecode; |
1369 | ks->kgdb_usethreadid = 0; | 1371 | ks->kgdb_usethreadid = 0; |
1370 | ks->linux_regs = regs; | 1372 | ks->linux_regs = regs; |
1371 | 1373 | ||
1372 | if (kgdb_reenter_check(ks)) | 1374 | if (kgdb_reenter_check(ks)) |
1373 | return 0; /* Ouch, double exception ! */ | 1375 | return 0; /* Ouch, double exception ! */ |
1374 | 1376 | ||
1375 | acquirelock: | 1377 | acquirelock: |
1376 | /* | 1378 | /* |
1377 | * Interrupts will be restored by the 'trap return' code, except when | 1379 | * Interrupts will be restored by the 'trap return' code, except when |
1378 | * single stepping. | 1380 | * single stepping. |
1379 | */ | 1381 | */ |
1380 | local_irq_save(flags); | 1382 | local_irq_save(flags); |
1381 | 1383 | ||
1382 | cpu = raw_smp_processor_id(); | 1384 | cpu = raw_smp_processor_id(); |
1383 | 1385 | ||
1384 | /* | 1386 | /* |
1385 | * Acquire the kgdb_active lock: | 1387 | * Acquire the kgdb_active lock: |
1386 | */ | 1388 | */ |
1387 | while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1) | 1389 | while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1) |
1388 | cpu_relax(); | 1390 | cpu_relax(); |
1389 | 1391 | ||
1390 | /* | 1392 | /* |
1391 | * Do not start the debugger connection on this CPU if the last | 1393 | * Do not start the debugger connection on this CPU if the last |
1392 | * instance of the exception handler wanted to come into the | 1394 | * instance of the exception handler wanted to come into the |
1393 | * debugger on a different CPU via a single step | 1395 | * debugger on a different CPU via a single step |
1394 | */ | 1396 | */ |
1395 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && | 1397 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && |
1396 | atomic_read(&kgdb_cpu_doing_single_step) != cpu) { | 1398 | atomic_read(&kgdb_cpu_doing_single_step) != cpu) { |
1397 | 1399 | ||
1398 | atomic_set(&kgdb_active, -1); | 1400 | atomic_set(&kgdb_active, -1); |
1401 | clocksource_touch_watchdog(); | ||
1399 | local_irq_restore(flags); | 1402 | local_irq_restore(flags); |
1400 | 1403 | ||
1401 | goto acquirelock; | 1404 | goto acquirelock; |
1402 | } | 1405 | } |
1403 | 1406 | ||
1404 | if (!kgdb_io_ready(1)) { | 1407 | if (!kgdb_io_ready(1)) { |
1405 | error = 1; | 1408 | error = 1; |
1406 | goto kgdb_restore; /* No I/O connection, so resume the system */ | 1409 | goto kgdb_restore; /* No I/O connection, so resume the system */ |
1407 | } | 1410 | } |
1408 | 1411 | ||
1409 | /* | 1412 | /* |
1410 | * Don't enter if we have hit a removed breakpoint. | 1413 | * Don't enter if we have hit a removed breakpoint. |
1411 | */ | 1414 | */ |
1412 | if (kgdb_skipexception(ks->ex_vector, ks->linux_regs)) | 1415 | if (kgdb_skipexception(ks->ex_vector, ks->linux_regs)) |
1413 | goto kgdb_restore; | 1416 | goto kgdb_restore; |
1414 | 1417 | ||
1415 | /* Call the I/O driver's pre_exception routine */ | 1418 | /* Call the I/O driver's pre_exception routine */ |
1416 | if (kgdb_io_ops->pre_exception) | 1419 | if (kgdb_io_ops->pre_exception) |
1417 | kgdb_io_ops->pre_exception(); | 1420 | kgdb_io_ops->pre_exception(); |
1418 | 1421 | ||
1419 | kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs; | 1422 | kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs; |
1420 | kgdb_info[ks->cpu].task = current; | 1423 | kgdb_info[ks->cpu].task = current; |
1421 | 1424 | ||
1422 | kgdb_disable_hw_debug(ks->linux_regs); | 1425 | kgdb_disable_hw_debug(ks->linux_regs); |
1423 | 1426 | ||
1424 | /* | 1427 | /* |
1425 | * Get the passive CPU lock which will hold all the non-primary | 1428 | * Get the passive CPU lock which will hold all the non-primary |
1426 | * CPU in a spin state while the debugger is active | 1429 | * CPU in a spin state while the debugger is active |
1427 | */ | 1430 | */ |
1428 | if (!kgdb_single_step || !kgdb_contthread) { | 1431 | if (!kgdb_single_step || !kgdb_contthread) { |
1429 | for (i = 0; i < NR_CPUS; i++) | 1432 | for (i = 0; i < NR_CPUS; i++) |
1430 | atomic_set(&passive_cpu_wait[i], 1); | 1433 | atomic_set(&passive_cpu_wait[i], 1); |
1431 | } | 1434 | } |
1432 | 1435 | ||
1433 | #ifdef CONFIG_SMP | 1436 | #ifdef CONFIG_SMP |
1434 | /* Signal the other CPUs to enter kgdb_wait() */ | 1437 | /* Signal the other CPUs to enter kgdb_wait() */ |
1435 | if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup) | 1438 | if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup) |
1436 | kgdb_roundup_cpus(flags); | 1439 | kgdb_roundup_cpus(flags); |
1437 | #endif | 1440 | #endif |
1438 | 1441 | ||
1439 | /* | 1442 | /* |
1440 | * spin_lock code is good enough as a barrier so we don't | 1443 | * spin_lock code is good enough as a barrier so we don't |
1441 | * need one here: | 1444 | * need one here: |
1442 | */ | 1445 | */ |
1443 | atomic_set(&cpu_in_kgdb[ks->cpu], 1); | 1446 | atomic_set(&cpu_in_kgdb[ks->cpu], 1); |
1444 | 1447 | ||
1445 | /* | 1448 | /* |
1446 | * Wait for the other CPUs to be notified and be waiting for us: | 1449 | * Wait for the other CPUs to be notified and be waiting for us: |
1447 | */ | 1450 | */ |
1448 | for_each_online_cpu(i) { | 1451 | for_each_online_cpu(i) { |
1449 | while (!atomic_read(&cpu_in_kgdb[i])) | 1452 | while (!atomic_read(&cpu_in_kgdb[i])) |
1450 | cpu_relax(); | 1453 | cpu_relax(); |
1451 | } | 1454 | } |
1452 | 1455 | ||
1453 | /* | 1456 | /* |
1454 | * At this point the primary processor is completely | 1457 | * At this point the primary processor is completely |
1455 | * in the debugger and all secondary CPUs are quiescent | 1458 | * in the debugger and all secondary CPUs are quiescent |
1456 | */ | 1459 | */ |
1457 | kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code); | 1460 | kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code); |
1458 | kgdb_deactivate_sw_breakpoints(); | 1461 | kgdb_deactivate_sw_breakpoints(); |
1459 | kgdb_single_step = 0; | 1462 | kgdb_single_step = 0; |
1460 | kgdb_contthread = NULL; | 1463 | kgdb_contthread = NULL; |
1461 | exception_level = 0; | 1464 | exception_level = 0; |
1462 | 1465 | ||
1463 | /* Talk to debugger with gdbserial protocol */ | 1466 | /* Talk to debugger with gdbserial protocol */ |
1464 | error = gdb_serial_stub(ks); | 1467 | error = gdb_serial_stub(ks); |
1465 | 1468 | ||
1466 | /* Call the I/O driver's post_exception routine */ | 1469 | /* Call the I/O driver's post_exception routine */ |
1467 | if (kgdb_io_ops->post_exception) | 1470 | if (kgdb_io_ops->post_exception) |
1468 | kgdb_io_ops->post_exception(); | 1471 | kgdb_io_ops->post_exception(); |
1469 | 1472 | ||
1470 | kgdb_info[ks->cpu].debuggerinfo = NULL; | 1473 | kgdb_info[ks->cpu].debuggerinfo = NULL; |
1471 | kgdb_info[ks->cpu].task = NULL; | 1474 | kgdb_info[ks->cpu].task = NULL; |
1472 | atomic_set(&cpu_in_kgdb[ks->cpu], 0); | 1475 | atomic_set(&cpu_in_kgdb[ks->cpu], 0); |
1473 | 1476 | ||
1474 | if (!kgdb_single_step || !kgdb_contthread) { | 1477 | if (!kgdb_single_step || !kgdb_contthread) { |
1475 | for (i = NR_CPUS-1; i >= 0; i--) | 1478 | for (i = NR_CPUS-1; i >= 0; i--) |
1476 | atomic_set(&passive_cpu_wait[i], 0); | 1479 | atomic_set(&passive_cpu_wait[i], 0); |
1477 | /* | 1480 | /* |
1478 | * Wait till all the CPUs have quit | 1481 | * Wait till all the CPUs have quit |
1479 | * from the debugger. | 1482 | * from the debugger. |
1480 | */ | 1483 | */ |
1481 | for_each_online_cpu(i) { | 1484 | for_each_online_cpu(i) { |
1482 | while (atomic_read(&cpu_in_kgdb[i])) | 1485 | while (atomic_read(&cpu_in_kgdb[i])) |
1483 | cpu_relax(); | 1486 | cpu_relax(); |
1484 | } | 1487 | } |
1485 | } | 1488 | } |
1486 | 1489 | ||
1487 | kgdb_restore: | 1490 | kgdb_restore: |
1488 | /* Free kgdb_active */ | 1491 | /* Free kgdb_active */ |
1489 | atomic_set(&kgdb_active, -1); | 1492 | atomic_set(&kgdb_active, -1); |
1493 | clocksource_touch_watchdog(); | ||
1490 | local_irq_restore(flags); | 1494 | local_irq_restore(flags); |
1491 | 1495 | ||
1492 | return error; | 1496 | return error; |
1493 | } | 1497 | } |
1494 | 1498 | ||
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	/* Park this CPU in kgdb_wait() when another CPU owns the
	 * debugger and we are not already spinning in it. */
	int must_wait = !atomic_read(&cpu_in_kgdb[cpu]) &&
			atomic_read(&kgdb_active) != cpu;

	if (must_wait) {
		kgdb_wait((struct pt_regs *)regs);
		return 0;
	}
#endif
	return 1;
}
1506 | 1510 | ||
1507 | void kgdb_console_write(struct console *co, const char *s, unsigned count) | 1511 | void kgdb_console_write(struct console *co, const char *s, unsigned count) |
1508 | { | 1512 | { |
1509 | unsigned long flags; | 1513 | unsigned long flags; |
1510 | 1514 | ||
1511 | /* If we're debugging, or KGDB has not connected, don't try | 1515 | /* If we're debugging, or KGDB has not connected, don't try |
1512 | * and print. */ | 1516 | * and print. */ |
1513 | if (!kgdb_connected || atomic_read(&kgdb_active) != -1) | 1517 | if (!kgdb_connected || atomic_read(&kgdb_active) != -1) |
1514 | return; | 1518 | return; |
1515 | 1519 | ||
1516 | local_irq_save(flags); | 1520 | local_irq_save(flags); |
1517 | kgdb_msg_write(s, count); | 1521 | kgdb_msg_write(s, count); |
1518 | local_irq_restore(flags); | 1522 | local_irq_restore(flags); |
1519 | } | 1523 | } |
1520 | 1524 | ||
/* Console that mirrors kernel messages to the attached gdb via
 * kgdb_console_write(); enabled with the "kgdbcon" option. */
static struct console kgdbcons = {
	.name		= "kgdb",
	.write		= kgdb_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
	.index		= -1,
};
1527 | 1531 | ||
#ifdef CONFIG_MAGIC_SYSRQ
/* SysRq-g handler: trap into the debugger if an I/O driver is loaded. */
static void sysrq_handle_gdb(int key, struct tty_struct *tty)
{
	if (kgdb_io_ops == NULL) {
		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
		return;
	}

	if (!kgdb_connected)
		printk(KERN_CRIT "Entering KGDB\n");

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_gdb_op = {
	.handler	= sysrq_handle_gdb,
	.help_msg	= "Gdb",
	.action_msg	= "GDB",
};
#endif
1547 | 1551 | ||
1548 | static void kgdb_register_callbacks(void) | 1552 | static void kgdb_register_callbacks(void) |
1549 | { | 1553 | { |
1550 | if (!kgdb_io_module_registered) { | 1554 | if (!kgdb_io_module_registered) { |
1551 | kgdb_io_module_registered = 1; | 1555 | kgdb_io_module_registered = 1; |
1552 | kgdb_arch_init(); | 1556 | kgdb_arch_init(); |
1553 | #ifdef CONFIG_MAGIC_SYSRQ | 1557 | #ifdef CONFIG_MAGIC_SYSRQ |
1554 | register_sysrq_key('g', &sysrq_gdb_op); | 1558 | register_sysrq_key('g', &sysrq_gdb_op); |
1555 | #endif | 1559 | #endif |
1556 | if (kgdb_use_con && !kgdb_con_registered) { | 1560 | if (kgdb_use_con && !kgdb_con_registered) { |
1557 | register_console(&kgdbcons); | 1561 | register_console(&kgdbcons); |
1558 | kgdb_con_registered = 1; | 1562 | kgdb_con_registered = 1; |
1559 | } | 1563 | } |
1560 | } | 1564 | } |
1561 | } | 1565 | } |
1562 | 1566 | ||
1563 | static void kgdb_unregister_callbacks(void) | 1567 | static void kgdb_unregister_callbacks(void) |
1564 | { | 1568 | { |
1565 | /* | 1569 | /* |
1566 | * When this routine is called KGDB should unregister from the | 1570 | * When this routine is called KGDB should unregister from the |
1567 | * panic handler and clean up, making sure it is not handling any | 1571 | * panic handler and clean up, making sure it is not handling any |
1568 | * break exceptions at the time. | 1572 | * break exceptions at the time. |
1569 | */ | 1573 | */ |
1570 | if (kgdb_io_module_registered) { | 1574 | if (kgdb_io_module_registered) { |
1571 | kgdb_io_module_registered = 0; | 1575 | kgdb_io_module_registered = 0; |
1572 | kgdb_arch_exit(); | 1576 | kgdb_arch_exit(); |
1573 | #ifdef CONFIG_MAGIC_SYSRQ | 1577 | #ifdef CONFIG_MAGIC_SYSRQ |
1574 | unregister_sysrq_key('g', &sysrq_gdb_op); | 1578 | unregister_sysrq_key('g', &sysrq_gdb_op); |
1575 | #endif | 1579 | #endif |
1576 | if (kgdb_con_registered) { | 1580 | if (kgdb_con_registered) { |
1577 | unregister_console(&kgdbcons); | 1581 | unregister_console(&kgdbcons); |
1578 | kgdb_con_registered = 0; | 1582 | kgdb_con_registered = 0; |
1579 | } | 1583 | } |
1580 | } | 1584 | } |
1581 | } | 1585 | } |
1582 | 1586 | ||
1583 | static void kgdb_initial_breakpoint(void) | 1587 | static void kgdb_initial_breakpoint(void) |
1584 | { | 1588 | { |
1585 | kgdb_break_asap = 0; | 1589 | kgdb_break_asap = 0; |
1586 | 1590 | ||
1587 | printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); | 1591 | printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); |
1588 | kgdb_breakpoint(); | 1592 | kgdb_breakpoint(); |
1589 | } | 1593 | } |
1590 | 1594 | ||
1591 | /** | 1595 | /** |
1592 | * kkgdb_register_io_module - register KGDB IO module | 1596 | * kkgdb_register_io_module - register KGDB IO module |
1593 | * @new_kgdb_io_ops: the io ops vector | 1597 | * @new_kgdb_io_ops: the io ops vector |
1594 | * | 1598 | * |
1595 | * Register it with the KGDB core. | 1599 | * Register it with the KGDB core. |
1596 | */ | 1600 | */ |
1597 | int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops) | 1601 | int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops) |
1598 | { | 1602 | { |
1599 | int err; | 1603 | int err; |
1600 | 1604 | ||
1601 | spin_lock(&kgdb_registration_lock); | 1605 | spin_lock(&kgdb_registration_lock); |
1602 | 1606 | ||
1603 | if (kgdb_io_ops) { | 1607 | if (kgdb_io_ops) { |
1604 | spin_unlock(&kgdb_registration_lock); | 1608 | spin_unlock(&kgdb_registration_lock); |
1605 | 1609 | ||
1606 | printk(KERN_ERR "kgdb: Another I/O driver is already " | 1610 | printk(KERN_ERR "kgdb: Another I/O driver is already " |
1607 | "registered with KGDB.\n"); | 1611 | "registered with KGDB.\n"); |
1608 | return -EBUSY; | 1612 | return -EBUSY; |
1609 | } | 1613 | } |
1610 | 1614 | ||
1611 | if (new_kgdb_io_ops->init) { | 1615 | if (new_kgdb_io_ops->init) { |
1612 | err = new_kgdb_io_ops->init(); | 1616 | err = new_kgdb_io_ops->init(); |
1613 | if (err) { | 1617 | if (err) { |
1614 | spin_unlock(&kgdb_registration_lock); | 1618 | spin_unlock(&kgdb_registration_lock); |
1615 | return err; | 1619 | return err; |
1616 | } | 1620 | } |
1617 | } | 1621 | } |
1618 | 1622 | ||
1619 | kgdb_io_ops = new_kgdb_io_ops; | 1623 | kgdb_io_ops = new_kgdb_io_ops; |
1620 | 1624 | ||
1621 | spin_unlock(&kgdb_registration_lock); | 1625 | spin_unlock(&kgdb_registration_lock); |
1622 | 1626 | ||
1623 | printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", | 1627 | printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", |
1624 | new_kgdb_io_ops->name); | 1628 | new_kgdb_io_ops->name); |
1625 | 1629 | ||
1626 | /* Arm KGDB now. */ | 1630 | /* Arm KGDB now. */ |
1627 | kgdb_register_callbacks(); | 1631 | kgdb_register_callbacks(); |
1628 | 1632 | ||
1629 | if (kgdb_break_asap) | 1633 | if (kgdb_break_asap) |
1630 | kgdb_initial_breakpoint(); | 1634 | kgdb_initial_breakpoint(); |
1631 | 1635 | ||
1632 | return 0; | 1636 | return 0; |
1633 | } | 1637 | } |
1634 | EXPORT_SYMBOL_GPL(kgdb_register_io_module); | 1638 | EXPORT_SYMBOL_GPL(kgdb_register_io_module); |
1635 | 1639 | ||
1636 | /** | 1640 | /** |
1637 | * kkgdb_unregister_io_module - unregister KGDB IO module | 1641 | * kkgdb_unregister_io_module - unregister KGDB IO module |
1638 | * @old_kgdb_io_ops: the io ops vector | 1642 | * @old_kgdb_io_ops: the io ops vector |
1639 | * | 1643 | * |
1640 | * Unregister it with the KGDB core. | 1644 | * Unregister it with the KGDB core. |
1641 | */ | 1645 | */ |
1642 | void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops) | 1646 | void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops) |
1643 | { | 1647 | { |
1644 | BUG_ON(kgdb_connected); | 1648 | BUG_ON(kgdb_connected); |
1645 | 1649 | ||
1646 | /* | 1650 | /* |
1647 | * KGDB is no longer able to communicate out, so | 1651 | * KGDB is no longer able to communicate out, so |
1648 | * unregister our callbacks and reset state. | 1652 | * unregister our callbacks and reset state. |
1649 | */ | 1653 | */ |
1650 | kgdb_unregister_callbacks(); | 1654 | kgdb_unregister_callbacks(); |
1651 | 1655 | ||
1652 | spin_lock(&kgdb_registration_lock); | 1656 | spin_lock(&kgdb_registration_lock); |
1653 | 1657 | ||
1654 | WARN_ON_ONCE(kgdb_io_ops != old_kgdb_io_ops); | 1658 | WARN_ON_ONCE(kgdb_io_ops != old_kgdb_io_ops); |
1655 | kgdb_io_ops = NULL; | 1659 | kgdb_io_ops = NULL; |
1656 | 1660 | ||
1657 | spin_unlock(&kgdb_registration_lock); | 1661 | spin_unlock(&kgdb_registration_lock); |
1658 | 1662 | ||
1659 | printk(KERN_INFO | 1663 | printk(KERN_INFO |
1660 | "kgdb: Unregistered I/O driver %s, debugger disabled.\n", | 1664 | "kgdb: Unregistered I/O driver %s, debugger disabled.\n", |
1661 | old_kgdb_io_ops->name); | 1665 | old_kgdb_io_ops->name); |
1662 | } | 1666 | } |
1663 | EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); | 1667 | EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); |
1664 | 1668 | ||
/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
void kgdb_breakpoint(void)
{
	/* The flag/wmb ordering around the arch trap is deliberate: the
	 * exception handler must observe kgdb_setting_breakpoint == 1. */
	atomic_set(&kgdb_setting_breakpoint, 1);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_set(&kgdb_setting_breakpoint, 0);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);
1682 | 1686 | ||
1683 | static int __init opt_kgdb_wait(char *str) | 1687 | static int __init opt_kgdb_wait(char *str) |
1684 | { | 1688 | { |
1685 | kgdb_break_asap = 1; | 1689 | kgdb_break_asap = 1; |
1686 | 1690 | ||
1687 | if (kgdb_io_module_registered) | 1691 | if (kgdb_io_module_registered) |
1688 | kgdb_initial_breakpoint(); | 1692 | kgdb_initial_breakpoint(); |
1689 | 1693 | ||
1690 | return 0; | 1694 | return 0; |
1691 | } | 1695 | } |
1692 | 1696 | ||
1693 | early_param("kgdbwait", opt_kgdb_wait); | 1697 | early_param("kgdbwait", opt_kgdb_wait); |
1694 | 1698 |
kernel/time/clocksource.c
1 | /* | 1 | /* |
2 | * linux/kernel/time/clocksource.c | 2 | * linux/kernel/time/clocksource.c |
3 | * | 3 | * |
4 | * This file contains the functions which manage clocksource drivers. | 4 | * This file contains the functions which manage clocksource drivers. |
5 | * | 5 | * |
6 | * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com) | 6 | * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com) |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; either version 2 of the License, or | 10 | * the Free Software Foundation; either version 2 of the License, or |
11 | * (at your option) any later version. | 11 | * (at your option) any later version. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, | 13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software | 19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
21 | * | 21 | * |
22 | * TODO WishList: | 22 | * TODO WishList: |
23 | * o Allow clocksource drivers to be unregistered | 23 | * o Allow clocksource drivers to be unregistered |
24 | * o get rid of clocksource_jiffies extern | 24 | * o get rid of clocksource_jiffies extern |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/clocksource.h> | 27 | #include <linux/clocksource.h> |
28 | #include <linux/sysdev.h> | 28 | #include <linux/sysdev.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ | 31 | #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ |
32 | #include <linux/tick.h> | 32 | #include <linux/tick.h> |
33 | 33 | ||
34 | /* XXX - Would like a better way for initializing curr_clocksource */ | 34 | /* XXX - Would like a better way for initializing curr_clocksource */ |
35 | extern struct clocksource clocksource_jiffies; | 35 | extern struct clocksource clocksource_jiffies; |
36 | 36 | ||
37 | /*[Clocksource internal variables]--------- | 37 | /*[Clocksource internal variables]--------- |
38 | * curr_clocksource: | 38 | * curr_clocksource: |
39 | * currently selected clocksource. Initialized to clocksource_jiffies. | 39 | * currently selected clocksource. Initialized to clocksource_jiffies. |
40 | * next_clocksource: | 40 | * next_clocksource: |
41 | * pending next selected clocksource. | 41 | * pending next selected clocksource. |
42 | * clocksource_list: | 42 | * clocksource_list: |
43 | * linked list with the registered clocksources | 43 | * linked list with the registered clocksources |
44 | * clocksource_lock: | 44 | * clocksource_lock: |
45 | * protects manipulations to curr_clocksource and next_clocksource | 45 | * protects manipulations to curr_clocksource and next_clocksource |
46 | * and the clocksource_list | 46 | * and the clocksource_list |
47 | * override_name: | 47 | * override_name: |
48 | * Name of the user-specified clocksource. | 48 | * Name of the user-specified clocksource. |
49 | */ | 49 | */ |
static struct clocksource *curr_clocksource = &clocksource_jiffies;
static struct clocksource *next_clocksource;
static struct clocksource *clocksource_override;
static LIST_HEAD(clocksource_list);
static DEFINE_SPINLOCK(clocksource_lock);
static char override_name[32];
static int finished_booting;	/* set once by clocksource_done_booting() */
57 | 57 | ||
/* clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	/* From here on clocksource selection/rating changes take effect. */
	finished_booting = 1;
	return 0;
}
fs_initcall(clocksource_done_booting);
70 | 70 | ||
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Clocksources flagged CLOCK_SOURCE_MUST_VERIFY are cross-checked
 * against a trusted "watchdog" clocksource by a periodic timer. */
static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static unsigned long watchdog_resumed;	/* bit 0: skip one interval after resume */

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
84 | 84 | ||
85 | static void clocksource_ratewd(struct clocksource *cs, int64_t delta) | 85 | static void clocksource_ratewd(struct clocksource *cs, int64_t delta) |
86 | { | 86 | { |
87 | if (delta > -WATCHDOG_THRESHOLD && delta < WATCHDOG_THRESHOLD) | 87 | if (delta > -WATCHDOG_THRESHOLD && delta < WATCHDOG_THRESHOLD) |
88 | return; | 88 | return; |
89 | 89 | ||
90 | printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n", | 90 | printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n", |
91 | cs->name, delta); | 91 | cs->name, delta); |
92 | cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); | 92 | cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); |
93 | clocksource_change_rating(cs, 0); | 93 | clocksource_change_rating(cs, 0); |
94 | list_del(&cs->wd_list); | 94 | list_del(&cs->wd_list); |
95 | } | 95 | } |
96 | 96 | ||
/*
 * Periodic timer callback: compare every watched clocksource's elapsed
 * nanoseconds against the watchdog clocksource and demote any that has
 * drifted beyond WATCHDOG_THRESHOLD (see clocksource_ratewd()).
 */
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs, *tmp;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int resumed;

	spin_lock(&watchdog_lock);

	/* After suspend/resume (or a kgdb touch) skip one comparison. */
	resumed = test_and_clear_bit(0, &watchdog_resumed);

	wdnow = watchdog->read();
	/* Mask handles counter wrap within one interval. */
	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
	watchdog_last = wdnow;

	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		csnow = cs->read();

		if (unlikely(resumed)) {
			/* Resync only; comparing across a resume would
			 * produce a bogus delta. */
			cs->wd_last = csnow;
			continue;
		}

		/* Initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
			    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
				cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
				/*
				 * We just marked the clocksource as
				 * highres-capable, notify the rest of the
				 * system as well so that we transition
				 * into high-res mode:
				 */
				tick_clock_notify();
			}
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
		} else {
			cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask);
			cs->wd_last = csnow;
			/* Check the delta. Might remove from the list ! */
			clocksource_ratewd(cs, cs_nsec - wd_nsec);
		}
	}

	if (!list_empty(&watchdog_list)) {
		/* Re-arm relative to the previous expiry to avoid drift. */
		__mod_timer(&watchdog_timer,
			    watchdog_timer.expires + WATCHDOG_INTERVAL);
	}
	spin_unlock(&watchdog_lock);
}
/* Tell the watchdog to resync instead of compare on its next run —
 * used after suspend/resume and after kgdb stops the machine. */
static void clocksource_resume_watchdog(void)
{
	set_bit(0, &watchdog_resumed);
}
153 | 153 | ||
/*
 * Decide how a newly registered clocksource interacts with the watchdog.
 *
 * Takes watchdog_lock itself.  Two cases:
 *  - the clocksource asks to be verified (CLOCK_SOURCE_MUST_VERIFY):
 *    queue it on watchdog_list and start the watchdog timer if this is
 *    the first entry and a watchdog clocksource already exists;
 *  - otherwise it is trusted: mark it highres-capable when continuous,
 *    and promote it to be the watchdog itself if it outrates the
 *    current one.
 */
static void clocksource_check_watchdog(struct clocksource *cs)
{
	struct clocksource *cse;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		int started = !list_empty(&watchdog_list);

		list_add(&cs->wd_list, &watchdog_list);
		/*
		 * Arm the timer only for the first list entry, and only
		 * when a watchdog clocksource has been selected already.
		 */
		if (!started && watchdog) {
			watchdog_last = watchdog->read();
			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
			add_timer(&watchdog_timer);
		}
	} else {
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

		/* A better-rated trusted clocksource becomes the watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			if (watchdog)
				del_timer(&watchdog_timer);
			watchdog = cs;
			init_timer(&watchdog_timer);
			watchdog_timer.function = clocksource_watchdog;

			/* Reset watchdog cycles */
			list_for_each_entry(cse, &watchdog_list, wd_list)
				cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
			/* Start if list is not empty */
			if (!list_empty(&watchdog_list)) {
				watchdog_last = watchdog->read();
				watchdog_timer.expires =
					jiffies + WATCHDOG_INTERVAL;
				add_timer(&watchdog_timer);
			}
		}
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
194 | #else | 194 | #else |
195 | static void clocksource_check_watchdog(struct clocksource *cs) | 195 | static void clocksource_check_watchdog(struct clocksource *cs) |
196 | { | 196 | { |
197 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) | 197 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) |
198 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; | 198 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; |
199 | } | 199 | } |
200 | 200 | ||
/* No watchdog configured: nothing to resync after suspend/exception return. */
static inline void clocksource_resume_watchdog(void) { }
202 | #endif | 202 | #endif |
203 | 203 | ||
204 | /** | 204 | /** |
205 | * clocksource_resume - resume the clocksource(s) | 205 | * clocksource_resume - resume the clocksource(s) |
206 | */ | 206 | */ |
207 | void clocksource_resume(void) | 207 | void clocksource_resume(void) |
208 | { | 208 | { |
209 | struct clocksource *cs; | 209 | struct clocksource *cs; |
210 | unsigned long flags; | 210 | unsigned long flags; |
211 | 211 | ||
212 | spin_lock_irqsave(&clocksource_lock, flags); | 212 | spin_lock_irqsave(&clocksource_lock, flags); |
213 | 213 | ||
214 | list_for_each_entry(cs, &clocksource_list, list) { | 214 | list_for_each_entry(cs, &clocksource_list, list) { |
215 | if (cs->resume) | 215 | if (cs->resume) |
216 | cs->resume(); | 216 | cs->resume(); |
217 | } | 217 | } |
218 | 218 | ||
219 | clocksource_resume_watchdog(); | 219 | clocksource_resume_watchdog(); |
220 | 220 | ||
221 | spin_unlock_irqrestore(&clocksource_lock, flags); | 221 | spin_unlock_irqrestore(&clocksource_lock, flags); |
222 | } | 222 | } |
223 | 223 | ||
/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 *
 */
void clocksource_touch_watchdog(void)
{
	/* Same resync request as a resume from suspend. */
	clocksource_resume_watchdog();
}
235 | |||
236 | /** | ||
225 | * clocksource_get_next - Returns the selected clocksource | 237 | * clocksource_get_next - Returns the selected clocksource |
226 | * | 238 | * |
227 | */ | 239 | */ |
228 | struct clocksource *clocksource_get_next(void) | 240 | struct clocksource *clocksource_get_next(void) |
229 | { | 241 | { |
230 | unsigned long flags; | 242 | unsigned long flags; |
231 | 243 | ||
232 | spin_lock_irqsave(&clocksource_lock, flags); | 244 | spin_lock_irqsave(&clocksource_lock, flags); |
233 | if (next_clocksource && finished_booting) { | 245 | if (next_clocksource && finished_booting) { |
234 | curr_clocksource = next_clocksource; | 246 | curr_clocksource = next_clocksource; |
235 | next_clocksource = NULL; | 247 | next_clocksource = NULL; |
236 | } | 248 | } |
237 | spin_unlock_irqrestore(&clocksource_lock, flags); | 249 | spin_unlock_irqrestore(&clocksource_lock, flags); |
238 | 250 | ||
239 | return curr_clocksource; | 251 | return curr_clocksource; |
240 | } | 252 | } |
241 | 253 | ||
242 | /** | 254 | /** |
243 | * select_clocksource - Selects the best registered clocksource. | 255 | * select_clocksource - Selects the best registered clocksource. |
244 | * | 256 | * |
245 | * Private function. Must hold clocksource_lock when called. | 257 | * Private function. Must hold clocksource_lock when called. |
246 | * | 258 | * |
247 | * Select the clocksource with the best rating, or the clocksource, | 259 | * Select the clocksource with the best rating, or the clocksource, |
248 | * which is selected by userspace override. | 260 | * which is selected by userspace override. |
249 | */ | 261 | */ |
250 | static struct clocksource *select_clocksource(void) | 262 | static struct clocksource *select_clocksource(void) |
251 | { | 263 | { |
252 | struct clocksource *next; | 264 | struct clocksource *next; |
253 | 265 | ||
254 | if (list_empty(&clocksource_list)) | 266 | if (list_empty(&clocksource_list)) |
255 | return NULL; | 267 | return NULL; |
256 | 268 | ||
257 | if (clocksource_override) | 269 | if (clocksource_override) |
258 | next = clocksource_override; | 270 | next = clocksource_override; |
259 | else | 271 | else |
260 | next = list_entry(clocksource_list.next, struct clocksource, | 272 | next = list_entry(clocksource_list.next, struct clocksource, |
261 | list); | 273 | list); |
262 | 274 | ||
263 | if (next == curr_clocksource) | 275 | if (next == curr_clocksource) |
264 | return NULL; | 276 | return NULL; |
265 | 277 | ||
266 | return next; | 278 | return next; |
267 | } | 279 | } |
268 | 280 | ||
269 | /* | 281 | /* |
270 | * Enqueue the clocksource sorted by rating | 282 | * Enqueue the clocksource sorted by rating |
271 | */ | 283 | */ |
272 | static int clocksource_enqueue(struct clocksource *c) | 284 | static int clocksource_enqueue(struct clocksource *c) |
273 | { | 285 | { |
274 | struct list_head *tmp, *entry = &clocksource_list; | 286 | struct list_head *tmp, *entry = &clocksource_list; |
275 | 287 | ||
276 | list_for_each(tmp, &clocksource_list) { | 288 | list_for_each(tmp, &clocksource_list) { |
277 | struct clocksource *cs; | 289 | struct clocksource *cs; |
278 | 290 | ||
279 | cs = list_entry(tmp, struct clocksource, list); | 291 | cs = list_entry(tmp, struct clocksource, list); |
280 | if (cs == c) | 292 | if (cs == c) |
281 | return -EBUSY; | 293 | return -EBUSY; |
282 | /* Keep track of the place, where to insert */ | 294 | /* Keep track of the place, where to insert */ |
283 | if (cs->rating >= c->rating) | 295 | if (cs->rating >= c->rating) |
284 | entry = tmp; | 296 | entry = tmp; |
285 | } | 297 | } |
286 | list_add(&c->list, entry); | 298 | list_add(&c->list, entry); |
287 | 299 | ||
288 | if (strlen(c->name) == strlen(override_name) && | 300 | if (strlen(c->name) == strlen(override_name) && |
289 | !strcmp(c->name, override_name)) | 301 | !strcmp(c->name, override_name)) |
290 | clocksource_override = c; | 302 | clocksource_override = c; |
291 | 303 | ||
292 | return 0; | 304 | return 0; |
293 | } | 305 | } |
294 | 306 | ||
295 | /** | 307 | /** |
296 | * clocksource_register - Used to install new clocksources | 308 | * clocksource_register - Used to install new clocksources |
297 | * @t: clocksource to be registered | 309 | * @t: clocksource to be registered |
298 | * | 310 | * |
299 | * Returns -EBUSY if registration fails, zero otherwise. | 311 | * Returns -EBUSY if registration fails, zero otherwise. |
300 | */ | 312 | */ |
301 | int clocksource_register(struct clocksource *c) | 313 | int clocksource_register(struct clocksource *c) |
302 | { | 314 | { |
303 | unsigned long flags; | 315 | unsigned long flags; |
304 | int ret; | 316 | int ret; |
305 | 317 | ||
306 | spin_lock_irqsave(&clocksource_lock, flags); | 318 | spin_lock_irqsave(&clocksource_lock, flags); |
307 | ret = clocksource_enqueue(c); | 319 | ret = clocksource_enqueue(c); |
308 | if (!ret) | 320 | if (!ret) |
309 | next_clocksource = select_clocksource(); | 321 | next_clocksource = select_clocksource(); |
310 | spin_unlock_irqrestore(&clocksource_lock, flags); | 322 | spin_unlock_irqrestore(&clocksource_lock, flags); |
311 | if (!ret) | 323 | if (!ret) |
312 | clocksource_check_watchdog(c); | 324 | clocksource_check_watchdog(c); |
313 | return ret; | 325 | return ret; |
314 | } | 326 | } |
315 | EXPORT_SYMBOL(clocksource_register); | 327 | EXPORT_SYMBOL(clocksource_register); |
316 | 328 | ||
317 | /** | 329 | /** |
318 | * clocksource_change_rating - Change the rating of a registered clocksource | 330 | * clocksource_change_rating - Change the rating of a registered clocksource |
319 | * | 331 | * |
320 | */ | 332 | */ |
321 | void clocksource_change_rating(struct clocksource *cs, int rating) | 333 | void clocksource_change_rating(struct clocksource *cs, int rating) |
322 | { | 334 | { |
323 | unsigned long flags; | 335 | unsigned long flags; |
324 | 336 | ||
325 | spin_lock_irqsave(&clocksource_lock, flags); | 337 | spin_lock_irqsave(&clocksource_lock, flags); |
326 | list_del(&cs->list); | 338 | list_del(&cs->list); |
327 | cs->rating = rating; | 339 | cs->rating = rating; |
328 | clocksource_enqueue(cs); | 340 | clocksource_enqueue(cs); |
329 | next_clocksource = select_clocksource(); | 341 | next_clocksource = select_clocksource(); |
330 | spin_unlock_irqrestore(&clocksource_lock, flags); | 342 | spin_unlock_irqrestore(&clocksource_lock, flags); |
331 | } | 343 | } |
332 | 344 | ||
333 | /** | 345 | /** |
334 | * clocksource_unregister - remove a registered clocksource | 346 | * clocksource_unregister - remove a registered clocksource |
335 | */ | 347 | */ |
336 | void clocksource_unregister(struct clocksource *cs) | 348 | void clocksource_unregister(struct clocksource *cs) |
337 | { | 349 | { |
338 | unsigned long flags; | 350 | unsigned long flags; |
339 | 351 | ||
340 | spin_lock_irqsave(&clocksource_lock, flags); | 352 | spin_lock_irqsave(&clocksource_lock, flags); |
341 | list_del(&cs->list); | 353 | list_del(&cs->list); |
342 | if (clocksource_override == cs) | 354 | if (clocksource_override == cs) |
343 | clocksource_override = NULL; | 355 | clocksource_override = NULL; |
344 | next_clocksource = select_clocksource(); | 356 | next_clocksource = select_clocksource(); |
345 | spin_unlock_irqrestore(&clocksource_lock, flags); | 357 | spin_unlock_irqrestore(&clocksource_lock, flags); |
346 | } | 358 | } |
347 | 359 | ||
348 | #ifdef CONFIG_SYSFS | 360 | #ifdef CONFIG_SYSFS |
349 | /** | 361 | /** |
350 | * sysfs_show_current_clocksources - sysfs interface for current clocksource | 362 | * sysfs_show_current_clocksources - sysfs interface for current clocksource |
351 | * @dev: unused | 363 | * @dev: unused |
352 | * @buf: char buffer to be filled with clocksource list | 364 | * @buf: char buffer to be filled with clocksource list |
353 | * | 365 | * |
354 | * Provides sysfs interface for listing current clocksource. | 366 | * Provides sysfs interface for listing current clocksource. |
355 | */ | 367 | */ |
356 | static ssize_t | 368 | static ssize_t |
357 | sysfs_show_current_clocksources(struct sys_device *dev, char *buf) | 369 | sysfs_show_current_clocksources(struct sys_device *dev, char *buf) |
358 | { | 370 | { |
359 | ssize_t count = 0; | 371 | ssize_t count = 0; |
360 | 372 | ||
361 | spin_lock_irq(&clocksource_lock); | 373 | spin_lock_irq(&clocksource_lock); |
362 | count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name); | 374 | count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name); |
363 | spin_unlock_irq(&clocksource_lock); | 375 | spin_unlock_irq(&clocksource_lock); |
364 | 376 | ||
365 | return count; | 377 | return count; |
366 | } | 378 | } |
367 | 379 | ||
368 | /** | 380 | /** |
369 | * sysfs_override_clocksource - interface for manually overriding clocksource | 381 | * sysfs_override_clocksource - interface for manually overriding clocksource |
370 | * @dev: unused | 382 | * @dev: unused |
371 | * @buf: name of override clocksource | 383 | * @buf: name of override clocksource |
372 | * @count: length of buffer | 384 | * @count: length of buffer |
373 | * | 385 | * |
374 | * Takes input from sysfs interface for manually overriding the default | 386 | * Takes input from sysfs interface for manually overriding the default |
375 | * clocksource selction. | 387 | * clocksource selction. |
376 | */ | 388 | */ |
377 | static ssize_t sysfs_override_clocksource(struct sys_device *dev, | 389 | static ssize_t sysfs_override_clocksource(struct sys_device *dev, |
378 | const char *buf, size_t count) | 390 | const char *buf, size_t count) |
379 | { | 391 | { |
380 | struct clocksource *ovr = NULL; | 392 | struct clocksource *ovr = NULL; |
381 | size_t ret = count; | 393 | size_t ret = count; |
382 | int len; | 394 | int len; |
383 | 395 | ||
384 | /* strings from sysfs write are not 0 terminated! */ | 396 | /* strings from sysfs write are not 0 terminated! */ |
385 | if (count >= sizeof(override_name)) | 397 | if (count >= sizeof(override_name)) |
386 | return -EINVAL; | 398 | return -EINVAL; |
387 | 399 | ||
388 | /* strip of \n: */ | 400 | /* strip of \n: */ |
389 | if (buf[count-1] == '\n') | 401 | if (buf[count-1] == '\n') |
390 | count--; | 402 | count--; |
391 | 403 | ||
392 | spin_lock_irq(&clocksource_lock); | 404 | spin_lock_irq(&clocksource_lock); |
393 | 405 | ||
394 | if (count > 0) | 406 | if (count > 0) |
395 | memcpy(override_name, buf, count); | 407 | memcpy(override_name, buf, count); |
396 | override_name[count] = 0; | 408 | override_name[count] = 0; |
397 | 409 | ||
398 | len = strlen(override_name); | 410 | len = strlen(override_name); |
399 | if (len) { | 411 | if (len) { |
400 | struct clocksource *cs; | 412 | struct clocksource *cs; |
401 | 413 | ||
402 | ovr = clocksource_override; | 414 | ovr = clocksource_override; |
403 | /* try to select it: */ | 415 | /* try to select it: */ |
404 | list_for_each_entry(cs, &clocksource_list, list) { | 416 | list_for_each_entry(cs, &clocksource_list, list) { |
405 | if (strlen(cs->name) == len && | 417 | if (strlen(cs->name) == len && |
406 | !strcmp(cs->name, override_name)) | 418 | !strcmp(cs->name, override_name)) |
407 | ovr = cs; | 419 | ovr = cs; |
408 | } | 420 | } |
409 | } | 421 | } |
410 | 422 | ||
411 | /* Reselect, when the override name has changed */ | 423 | /* Reselect, when the override name has changed */ |
412 | if (ovr != clocksource_override) { | 424 | if (ovr != clocksource_override) { |
413 | clocksource_override = ovr; | 425 | clocksource_override = ovr; |
414 | next_clocksource = select_clocksource(); | 426 | next_clocksource = select_clocksource(); |
415 | } | 427 | } |
416 | 428 | ||
417 | spin_unlock_irq(&clocksource_lock); | 429 | spin_unlock_irq(&clocksource_lock); |
418 | 430 | ||
419 | return ret; | 431 | return ret; |
420 | } | 432 | } |
421 | 433 | ||
422 | /** | 434 | /** |
423 | * sysfs_show_available_clocksources - sysfs interface for listing clocksource | 435 | * sysfs_show_available_clocksources - sysfs interface for listing clocksource |
424 | * @dev: unused | 436 | * @dev: unused |
425 | * @buf: char buffer to be filled with clocksource list | 437 | * @buf: char buffer to be filled with clocksource list |
426 | * | 438 | * |
427 | * Provides sysfs interface for listing registered clocksources | 439 | * Provides sysfs interface for listing registered clocksources |
428 | */ | 440 | */ |
429 | static ssize_t | 441 | static ssize_t |
430 | sysfs_show_available_clocksources(struct sys_device *dev, char *buf) | 442 | sysfs_show_available_clocksources(struct sys_device *dev, char *buf) |
431 | { | 443 | { |
432 | struct clocksource *src; | 444 | struct clocksource *src; |
433 | ssize_t count = 0; | 445 | ssize_t count = 0; |
434 | 446 | ||
435 | spin_lock_irq(&clocksource_lock); | 447 | spin_lock_irq(&clocksource_lock); |
436 | list_for_each_entry(src, &clocksource_list, list) { | 448 | list_for_each_entry(src, &clocksource_list, list) { |
437 | count += snprintf(buf + count, | 449 | count += snprintf(buf + count, |
438 | max((ssize_t)PAGE_SIZE - count, (ssize_t)0), | 450 | max((ssize_t)PAGE_SIZE - count, (ssize_t)0), |
439 | "%s ", src->name); | 451 | "%s ", src->name); |
440 | } | 452 | } |
441 | spin_unlock_irq(&clocksource_lock); | 453 | spin_unlock_irq(&clocksource_lock); |
442 | 454 | ||
443 | count += snprintf(buf + count, | 455 | count += snprintf(buf + count, |
444 | max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n"); | 456 | max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n"); |
445 | 457 | ||
446 | return count; | 458 | return count; |
447 | } | 459 | } |
448 | 460 | ||
/*
 * Sysfs setup bits:
 */
/* current_clocksource: readable and writable (override) attribute. */
static SYSDEV_ATTR(current_clocksource, 0600, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

/* available_clocksource: list of registered clocksources, no store hook. */
static SYSDEV_ATTR(available_clocksource, 0600,
		   sysfs_show_available_clocksources, NULL);

/* Class and singleton device backing /sys/devices/system/clocksource. */
static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};
466 | 478 | ||
467 | static int __init init_clocksource_sysfs(void) | 479 | static int __init init_clocksource_sysfs(void) |
468 | { | 480 | { |
469 | int error = sysdev_class_register(&clocksource_sysclass); | 481 | int error = sysdev_class_register(&clocksource_sysclass); |
470 | 482 | ||
471 | if (!error) | 483 | if (!error) |
472 | error = sysdev_register(&device_clocksource); | 484 | error = sysdev_register(&device_clocksource); |
473 | if (!error) | 485 | if (!error) |
474 | error = sysdev_create_file( | 486 | error = sysdev_create_file( |
475 | &device_clocksource, | 487 | &device_clocksource, |
476 | &attr_current_clocksource); | 488 | &attr_current_clocksource); |
477 | if (!error) | 489 | if (!error) |
478 | error = sysdev_create_file( | 490 | error = sysdev_create_file( |
479 | &device_clocksource, | 491 | &device_clocksource, |
480 | &attr_available_clocksource); | 492 | &attr_available_clocksource); |
481 | return error; | 493 | return error; |
482 | } | 494 | } |
483 | 495 | ||
484 | device_initcall(init_clocksource_sysfs); | 496 | device_initcall(init_clocksource_sysfs); |
485 | #endif /* CONFIG_SYSFS */ | 497 | #endif /* CONFIG_SYSFS */ |
486 | 498 | ||
487 | /** | 499 | /** |
488 | * boot_override_clocksource - boot clock override | 500 | * boot_override_clocksource - boot clock override |
489 | * @str: override name | 501 | * @str: override name |
490 | * | 502 | * |
491 | * Takes a clocksource= boot argument and uses it | 503 | * Takes a clocksource= boot argument and uses it |
492 | * as the clocksource override name. | 504 | * as the clocksource override name. |
493 | */ | 505 | */ |
494 | static int __init boot_override_clocksource(char* str) | 506 | static int __init boot_override_clocksource(char* str) |
495 | { | 507 | { |
496 | unsigned long flags; | 508 | unsigned long flags; |
497 | spin_lock_irqsave(&clocksource_lock, flags); | 509 | spin_lock_irqsave(&clocksource_lock, flags); |
498 | if (str) | 510 | if (str) |
499 | strlcpy(override_name, str, sizeof(override_name)); | 511 | strlcpy(override_name, str, sizeof(override_name)); |
500 | spin_unlock_irqrestore(&clocksource_lock, flags); | 512 | spin_unlock_irqrestore(&clocksource_lock, flags); |
501 | return 1; | 513 | return 1; |
502 | } | 514 | } |
503 | 515 | ||
504 | __setup("clocksource=", boot_override_clocksource); | 516 | __setup("clocksource=", boot_override_clocksource); |
505 | 517 | ||
506 | /** | 518 | /** |
507 | * boot_override_clock - Compatibility layer for deprecated boot option | 519 | * boot_override_clock - Compatibility layer for deprecated boot option |
508 | * @str: override name | 520 | * @str: override name |
509 | * | 521 | * |
510 | * DEPRECATED! Takes a clock= boot argument and uses it | 522 | * DEPRECATED! Takes a clock= boot argument and uses it |
511 | * as the clocksource override name | 523 | * as the clocksource override name |
512 | */ | 524 | */ |
513 | static int __init boot_override_clock(char* str) | 525 | static int __init boot_override_clock(char* str) |
514 | { | 526 | { |
515 | if (!strcmp(str, "pmtmr")) { | 527 | if (!strcmp(str, "pmtmr")) { |
516 | printk("Warning: clock=pmtmr is deprecated. " | 528 | printk("Warning: clock=pmtmr is deprecated. " |
517 | "Use clocksource=acpi_pm.\n"); | 529 | "Use clocksource=acpi_pm.\n"); |
518 | return boot_override_clocksource("acpi_pm"); | 530 | return boot_override_clocksource("acpi_pm"); |
519 | } | 531 | } |
520 | printk("Warning! clock= boot option is deprecated. " | 532 | printk("Warning! clock= boot option is deprecated. " |
521 | "Use clocksource=xyz\n"); | 533 | "Use clocksource=xyz\n"); |
522 | return boot_override_clocksource(str); | 534 | return boot_override_clocksource(str); |
523 | } | 535 | } |
524 | 536 | ||
525 | __setup("clock=", boot_override_clock); | 537 | __setup("clock=", boot_override_clock); |
526 | 538 |