Commit 9afe33ada275f2413dfeae27cc58fbb27474ac72

Authored by Oleg Nesterov
Committed by Linus Torvalds
1 parent 29a5551341

ptrace/x86: introduce ptrace_register_breakpoint()

No functional changes, preparation.

Extract the "register breakpoint" code from ptrace_get_debugreg() into
the new/generic helper, ptrace_register_breakpoint().  It will have more
users.

The patch also adds another simple helper, ptrace_fill_bp_fields(), to
factor out the arch_bp_generic_fields() logic in register/modify.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jan Kratochvil <jan.kratochvil@redhat.com>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Prasad <prasad@linux.vnet.ibm.com>
Cc: Russell King <linux@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 50 additions and 36 deletions (inline diff view).

arch/x86/kernel/ptrace.c
1 /* By Ross Biro 1/23/92 */ 1 /* By Ross Biro 1/23/92 */
2 /* 2 /*
3 * Pentium III FXSR, SSE support 3 * Pentium III FXSR, SSE support
4 * Gareth Hughes <gareth@valinux.com>, May 2000 4 * Gareth Hughes <gareth@valinux.com>, May 2000
5 */ 5 */
6 6
7 #include <linux/kernel.h> 7 #include <linux/kernel.h>
8 #include <linux/sched.h> 8 #include <linux/sched.h>
9 #include <linux/mm.h> 9 #include <linux/mm.h>
10 #include <linux/smp.h> 10 #include <linux/smp.h>
11 #include <linux/errno.h> 11 #include <linux/errno.h>
12 #include <linux/slab.h> 12 #include <linux/slab.h>
13 #include <linux/ptrace.h> 13 #include <linux/ptrace.h>
14 #include <linux/regset.h> 14 #include <linux/regset.h>
15 #include <linux/tracehook.h> 15 #include <linux/tracehook.h>
16 #include <linux/user.h> 16 #include <linux/user.h>
17 #include <linux/elf.h> 17 #include <linux/elf.h>
18 #include <linux/security.h> 18 #include <linux/security.h>
19 #include <linux/audit.h> 19 #include <linux/audit.h>
20 #include <linux/seccomp.h> 20 #include <linux/seccomp.h>
21 #include <linux/signal.h> 21 #include <linux/signal.h>
22 #include <linux/perf_event.h> 22 #include <linux/perf_event.h>
23 #include <linux/hw_breakpoint.h> 23 #include <linux/hw_breakpoint.h>
24 #include <linux/rcupdate.h> 24 #include <linux/rcupdate.h>
25 #include <linux/export.h> 25 #include <linux/export.h>
26 #include <linux/context_tracking.h> 26 #include <linux/context_tracking.h>
27 27
28 #include <asm/uaccess.h> 28 #include <asm/uaccess.h>
29 #include <asm/pgtable.h> 29 #include <asm/pgtable.h>
30 #include <asm/processor.h> 30 #include <asm/processor.h>
31 #include <asm/i387.h> 31 #include <asm/i387.h>
32 #include <asm/fpu-internal.h> 32 #include <asm/fpu-internal.h>
33 #include <asm/debugreg.h> 33 #include <asm/debugreg.h>
34 #include <asm/ldt.h> 34 #include <asm/ldt.h>
35 #include <asm/desc.h> 35 #include <asm/desc.h>
36 #include <asm/prctl.h> 36 #include <asm/prctl.h>
37 #include <asm/proto.h> 37 #include <asm/proto.h>
38 #include <asm/hw_breakpoint.h> 38 #include <asm/hw_breakpoint.h>
39 #include <asm/traps.h> 39 #include <asm/traps.h>
40 40
41 #include "tls.h" 41 #include "tls.h"
42 42
43 #define CREATE_TRACE_POINTS 43 #define CREATE_TRACE_POINTS
44 #include <trace/events/syscalls.h> 44 #include <trace/events/syscalls.h>
45 45
46 enum x86_regset { 46 enum x86_regset {
47 REGSET_GENERAL, 47 REGSET_GENERAL,
48 REGSET_FP, 48 REGSET_FP,
49 REGSET_XFP, 49 REGSET_XFP,
50 REGSET_IOPERM64 = REGSET_XFP, 50 REGSET_IOPERM64 = REGSET_XFP,
51 REGSET_XSTATE, 51 REGSET_XSTATE,
52 REGSET_TLS, 52 REGSET_TLS,
53 REGSET_IOPERM32, 53 REGSET_IOPERM32,
54 }; 54 };
55 55
/* Maps a pt_regs field name to its byte offset within struct pt_regs. */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
63 63
64 static const struct pt_regs_offset regoffset_table[] = { 64 static const struct pt_regs_offset regoffset_table[] = {
65 #ifdef CONFIG_X86_64 65 #ifdef CONFIG_X86_64
66 REG_OFFSET_NAME(r15), 66 REG_OFFSET_NAME(r15),
67 REG_OFFSET_NAME(r14), 67 REG_OFFSET_NAME(r14),
68 REG_OFFSET_NAME(r13), 68 REG_OFFSET_NAME(r13),
69 REG_OFFSET_NAME(r12), 69 REG_OFFSET_NAME(r12),
70 REG_OFFSET_NAME(r11), 70 REG_OFFSET_NAME(r11),
71 REG_OFFSET_NAME(r10), 71 REG_OFFSET_NAME(r10),
72 REG_OFFSET_NAME(r9), 72 REG_OFFSET_NAME(r9),
73 REG_OFFSET_NAME(r8), 73 REG_OFFSET_NAME(r8),
74 #endif 74 #endif
75 REG_OFFSET_NAME(bx), 75 REG_OFFSET_NAME(bx),
76 REG_OFFSET_NAME(cx), 76 REG_OFFSET_NAME(cx),
77 REG_OFFSET_NAME(dx), 77 REG_OFFSET_NAME(dx),
78 REG_OFFSET_NAME(si), 78 REG_OFFSET_NAME(si),
79 REG_OFFSET_NAME(di), 79 REG_OFFSET_NAME(di),
80 REG_OFFSET_NAME(bp), 80 REG_OFFSET_NAME(bp),
81 REG_OFFSET_NAME(ax), 81 REG_OFFSET_NAME(ax),
82 #ifdef CONFIG_X86_32 82 #ifdef CONFIG_X86_32
83 REG_OFFSET_NAME(ds), 83 REG_OFFSET_NAME(ds),
84 REG_OFFSET_NAME(es), 84 REG_OFFSET_NAME(es),
85 REG_OFFSET_NAME(fs), 85 REG_OFFSET_NAME(fs),
86 REG_OFFSET_NAME(gs), 86 REG_OFFSET_NAME(gs),
87 #endif 87 #endif
88 REG_OFFSET_NAME(orig_ax), 88 REG_OFFSET_NAME(orig_ax),
89 REG_OFFSET_NAME(ip), 89 REG_OFFSET_NAME(ip),
90 REG_OFFSET_NAME(cs), 90 REG_OFFSET_NAME(cs),
91 REG_OFFSET_NAME(flags), 91 REG_OFFSET_NAME(flags),
92 REG_OFFSET_NAME(sp), 92 REG_OFFSET_NAME(sp),
93 REG_OFFSET_NAME(ss), 93 REG_OFFSET_NAME(ss),
94 REG_OFFSET_END, 94 REG_OFFSET_END,
95 }; 95 };
96 96
97 /** 97 /**
98 * regs_query_register_offset() - query register offset from its name 98 * regs_query_register_offset() - query register offset from its name
99 * @name: the name of a register 99 * @name: the name of a register
100 * 100 *
101 * regs_query_register_offset() returns the offset of a register in struct 101 * regs_query_register_offset() returns the offset of a register in struct
102 * pt_regs from its name. If the name is invalid, this returns -EINVAL; 102 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
103 */ 103 */
104 int regs_query_register_offset(const char *name) 104 int regs_query_register_offset(const char *name)
105 { 105 {
106 const struct pt_regs_offset *roff; 106 const struct pt_regs_offset *roff;
107 for (roff = regoffset_table; roff->name != NULL; roff++) 107 for (roff = regoffset_table; roff->name != NULL; roff++)
108 if (!strcmp(roff->name, name)) 108 if (!strcmp(roff->name, name))
109 return roff->offset; 109 return roff->offset;
110 return -EINVAL; 110 return -EINVAL;
111 } 111 }
112 112
113 /** 113 /**
114 * regs_query_register_name() - query register name from its offset 114 * regs_query_register_name() - query register name from its offset
115 * @offset: the offset of a register in struct pt_regs. 115 * @offset: the offset of a register in struct pt_regs.
116 * 116 *
117 * regs_query_register_name() returns the name of a register from its 117 * regs_query_register_name() returns the name of a register from its
118 * offset in struct pt_regs. If the @offset is invalid, this returns NULL; 118 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
119 */ 119 */
120 const char *regs_query_register_name(unsigned int offset) 120 const char *regs_query_register_name(unsigned int offset)
121 { 121 {
122 const struct pt_regs_offset *roff; 122 const struct pt_regs_offset *roff;
123 for (roff = regoffset_table; roff->name != NULL; roff++) 123 for (roff = regoffset_table; roff->name != NULL; roff++)
124 if (roff->offset == offset) 124 if (roff->offset == offset)
125 return roff->name; 125 return roff->name;
126 return NULL; 126 return NULL;
127 } 127 }
128 128
129 static const int arg_offs_table[] = { 129 static const int arg_offs_table[] = {
130 #ifdef CONFIG_X86_32 130 #ifdef CONFIG_X86_32
131 [0] = offsetof(struct pt_regs, ax), 131 [0] = offsetof(struct pt_regs, ax),
132 [1] = offsetof(struct pt_regs, dx), 132 [1] = offsetof(struct pt_regs, dx),
133 [2] = offsetof(struct pt_regs, cx) 133 [2] = offsetof(struct pt_regs, cx)
134 #else /* CONFIG_X86_64 */ 134 #else /* CONFIG_X86_64 */
135 [0] = offsetof(struct pt_regs, di), 135 [0] = offsetof(struct pt_regs, di),
136 [1] = offsetof(struct pt_regs, si), 136 [1] = offsetof(struct pt_regs, si),
137 [2] = offsetof(struct pt_regs, dx), 137 [2] = offsetof(struct pt_regs, dx),
138 [3] = offsetof(struct pt_regs, cx), 138 [3] = offsetof(struct pt_regs, cx),
139 [4] = offsetof(struct pt_regs, r8), 139 [4] = offsetof(struct pt_regs, r8),
140 [5] = offsetof(struct pt_regs, r9) 140 [5] = offsetof(struct pt_regs, r9)
141 #endif 141 #endif
142 }; 142 };
143 143
144 /* 144 /*
145 * does not yet catch signals sent when the child dies. 145 * does not yet catch signals sent when the child dies.
146 * in exit.c or in signal.c. 146 * in exit.c or in signal.c.
147 */ 147 */
148 148
149 /* 149 /*
150 * Determines which flags the user has access to [1 = access, 0 = no access]. 150 * Determines which flags the user has access to [1 = access, 0 = no access].
151 */ 151 */
152 #define FLAG_MASK_32 ((unsigned long) \ 152 #define FLAG_MASK_32 ((unsigned long) \
153 (X86_EFLAGS_CF | X86_EFLAGS_PF | \ 153 (X86_EFLAGS_CF | X86_EFLAGS_PF | \
154 X86_EFLAGS_AF | X86_EFLAGS_ZF | \ 154 X86_EFLAGS_AF | X86_EFLAGS_ZF | \
155 X86_EFLAGS_SF | X86_EFLAGS_TF | \ 155 X86_EFLAGS_SF | X86_EFLAGS_TF | \
156 X86_EFLAGS_DF | X86_EFLAGS_OF | \ 156 X86_EFLAGS_DF | X86_EFLAGS_OF | \
157 X86_EFLAGS_RF | X86_EFLAGS_AC)) 157 X86_EFLAGS_RF | X86_EFLAGS_AC))
158 158
159 /* 159 /*
160 * Determines whether a value may be installed in a segment register. 160 * Determines whether a value may be installed in a segment register.
161 */ 161 */
162 static inline bool invalid_selector(u16 value) 162 static inline bool invalid_selector(u16 value)
163 { 163 {
164 return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL); 164 return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
165 } 165 }
166 166
167 #ifdef CONFIG_X86_32 167 #ifdef CONFIG_X86_32
168 168
169 #define FLAG_MASK FLAG_MASK_32 169 #define FLAG_MASK FLAG_MASK_32
170 170
171 /* 171 /*
172 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode 172 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
173 * when it traps. The previous stack will be directly underneath the saved 173 * when it traps. The previous stack will be directly underneath the saved
174 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'. 174 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
175 * 175 *
176 * Now, if the stack is empty, '&regs->sp' is out of range. In this 176 * Now, if the stack is empty, '&regs->sp' is out of range. In this
177 * case we try to take the previous stack. To always return a non-null 177 * case we try to take the previous stack. To always return a non-null
178 * stack pointer we fall back to regs as stack if no previous stack 178 * stack pointer we fall back to regs as stack if no previous stack
179 * exists. 179 * exists.
180 * 180 *
181 * This is valid only for kernel mode traps. 181 * This is valid only for kernel mode traps.
182 */ 182 */
183 unsigned long kernel_stack_pointer(struct pt_regs *regs) 183 unsigned long kernel_stack_pointer(struct pt_regs *regs)
184 { 184 {
185 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); 185 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
186 unsigned long sp = (unsigned long)&regs->sp; 186 unsigned long sp = (unsigned long)&regs->sp;
187 struct thread_info *tinfo; 187 struct thread_info *tinfo;
188 188
189 if (context == (sp & ~(THREAD_SIZE - 1))) 189 if (context == (sp & ~(THREAD_SIZE - 1)))
190 return sp; 190 return sp;
191 191
192 tinfo = (struct thread_info *)context; 192 tinfo = (struct thread_info *)context;
193 if (tinfo->previous_esp) 193 if (tinfo->previous_esp)
194 return tinfo->previous_esp; 194 return tinfo->previous_esp;
195 195
196 return (unsigned long)regs; 196 return (unsigned long)regs;
197 } 197 }
198 EXPORT_SYMBOL_GPL(kernel_stack_pointer); 198 EXPORT_SYMBOL_GPL(kernel_stack_pointer);
199 199
200 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) 200 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
201 { 201 {
202 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); 202 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
203 return &regs->bx + (regno >> 2); 203 return &regs->bx + (regno >> 2);
204 } 204 }
205 205
206 static u16 get_segment_reg(struct task_struct *task, unsigned long offset) 206 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
207 { 207 {
208 /* 208 /*
209 * Returning the value truncates it to 16 bits. 209 * Returning the value truncates it to 16 bits.
210 */ 210 */
211 unsigned int retval; 211 unsigned int retval;
212 if (offset != offsetof(struct user_regs_struct, gs)) 212 if (offset != offsetof(struct user_regs_struct, gs))
213 retval = *pt_regs_access(task_pt_regs(task), offset); 213 retval = *pt_regs_access(task_pt_regs(task), offset);
214 else { 214 else {
215 if (task == current) 215 if (task == current)
216 retval = get_user_gs(task_pt_regs(task)); 216 retval = get_user_gs(task_pt_regs(task));
217 else 217 else
218 retval = task_user_gs(task); 218 retval = task_user_gs(task);
219 } 219 }
220 return retval; 220 return retval;
221 } 221 }
222 222
223 static int set_segment_reg(struct task_struct *task, 223 static int set_segment_reg(struct task_struct *task,
224 unsigned long offset, u16 value) 224 unsigned long offset, u16 value)
225 { 225 {
226 /* 226 /*
227 * The value argument was already truncated to 16 bits. 227 * The value argument was already truncated to 16 bits.
228 */ 228 */
229 if (invalid_selector(value)) 229 if (invalid_selector(value))
230 return -EIO; 230 return -EIO;
231 231
232 /* 232 /*
233 * For %cs and %ss we cannot permit a null selector. 233 * For %cs and %ss we cannot permit a null selector.
234 * We can permit a bogus selector as long as it has USER_RPL. 234 * We can permit a bogus selector as long as it has USER_RPL.
235 * Null selectors are fine for other segment registers, but 235 * Null selectors are fine for other segment registers, but
236 * we will never get back to user mode with invalid %cs or %ss 236 * we will never get back to user mode with invalid %cs or %ss
237 * and will take the trap in iret instead. Much code relies 237 * and will take the trap in iret instead. Much code relies
238 * on user_mode() to distinguish a user trap frame (which can 238 * on user_mode() to distinguish a user trap frame (which can
239 * safely use invalid selectors) from a kernel trap frame. 239 * safely use invalid selectors) from a kernel trap frame.
240 */ 240 */
241 switch (offset) { 241 switch (offset) {
242 case offsetof(struct user_regs_struct, cs): 242 case offsetof(struct user_regs_struct, cs):
243 case offsetof(struct user_regs_struct, ss): 243 case offsetof(struct user_regs_struct, ss):
244 if (unlikely(value == 0)) 244 if (unlikely(value == 0))
245 return -EIO; 245 return -EIO;
246 246
247 default: 247 default:
248 *pt_regs_access(task_pt_regs(task), offset) = value; 248 *pt_regs_access(task_pt_regs(task), offset) = value;
249 break; 249 break;
250 250
251 case offsetof(struct user_regs_struct, gs): 251 case offsetof(struct user_regs_struct, gs):
252 if (task == current) 252 if (task == current)
253 set_user_gs(task_pt_regs(task), value); 253 set_user_gs(task_pt_regs(task), value);
254 else 254 else
255 task_user_gs(task) = value; 255 task_user_gs(task) = value;
256 } 256 }
257 257
258 return 0; 258 return 0;
259 } 259 }
260 260
261 #else /* CONFIG_X86_64 */ 261 #else /* CONFIG_X86_64 */
262 262
263 #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT) 263 #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT)
264 264
265 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset) 265 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
266 { 266 {
267 BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0); 267 BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
268 return &regs->r15 + (offset / sizeof(regs->r15)); 268 return &regs->r15 + (offset / sizeof(regs->r15));
269 } 269 }
270 270
271 static u16 get_segment_reg(struct task_struct *task, unsigned long offset) 271 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
272 { 272 {
273 /* 273 /*
274 * Returning the value truncates it to 16 bits. 274 * Returning the value truncates it to 16 bits.
275 */ 275 */
276 unsigned int seg; 276 unsigned int seg;
277 277
278 switch (offset) { 278 switch (offset) {
279 case offsetof(struct user_regs_struct, fs): 279 case offsetof(struct user_regs_struct, fs):
280 if (task == current) { 280 if (task == current) {
281 /* Older gas can't assemble movq %?s,%r?? */ 281 /* Older gas can't assemble movq %?s,%r?? */
282 asm("movl %%fs,%0" : "=r" (seg)); 282 asm("movl %%fs,%0" : "=r" (seg));
283 return seg; 283 return seg;
284 } 284 }
285 return task->thread.fsindex; 285 return task->thread.fsindex;
286 case offsetof(struct user_regs_struct, gs): 286 case offsetof(struct user_regs_struct, gs):
287 if (task == current) { 287 if (task == current) {
288 asm("movl %%gs,%0" : "=r" (seg)); 288 asm("movl %%gs,%0" : "=r" (seg));
289 return seg; 289 return seg;
290 } 290 }
291 return task->thread.gsindex; 291 return task->thread.gsindex;
292 case offsetof(struct user_regs_struct, ds): 292 case offsetof(struct user_regs_struct, ds):
293 if (task == current) { 293 if (task == current) {
294 asm("movl %%ds,%0" : "=r" (seg)); 294 asm("movl %%ds,%0" : "=r" (seg));
295 return seg; 295 return seg;
296 } 296 }
297 return task->thread.ds; 297 return task->thread.ds;
298 case offsetof(struct user_regs_struct, es): 298 case offsetof(struct user_regs_struct, es):
299 if (task == current) { 299 if (task == current) {
300 asm("movl %%es,%0" : "=r" (seg)); 300 asm("movl %%es,%0" : "=r" (seg));
301 return seg; 301 return seg;
302 } 302 }
303 return task->thread.es; 303 return task->thread.es;
304 304
305 case offsetof(struct user_regs_struct, cs): 305 case offsetof(struct user_regs_struct, cs):
306 case offsetof(struct user_regs_struct, ss): 306 case offsetof(struct user_regs_struct, ss):
307 break; 307 break;
308 } 308 }
309 return *pt_regs_access(task_pt_regs(task), offset); 309 return *pt_regs_access(task_pt_regs(task), offset);
310 } 310 }
311 311
312 static int set_segment_reg(struct task_struct *task, 312 static int set_segment_reg(struct task_struct *task,
313 unsigned long offset, u16 value) 313 unsigned long offset, u16 value)
314 { 314 {
315 /* 315 /*
316 * The value argument was already truncated to 16 bits. 316 * The value argument was already truncated to 16 bits.
317 */ 317 */
318 if (invalid_selector(value)) 318 if (invalid_selector(value))
319 return -EIO; 319 return -EIO;
320 320
321 switch (offset) { 321 switch (offset) {
322 case offsetof(struct user_regs_struct,fs): 322 case offsetof(struct user_regs_struct,fs):
323 /* 323 /*
324 * If this is setting fs as for normal 64-bit use but 324 * If this is setting fs as for normal 64-bit use but
325 * setting fs_base has implicitly changed it, leave it. 325 * setting fs_base has implicitly changed it, leave it.
326 */ 326 */
327 if ((value == FS_TLS_SEL && task->thread.fsindex == 0 && 327 if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
328 task->thread.fs != 0) || 328 task->thread.fs != 0) ||
329 (value == 0 && task->thread.fsindex == FS_TLS_SEL && 329 (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
330 task->thread.fs == 0)) 330 task->thread.fs == 0))
331 break; 331 break;
332 task->thread.fsindex = value; 332 task->thread.fsindex = value;
333 if (task == current) 333 if (task == current)
334 loadsegment(fs, task->thread.fsindex); 334 loadsegment(fs, task->thread.fsindex);
335 break; 335 break;
336 case offsetof(struct user_regs_struct,gs): 336 case offsetof(struct user_regs_struct,gs):
337 /* 337 /*
338 * If this is setting gs as for normal 64-bit use but 338 * If this is setting gs as for normal 64-bit use but
339 * setting gs_base has implicitly changed it, leave it. 339 * setting gs_base has implicitly changed it, leave it.
340 */ 340 */
341 if ((value == GS_TLS_SEL && task->thread.gsindex == 0 && 341 if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
342 task->thread.gs != 0) || 342 task->thread.gs != 0) ||
343 (value == 0 && task->thread.gsindex == GS_TLS_SEL && 343 (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
344 task->thread.gs == 0)) 344 task->thread.gs == 0))
345 break; 345 break;
346 task->thread.gsindex = value; 346 task->thread.gsindex = value;
347 if (task == current) 347 if (task == current)
348 load_gs_index(task->thread.gsindex); 348 load_gs_index(task->thread.gsindex);
349 break; 349 break;
350 case offsetof(struct user_regs_struct,ds): 350 case offsetof(struct user_regs_struct,ds):
351 task->thread.ds = value; 351 task->thread.ds = value;
352 if (task == current) 352 if (task == current)
353 loadsegment(ds, task->thread.ds); 353 loadsegment(ds, task->thread.ds);
354 break; 354 break;
355 case offsetof(struct user_regs_struct,es): 355 case offsetof(struct user_regs_struct,es):
356 task->thread.es = value; 356 task->thread.es = value;
357 if (task == current) 357 if (task == current)
358 loadsegment(es, task->thread.es); 358 loadsegment(es, task->thread.es);
359 break; 359 break;
360 360
361 /* 361 /*
362 * Can't actually change these in 64-bit mode. 362 * Can't actually change these in 64-bit mode.
363 */ 363 */
364 case offsetof(struct user_regs_struct,cs): 364 case offsetof(struct user_regs_struct,cs):
365 if (unlikely(value == 0)) 365 if (unlikely(value == 0))
366 return -EIO; 366 return -EIO;
367 #ifdef CONFIG_IA32_EMULATION 367 #ifdef CONFIG_IA32_EMULATION
368 if (test_tsk_thread_flag(task, TIF_IA32)) 368 if (test_tsk_thread_flag(task, TIF_IA32))
369 task_pt_regs(task)->cs = value; 369 task_pt_regs(task)->cs = value;
370 #endif 370 #endif
371 break; 371 break;
372 case offsetof(struct user_regs_struct,ss): 372 case offsetof(struct user_regs_struct,ss):
373 if (unlikely(value == 0)) 373 if (unlikely(value == 0))
374 return -EIO; 374 return -EIO;
375 #ifdef CONFIG_IA32_EMULATION 375 #ifdef CONFIG_IA32_EMULATION
376 if (test_tsk_thread_flag(task, TIF_IA32)) 376 if (test_tsk_thread_flag(task, TIF_IA32))
377 task_pt_regs(task)->ss = value; 377 task_pt_regs(task)->ss = value;
378 #endif 378 #endif
379 break; 379 break;
380 } 380 }
381 381
382 return 0; 382 return 0;
383 } 383 }
384 384
385 #endif /* CONFIG_X86_32 */ 385 #endif /* CONFIG_X86_32 */
386 386
387 static unsigned long get_flags(struct task_struct *task) 387 static unsigned long get_flags(struct task_struct *task)
388 { 388 {
389 unsigned long retval = task_pt_regs(task)->flags; 389 unsigned long retval = task_pt_regs(task)->flags;
390 390
391 /* 391 /*
392 * If the debugger set TF, hide it from the readout. 392 * If the debugger set TF, hide it from the readout.
393 */ 393 */
394 if (test_tsk_thread_flag(task, TIF_FORCED_TF)) 394 if (test_tsk_thread_flag(task, TIF_FORCED_TF))
395 retval &= ~X86_EFLAGS_TF; 395 retval &= ~X86_EFLAGS_TF;
396 396
397 return retval; 397 return retval;
398 } 398 }
399 399
400 static int set_flags(struct task_struct *task, unsigned long value) 400 static int set_flags(struct task_struct *task, unsigned long value)
401 { 401 {
402 struct pt_regs *regs = task_pt_regs(task); 402 struct pt_regs *regs = task_pt_regs(task);
403 403
404 /* 404 /*
405 * If the user value contains TF, mark that 405 * If the user value contains TF, mark that
406 * it was not "us" (the debugger) that set it. 406 * it was not "us" (the debugger) that set it.
407 * If not, make sure it stays set if we had. 407 * If not, make sure it stays set if we had.
408 */ 408 */
409 if (value & X86_EFLAGS_TF) 409 if (value & X86_EFLAGS_TF)
410 clear_tsk_thread_flag(task, TIF_FORCED_TF); 410 clear_tsk_thread_flag(task, TIF_FORCED_TF);
411 else if (test_tsk_thread_flag(task, TIF_FORCED_TF)) 411 else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
412 value |= X86_EFLAGS_TF; 412 value |= X86_EFLAGS_TF;
413 413
414 regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK); 414 regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
415 415
416 return 0; 416 return 0;
417 } 417 }
418 418
419 static int putreg(struct task_struct *child, 419 static int putreg(struct task_struct *child,
420 unsigned long offset, unsigned long value) 420 unsigned long offset, unsigned long value)
421 { 421 {
422 switch (offset) { 422 switch (offset) {
423 case offsetof(struct user_regs_struct, cs): 423 case offsetof(struct user_regs_struct, cs):
424 case offsetof(struct user_regs_struct, ds): 424 case offsetof(struct user_regs_struct, ds):
425 case offsetof(struct user_regs_struct, es): 425 case offsetof(struct user_regs_struct, es):
426 case offsetof(struct user_regs_struct, fs): 426 case offsetof(struct user_regs_struct, fs):
427 case offsetof(struct user_regs_struct, gs): 427 case offsetof(struct user_regs_struct, gs):
428 case offsetof(struct user_regs_struct, ss): 428 case offsetof(struct user_regs_struct, ss):
429 return set_segment_reg(child, offset, value); 429 return set_segment_reg(child, offset, value);
430 430
431 case offsetof(struct user_regs_struct, flags): 431 case offsetof(struct user_regs_struct, flags):
432 return set_flags(child, value); 432 return set_flags(child, value);
433 433
434 #ifdef CONFIG_X86_64 434 #ifdef CONFIG_X86_64
435 case offsetof(struct user_regs_struct,fs_base): 435 case offsetof(struct user_regs_struct,fs_base):
436 if (value >= TASK_SIZE_OF(child)) 436 if (value >= TASK_SIZE_OF(child))
437 return -EIO; 437 return -EIO;
438 /* 438 /*
439 * When changing the segment base, use do_arch_prctl 439 * When changing the segment base, use do_arch_prctl
440 * to set either thread.fs or thread.fsindex and the 440 * to set either thread.fs or thread.fsindex and the
441 * corresponding GDT slot. 441 * corresponding GDT slot.
442 */ 442 */
443 if (child->thread.fs != value) 443 if (child->thread.fs != value)
444 return do_arch_prctl(child, ARCH_SET_FS, value); 444 return do_arch_prctl(child, ARCH_SET_FS, value);
445 return 0; 445 return 0;
446 case offsetof(struct user_regs_struct,gs_base): 446 case offsetof(struct user_regs_struct,gs_base):
447 /* 447 /*
448 * Exactly the same here as the %fs handling above. 448 * Exactly the same here as the %fs handling above.
449 */ 449 */
450 if (value >= TASK_SIZE_OF(child)) 450 if (value >= TASK_SIZE_OF(child))
451 return -EIO; 451 return -EIO;
452 if (child->thread.gs != value) 452 if (child->thread.gs != value)
453 return do_arch_prctl(child, ARCH_SET_GS, value); 453 return do_arch_prctl(child, ARCH_SET_GS, value);
454 return 0; 454 return 0;
455 #endif 455 #endif
456 } 456 }
457 457
458 *pt_regs_access(task_pt_regs(child), offset) = value; 458 *pt_regs_access(task_pt_regs(child), offset) = value;
459 return 0; 459 return 0;
460 } 460 }
461 461
462 static unsigned long getreg(struct task_struct *task, unsigned long offset) 462 static unsigned long getreg(struct task_struct *task, unsigned long offset)
463 { 463 {
464 switch (offset) { 464 switch (offset) {
465 case offsetof(struct user_regs_struct, cs): 465 case offsetof(struct user_regs_struct, cs):
466 case offsetof(struct user_regs_struct, ds): 466 case offsetof(struct user_regs_struct, ds):
467 case offsetof(struct user_regs_struct, es): 467 case offsetof(struct user_regs_struct, es):
468 case offsetof(struct user_regs_struct, fs): 468 case offsetof(struct user_regs_struct, fs):
469 case offsetof(struct user_regs_struct, gs): 469 case offsetof(struct user_regs_struct, gs):
470 case offsetof(struct user_regs_struct, ss): 470 case offsetof(struct user_regs_struct, ss):
471 return get_segment_reg(task, offset); 471 return get_segment_reg(task, offset);
472 472
473 case offsetof(struct user_regs_struct, flags): 473 case offsetof(struct user_regs_struct, flags):
474 return get_flags(task); 474 return get_flags(task);
475 475
476 #ifdef CONFIG_X86_64 476 #ifdef CONFIG_X86_64
477 case offsetof(struct user_regs_struct, fs_base): { 477 case offsetof(struct user_regs_struct, fs_base): {
478 /* 478 /*
479 * do_arch_prctl may have used a GDT slot instead of 479 * do_arch_prctl may have used a GDT slot instead of
480 * the MSR. To userland, it appears the same either 480 * the MSR. To userland, it appears the same either
481 * way, except the %fs segment selector might not be 0. 481 * way, except the %fs segment selector might not be 0.
482 */ 482 */
483 unsigned int seg = task->thread.fsindex; 483 unsigned int seg = task->thread.fsindex;
484 if (task->thread.fs != 0) 484 if (task->thread.fs != 0)
485 return task->thread.fs; 485 return task->thread.fs;
486 if (task == current) 486 if (task == current)
487 asm("movl %%fs,%0" : "=r" (seg)); 487 asm("movl %%fs,%0" : "=r" (seg));
488 if (seg != FS_TLS_SEL) 488 if (seg != FS_TLS_SEL)
489 return 0; 489 return 0;
490 return get_desc_base(&task->thread.tls_array[FS_TLS]); 490 return get_desc_base(&task->thread.tls_array[FS_TLS]);
491 } 491 }
492 case offsetof(struct user_regs_struct, gs_base): { 492 case offsetof(struct user_regs_struct, gs_base): {
493 /* 493 /*
494 * Exactly the same here as the %fs handling above. 494 * Exactly the same here as the %fs handling above.
495 */ 495 */
496 unsigned int seg = task->thread.gsindex; 496 unsigned int seg = task->thread.gsindex;
497 if (task->thread.gs != 0) 497 if (task->thread.gs != 0)
498 return task->thread.gs; 498 return task->thread.gs;
499 if (task == current) 499 if (task == current)
500 asm("movl %%gs,%0" : "=r" (seg)); 500 asm("movl %%gs,%0" : "=r" (seg));
501 if (seg != GS_TLS_SEL) 501 if (seg != GS_TLS_SEL)
502 return 0; 502 return 0;
503 return get_desc_base(&task->thread.tls_array[GS_TLS]); 503 return get_desc_base(&task->thread.tls_array[GS_TLS]);
504 } 504 }
505 #endif 505 #endif
506 } 506 }
507 507
508 return *pt_regs_access(task_pt_regs(task), offset); 508 return *pt_regs_access(task_pt_regs(task), offset);
509 } 509 }
510 510
511 static int genregs_get(struct task_struct *target, 511 static int genregs_get(struct task_struct *target,
512 const struct user_regset *regset, 512 const struct user_regset *regset,
513 unsigned int pos, unsigned int count, 513 unsigned int pos, unsigned int count,
514 void *kbuf, void __user *ubuf) 514 void *kbuf, void __user *ubuf)
515 { 515 {
516 if (kbuf) { 516 if (kbuf) {
517 unsigned long *k = kbuf; 517 unsigned long *k = kbuf;
518 while (count >= sizeof(*k)) { 518 while (count >= sizeof(*k)) {
519 *k++ = getreg(target, pos); 519 *k++ = getreg(target, pos);
520 count -= sizeof(*k); 520 count -= sizeof(*k);
521 pos += sizeof(*k); 521 pos += sizeof(*k);
522 } 522 }
523 } else { 523 } else {
524 unsigned long __user *u = ubuf; 524 unsigned long __user *u = ubuf;
525 while (count >= sizeof(*u)) { 525 while (count >= sizeof(*u)) {
526 if (__put_user(getreg(target, pos), u++)) 526 if (__put_user(getreg(target, pos), u++))
527 return -EFAULT; 527 return -EFAULT;
528 count -= sizeof(*u); 528 count -= sizeof(*u);
529 pos += sizeof(*u); 529 pos += sizeof(*u);
530 } 530 }
531 } 531 }
532 532
533 return 0; 533 return 0;
534 } 534 }
535 535
536 static int genregs_set(struct task_struct *target, 536 static int genregs_set(struct task_struct *target,
537 const struct user_regset *regset, 537 const struct user_regset *regset,
538 unsigned int pos, unsigned int count, 538 unsigned int pos, unsigned int count,
539 const void *kbuf, const void __user *ubuf) 539 const void *kbuf, const void __user *ubuf)
540 { 540 {
541 int ret = 0; 541 int ret = 0;
542 if (kbuf) { 542 if (kbuf) {
543 const unsigned long *k = kbuf; 543 const unsigned long *k = kbuf;
544 while (count >= sizeof(*k) && !ret) { 544 while (count >= sizeof(*k) && !ret) {
545 ret = putreg(target, pos, *k++); 545 ret = putreg(target, pos, *k++);
546 count -= sizeof(*k); 546 count -= sizeof(*k);
547 pos += sizeof(*k); 547 pos += sizeof(*k);
548 } 548 }
549 } else { 549 } else {
550 const unsigned long __user *u = ubuf; 550 const unsigned long __user *u = ubuf;
551 while (count >= sizeof(*u) && !ret) { 551 while (count >= sizeof(*u) && !ret) {
552 unsigned long word; 552 unsigned long word;
553 ret = __get_user(word, u++); 553 ret = __get_user(word, u++);
554 if (ret) 554 if (ret)
555 break; 555 break;
556 ret = putreg(target, pos, word); 556 ret = putreg(target, pos, word);
557 count -= sizeof(*u); 557 count -= sizeof(*u);
558 pos += sizeof(*u); 558 pos += sizeof(*u);
559 } 559 }
560 } 560 }
561 return ret; 561 return ret;
562 } 562 }
563 563
564 static void ptrace_triggered(struct perf_event *bp, 564 static void ptrace_triggered(struct perf_event *bp,
565 struct perf_sample_data *data, 565 struct perf_sample_data *data,
566 struct pt_regs *regs) 566 struct pt_regs *regs)
567 { 567 {
568 int i; 568 int i;
569 struct thread_struct *thread = &(current->thread); 569 struct thread_struct *thread = &(current->thread);
570 570
571 /* 571 /*
572 * Store in the virtual DR6 register the fact that the breakpoint 572 * Store in the virtual DR6 register the fact that the breakpoint
573 * was hit so the thread's debugger will see it. 573 * was hit so the thread's debugger will see it.
574 */ 574 */
575 for (i = 0; i < HBP_NUM; i++) { 575 for (i = 0; i < HBP_NUM; i++) {
576 if (thread->ptrace_bps[i] == bp) 576 if (thread->ptrace_bps[i] == bp)
577 break; 577 break;
578 } 578 }
579 579
580 thread->debugreg6 |= (DR_TRAP0 << i); 580 thread->debugreg6 |= (DR_TRAP0 << i);
581 } 581 }
582 582
583 /* 583 /*
584 * Walk through every ptrace breakpoints for this thread and 584 * Walk through every ptrace breakpoints for this thread and
585 * build the dr7 value on top of their attributes. 585 * build the dr7 value on top of their attributes.
586 * 586 *
587 */ 587 */
588 static unsigned long ptrace_get_dr7(struct perf_event *bp[]) 588 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
589 { 589 {
590 int i; 590 int i;
591 int dr7 = 0; 591 int dr7 = 0;
592 struct arch_hw_breakpoint *info; 592 struct arch_hw_breakpoint *info;
593 593
594 for (i = 0; i < HBP_NUM; i++) { 594 for (i = 0; i < HBP_NUM; i++) {
595 if (bp[i] && !bp[i]->attr.disabled) { 595 if (bp[i] && !bp[i]->attr.disabled) {
596 info = counter_arch_bp(bp[i]); 596 info = counter_arch_bp(bp[i]);
597 dr7 |= encode_dr7(i, info->len, info->type); 597 dr7 |= encode_dr7(i, info->len, info->type);
598 } 598 }
599 } 599 }
600 600
601 return dr7; 601 return dr7;
602 } 602 }
603 603
604 static int 604 static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
605 ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, 605 int len, int type, bool disabled)
606 struct task_struct *tsk, int disabled)
607 { 606 {
608 int err; 607 int err, bp_len, bp_type;
609 int gen_len, gen_type; 608
609 err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
610 if (!err) {
611 attr->bp_len = bp_len;
612 attr->bp_type = bp_type;
613 attr->disabled = disabled;
614 }
615
616 return err;
617 }
618
619 static struct perf_event *
620 ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
621 unsigned long addr, bool disabled)
622 {
610 struct perf_event_attr attr; 623 struct perf_event_attr attr;
624 int err;
611 625
612 err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); 626 ptrace_breakpoint_init(&attr);
627 attr.bp_addr = addr;
628
629 err = ptrace_fill_bp_fields(&attr, len, type, disabled);
613 if (err) 630 if (err)
614 return err; 631 return ERR_PTR(err);
615 632
616 attr = bp->attr; 633 return register_user_hw_breakpoint(&attr, ptrace_triggered,
617 attr.bp_len = gen_len; 634 NULL, tsk);
618 attr.bp_type = gen_type; 635 }
619 attr.disabled = disabled;
620 636
637 static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
638 int disabled)
639 {
640 struct perf_event_attr attr = bp->attr;
641 int err;
642
643 err = ptrace_fill_bp_fields(&attr, len, type, disabled);
644 if (err)
645 return err;
646
621 return modify_user_hw_breakpoint(bp, &attr); 647 return modify_user_hw_breakpoint(bp, &attr);
622 } 648 }
623 649
/*
 * Handle ptrace writes to debug register 7.
 *
 * Applies the new control word to the breakpoint slots already reserved
 * in thread->ptrace_bps[].  If any slot update fails, a second pass
 * re-applies the previous dr7 value so the hardware state is never left
 * half-updated; the first-pass error is still reported to the caller.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long old_dr7;
	bool second_pass = false;
	int i, rc, ret = 0;

	/* Clear the bits that must read as zero in DR7. */
	data &= ~DR_CONTROL_RESERVED;
	/* Snapshot current state so we can roll back on failure. */
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);

restore:
	rc = 0;
	for (i = 0; i < HBP_NUM; i++) {
		unsigned len, type;
		/* disabled == this slot is not enabled in the new dr7 */
		bool disabled = !decode_dr7(data, i, &len, &type);
		struct perf_event *bp = thread->ptrace_bps[i];

		if (!bp) {
			if (disabled)
				continue;
			/*
			 * We should have at least an inactive breakpoint at
			 * this slot. It means the user is writing dr7 without
			 * having written the address register first.
			 */
			rc = -EINVAL;
			break;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
		if (rc)
			break;
	}

	/* Restore if the first pass failed, second_pass shouldn't fail. */
	if (rc && !WARN_ON(second_pass)) {
		ret = rc;	/* report the first-pass error */
		data = old_dr7;
		second_pass = true;
		goto restore;
	}

	return ret;
}
671 697
672 /* 698 /*
673 * Handle PTRACE_PEEKUSR calls for the debug register area. 699 * Handle PTRACE_PEEKUSR calls for the debug register area.
674 */ 700 */
675 static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) 701 static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
676 { 702 {
677 struct thread_struct *thread = &(tsk->thread); 703 struct thread_struct *thread = &(tsk->thread);
678 unsigned long val = 0; 704 unsigned long val = 0;
679 705
680 if (n < HBP_NUM) { 706 if (n < HBP_NUM) {
681 struct perf_event *bp = thread->ptrace_bps[n]; 707 struct perf_event *bp = thread->ptrace_bps[n];
682 708
683 if (bp) 709 if (bp)
684 val = bp->hw.info.address; 710 val = bp->hw.info.address;
685 } else if (n == 6) { 711 } else if (n == 6) {
686 val = thread->debugreg6; 712 val = thread->debugreg6;
687 } else if (n == 7) { 713 } else if (n == 7) {
688 val = thread->ptrace_dr7; 714 val = thread->ptrace_dr7;
689 } 715 }
690 return val; 716 return val;
691 } 717 }
692 718
693 static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, 719 static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
694 unsigned long addr) 720 unsigned long addr)
695 { 721 {
696 struct perf_event *bp;
697 struct thread_struct *t = &tsk->thread; 722 struct thread_struct *t = &tsk->thread;
698 struct perf_event_attr attr; 723 struct perf_event *bp = t->ptrace_bps[nr];
699 int err = 0; 724 int err = 0;
700 725
701 if (!t->ptrace_bps[nr]) { 726 if (!bp) {
702 ptrace_breakpoint_init(&attr);
703 /* 727 /*
704 * Put stub len and type to register (reserve) an inactive but 728 * Put stub len and type to create an inactive but correct bp.
705 * correct bp 729 *
706 */
707 attr.bp_addr = addr;
708 attr.bp_len = HW_BREAKPOINT_LEN_1;
709 attr.bp_type = HW_BREAKPOINT_W;
710 attr.disabled = 1;
711
712 bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
713 NULL, tsk);
714
715 /*
716 * CHECKME: the previous code returned -EIO if the addr wasn't 730 * CHECKME: the previous code returned -EIO if the addr wasn't
717 * a valid task virtual addr. The new one will return -EINVAL in 731 * a valid task virtual addr. The new one will return -EINVAL in
718 * this case. 732 * this case.
719 * -EINVAL may be what we want for in-kernel breakpoints users, 733 * -EINVAL may be what we want for in-kernel breakpoints users,
720 * but -EIO looks better for ptrace, since we refuse a register 734 * but -EIO looks better for ptrace, since we refuse a register
721 * writing for the user. And anyway this is the previous 735 * writing for the user. And anyway this is the previous
722 * behaviour. 736 * behaviour.
723 */ 737 */
724 if (IS_ERR(bp)) { 738 bp = ptrace_register_breakpoint(tsk,
739 X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
740 addr, true);
741 if (IS_ERR(bp))
725 err = PTR_ERR(bp); 742 err = PTR_ERR(bp);
726 goto out; 743 else
727 } 744 t->ptrace_bps[nr] = bp;
728
729 t->ptrace_bps[nr] = bp;
730 } else { 745 } else {
731 bp = t->ptrace_bps[nr]; 746 struct perf_event_attr attr = bp->attr;
732 747
733 attr = bp->attr;
734 attr.bp_addr = addr; 748 attr.bp_addr = addr;
735 err = modify_user_hw_breakpoint(bp, &attr); 749 err = modify_user_hw_breakpoint(bp, &attr);
736 } 750 }
737 out: 751
738 return err; 752 return err;
739 } 753 }
740 754
741 /* 755 /*
742 * Handle PTRACE_POKEUSR calls for the debug register area. 756 * Handle PTRACE_POKEUSR calls for the debug register area.
743 */ 757 */
744 static int ptrace_set_debugreg(struct task_struct *tsk, int n, 758 static int ptrace_set_debugreg(struct task_struct *tsk, int n,
745 unsigned long val) 759 unsigned long val)
746 { 760 {
747 struct thread_struct *thread = &(tsk->thread); 761 struct thread_struct *thread = &(tsk->thread);
748 int rc = 0; 762 int rc = 0;
749 763
750 /* There are no DR4 or DR5 registers */ 764 /* There are no DR4 or DR5 registers */
751 if (n == 4 || n == 5) 765 if (n == 4 || n == 5)
752 return -EIO; 766 return -EIO;
753 767
754 if (n == 6) { 768 if (n == 6) {
755 thread->debugreg6 = val; 769 thread->debugreg6 = val;
756 goto ret_path; 770 goto ret_path;
757 } 771 }
758 if (n < HBP_NUM) { 772 if (n < HBP_NUM) {
759 rc = ptrace_set_breakpoint_addr(tsk, n, val); 773 rc = ptrace_set_breakpoint_addr(tsk, n, val);
760 if (rc) 774 if (rc)
761 return rc; 775 return rc;
762 } 776 }
763 /* All that's left is DR7 */ 777 /* All that's left is DR7 */
764 if (n == 7) { 778 if (n == 7) {
765 rc = ptrace_write_dr7(tsk, val); 779 rc = ptrace_write_dr7(tsk, val);
766 if (!rc) 780 if (!rc)
767 thread->ptrace_dr7 = val; 781 thread->ptrace_dr7 = val;
768 } 782 }
769 783
770 ret_path: 784 ret_path:
771 return rc; 785 return rc;
772 } 786 }
773 787
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	/* Number of regset-sized units covered by the task's io bitmap. */
	return target->thread.io_bitmap_max / regset->size;
}
783 797
784 static int ioperm_get(struct task_struct *target, 798 static int ioperm_get(struct task_struct *target,
785 const struct user_regset *regset, 799 const struct user_regset *regset,
786 unsigned int pos, unsigned int count, 800 unsigned int pos, unsigned int count,
787 void *kbuf, void __user *ubuf) 801 void *kbuf, void __user *ubuf)
788 { 802 {
789 if (!target->thread.io_bitmap_ptr) 803 if (!target->thread.io_bitmap_ptr)
790 return -ENXIO; 804 return -ENXIO;
791 805
792 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 806 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
793 target->thread.io_bitmap_ptr, 807 target->thread.io_bitmap_ptr,
794 0, IO_BITMAP_BYTES); 808 0, IO_BITMAP_BYTES);
795 } 809 }
796 810
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	/* Also drop any pending PTRACE_SYSEMU request. */
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}
809 823
810 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION 824 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
811 static const struct user_regset_view user_x86_32_view; /* Initialized below. */ 825 static const struct user_regset_view user_x86_32_view; /* Initialized below. */
812 #endif 826 #endif
813 827
/*
 * x86 back end for the ptrace(2) syscall: handles the architecture-
 * specific requests (USER-area peek/poke, register sets, thread areas,
 * arch_prctl) and forwards everything else to the generic
 * ptrace_request().  @addr/@data meaning depends on @request.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		/* addr must be word-aligned and inside struct user */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
930 944
931 #ifdef CONFIG_IA32_EMULATION 945 #ifdef CONFIG_IA32_EMULATION
932 946
933 #include <linux/compat.h> 947 #include <linux/compat.h>
934 #include <linux/syscalls.h> 948 #include <linux/syscalls.h>
935 #include <asm/ia32.h> 949 #include <asm/ia32.h>
936 #include <asm/user32.h> 950 #include <asm/user32.h>
937 951
/* Map a 32-bit user32 register slot onto the 64-bit pt_regs field. */
#define R32(l,q) \
	case offsetof(struct user32, regs.l): \
		regs->q = value; break

/* Segment registers go through set_segment_reg() for validation. */
#define SEG32(rs) \
	case offsetof(struct user32, regs.rs): \
		return set_segment_reg(child, \
				       offsetof(struct user_regs_struct, rs), \
				       value); \
		break

/*
 * Write one 32-bit USER-area register of a compat (ia32) tracee.
 * @regno is the byte offset into struct user32; returns 0 or -EIO
 * for a misaligned/out-of-range offset.
 */
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * A 32-bit debugger setting orig_eax means to restore
		 * the state of the task restarting a 32-bit syscall.
		 * Make sure we interpret the -ERESTART* codes correctly
		 * in case the task is not actually still sitting at the
		 * exit from a 32-bit syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			task_thread_info(child)->status |= TS_COMPAT;
		break;

	case offsetof(struct user32, regs.eflags):
		/* eflags writes are filtered through set_flags(). */
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32
1008 1022
/* Read the 64-bit pt_regs field backing a 32-bit user32 register slot. */
#define R32(l,q) \
	case offsetof(struct user32, regs.l): \
		*val = regs->q; break

/* Segment registers are fetched via get_segment_reg(). */
#define SEG32(rs) \
	case offsetof(struct user32, regs.rs): \
		*val = get_segment_reg(child, \
				       offsetof(struct user_regs_struct, rs)); \
		break

/*
 * Read one 32-bit USER-area register of a compat (ia32) tracee into
 * *val.  @regno is the byte offset into struct user32; unknown but
 * well-formed offsets read as 0, bad offsets return -EIO.
 */
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32
1069 1083
1070 static int genregs32_get(struct task_struct *target, 1084 static int genregs32_get(struct task_struct *target,
1071 const struct user_regset *regset, 1085 const struct user_regset *regset,
1072 unsigned int pos, unsigned int count, 1086 unsigned int pos, unsigned int count,
1073 void *kbuf, void __user *ubuf) 1087 void *kbuf, void __user *ubuf)
1074 { 1088 {
1075 if (kbuf) { 1089 if (kbuf) {
1076 compat_ulong_t *k = kbuf; 1090 compat_ulong_t *k = kbuf;
1077 while (count >= sizeof(*k)) { 1091 while (count >= sizeof(*k)) {
1078 getreg32(target, pos, k++); 1092 getreg32(target, pos, k++);
1079 count -= sizeof(*k); 1093 count -= sizeof(*k);
1080 pos += sizeof(*k); 1094 pos += sizeof(*k);
1081 } 1095 }
1082 } else { 1096 } else {
1083 compat_ulong_t __user *u = ubuf; 1097 compat_ulong_t __user *u = ubuf;
1084 while (count >= sizeof(*u)) { 1098 while (count >= sizeof(*u)) {
1085 compat_ulong_t word; 1099 compat_ulong_t word;
1086 getreg32(target, pos, &word); 1100 getreg32(target, pos, &word);
1087 if (__put_user(word, u++)) 1101 if (__put_user(word, u++))
1088 return -EFAULT; 1102 return -EFAULT;
1089 count -= sizeof(*u); 1103 count -= sizeof(*u);
1090 pos += sizeof(*u); 1104 pos += sizeof(*u);
1091 } 1105 }
1092 } 1106 }
1093 1107
1094 return 0; 1108 return 0;
1095 } 1109 }
1096 1110
1097 static int genregs32_set(struct task_struct *target, 1111 static int genregs32_set(struct task_struct *target,
1098 const struct user_regset *regset, 1112 const struct user_regset *regset,
1099 unsigned int pos, unsigned int count, 1113 unsigned int pos, unsigned int count,
1100 const void *kbuf, const void __user *ubuf) 1114 const void *kbuf, const void __user *ubuf)
1101 { 1115 {
1102 int ret = 0; 1116 int ret = 0;
1103 if (kbuf) { 1117 if (kbuf) {
1104 const compat_ulong_t *k = kbuf; 1118 const compat_ulong_t *k = kbuf;
1105 while (count >= sizeof(*k) && !ret) { 1119 while (count >= sizeof(*k) && !ret) {
1106 ret = putreg32(target, pos, *k++); 1120 ret = putreg32(target, pos, *k++);
1107 count -= sizeof(*k); 1121 count -= sizeof(*k);
1108 pos += sizeof(*k); 1122 pos += sizeof(*k);
1109 } 1123 }
1110 } else { 1124 } else {
1111 const compat_ulong_t __user *u = ubuf; 1125 const compat_ulong_t __user *u = ubuf;
1112 while (count >= sizeof(*u) && !ret) { 1126 while (count >= sizeof(*u) && !ret) {
1113 compat_ulong_t word; 1127 compat_ulong_t word;
1114 ret = __get_user(word, u++); 1128 ret = __get_user(word, u++);
1115 if (ret) 1129 if (ret)
1116 break; 1130 break;
1117 ret = putreg32(target, pos, word); 1131 ret = putreg32(target, pos, word);
1118 count -= sizeof(*u); 1132 count -= sizeof(*u);
1119 pos += sizeof(*u); 1133 pos += sizeof(*u);
1120 } 1134 }
1121 } 1135 }
1122 return ret; 1136 return ret;
1123 } 1137 }
1124 1138
1125 #ifdef CONFIG_X86_X32_ABI 1139 #ifdef CONFIG_X86_X32_ABI
1126 static long x32_arch_ptrace(struct task_struct *child, 1140 static long x32_arch_ptrace(struct task_struct *child,
1127 compat_long_t request, compat_ulong_t caddr, 1141 compat_long_t request, compat_ulong_t caddr,
1128 compat_ulong_t cdata) 1142 compat_ulong_t cdata)
1129 { 1143 {
1130 unsigned long addr = caddr; 1144 unsigned long addr = caddr;
1131 unsigned long data = cdata; 1145 unsigned long data = cdata;
1132 void __user *datap = compat_ptr(data); 1146 void __user *datap = compat_ptr(data);
1133 int ret; 1147 int ret;
1134 1148
1135 switch (request) { 1149 switch (request) {
1136 /* Read 32bits at location addr in the USER area. Only allow 1150 /* Read 32bits at location addr in the USER area. Only allow
1137 to return the lower 32bits of segment and debug registers. */ 1151 to return the lower 32bits of segment and debug registers. */
1138 case PTRACE_PEEKUSR: { 1152 case PTRACE_PEEKUSR: {
1139 u32 tmp; 1153 u32 tmp;
1140 1154
1141 ret = -EIO; 1155 ret = -EIO;
1142 if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) || 1156 if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1143 addr < offsetof(struct user_regs_struct, cs)) 1157 addr < offsetof(struct user_regs_struct, cs))
1144 break; 1158 break;
1145 1159
1146 tmp = 0; /* Default return condition */ 1160 tmp = 0; /* Default return condition */
1147 if (addr < sizeof(struct user_regs_struct)) 1161 if (addr < sizeof(struct user_regs_struct))
1148 tmp = getreg(child, addr); 1162 tmp = getreg(child, addr);
1149 else if (addr >= offsetof(struct user, u_debugreg[0]) && 1163 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1150 addr <= offsetof(struct user, u_debugreg[7])) { 1164 addr <= offsetof(struct user, u_debugreg[7])) {
1151 addr -= offsetof(struct user, u_debugreg[0]); 1165 addr -= offsetof(struct user, u_debugreg[0]);
1152 tmp = ptrace_get_debugreg(child, addr / sizeof(data)); 1166 tmp = ptrace_get_debugreg(child, addr / sizeof(data));
1153 } 1167 }
1154 ret = put_user(tmp, (__u32 __user *)datap); 1168 ret = put_user(tmp, (__u32 __user *)datap);
1155 break; 1169 break;
1156 } 1170 }
1157 1171
1158 /* Write the word at location addr in the USER area. Only allow 1172 /* Write the word at location addr in the USER area. Only allow
1159 to update segment and debug registers with the upper 32bits 1173 to update segment and debug registers with the upper 32bits
1160 zero-extended. */ 1174 zero-extended. */
1161 case PTRACE_POKEUSR: 1175 case PTRACE_POKEUSR:
1162 ret = -EIO; 1176 ret = -EIO;
1163 if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) || 1177 if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1164 addr < offsetof(struct user_regs_struct, cs)) 1178 addr < offsetof(struct user_regs_struct, cs))
1165 break; 1179 break;
1166 1180
1167 if (addr < sizeof(struct user_regs_struct)) 1181 if (addr < sizeof(struct user_regs_struct))
1168 ret = putreg(child, addr, data); 1182 ret = putreg(child, addr, data);
1169 else if (addr >= offsetof(struct user, u_debugreg[0]) && 1183 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1170 addr <= offsetof(struct user, u_debugreg[7])) { 1184 addr <= offsetof(struct user, u_debugreg[7])) {
1171 addr -= offsetof(struct user, u_debugreg[0]); 1185 addr -= offsetof(struct user, u_debugreg[0]);
1172 ret = ptrace_set_debugreg(child, 1186 ret = ptrace_set_debugreg(child,
1173 addr / sizeof(data), data); 1187 addr / sizeof(data), data);
1174 } 1188 }
1175 break; 1189 break;
1176 1190
1177 case PTRACE_GETREGS: /* Get all gp regs from the child. */ 1191 case PTRACE_GETREGS: /* Get all gp regs from the child. */
1178 return copy_regset_to_user(child, 1192 return copy_regset_to_user(child,
1179 task_user_regset_view(current), 1193 task_user_regset_view(current),
1180 REGSET_GENERAL, 1194 REGSET_GENERAL,
1181 0, sizeof(struct user_regs_struct), 1195 0, sizeof(struct user_regs_struct),
1182 datap); 1196 datap);
1183 1197
1184 case PTRACE_SETREGS: /* Set all gp regs in the child. */ 1198 case PTRACE_SETREGS: /* Set all gp regs in the child. */
1185 return copy_regset_from_user(child, 1199 return copy_regset_from_user(child,
1186 task_user_regset_view(current), 1200 task_user_regset_view(current),
1187 REGSET_GENERAL, 1201 REGSET_GENERAL,
1188 0, sizeof(struct user_regs_struct), 1202 0, sizeof(struct user_regs_struct),
1189 datap); 1203 datap);
1190 1204
1191 case PTRACE_GETFPREGS: /* Get the child FPU state. */ 1205 case PTRACE_GETFPREGS: /* Get the child FPU state. */
1192 return copy_regset_to_user(child, 1206 return copy_regset_to_user(child,
1193 task_user_regset_view(current), 1207 task_user_regset_view(current),
1194 REGSET_FP, 1208 REGSET_FP,
1195 0, sizeof(struct user_i387_struct), 1209 0, sizeof(struct user_i387_struct),
1196 datap); 1210 datap);
1197 1211
1198 case PTRACE_SETFPREGS: /* Set the child FPU state. */ 1212 case PTRACE_SETFPREGS: /* Set the child FPU state. */
1199 return copy_regset_from_user(child, 1213 return copy_regset_from_user(child,
1200 task_user_regset_view(current), 1214 task_user_regset_view(current),
1201 REGSET_FP, 1215 REGSET_FP,
1202 0, sizeof(struct user_i387_struct), 1216 0, sizeof(struct user_i387_struct),
1203 datap); 1217 datap);
1204 1218
1205 default: 1219 default:
1206 return compat_ptrace_request(child, request, addr, data); 1220 return compat_ptrace_request(child, request, addr, data);
1207 } 1221 }
1208 1222
1209 return ret; 1223 return ret;
1210 } 1224 }
1211 #endif 1225 #endif
1212 1226
/*
 * 32-bit compat ptrace entry point.  Handles the requests whose data
 * layout differs under the ia32 ABI; everything else falls through to
 * the generic compat_ptrace_request().
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

#ifdef CONFIG_X86_X32_ABI
	/* Non-ia32 (i.e. x32) tasks take the x32-specific path. */
	if (!is_ia32_task())
		return x32_arch_ptrace(child, request, caddr, cdata);
#endif

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		/* Same layout as the native requests; reuse arch_ptrace(). */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
1283 1297
1284 #endif /* CONFIG_IA32_EMULATION */ 1298 #endif /* CONFIG_IA32_EMULATION */
1285 1299
1286 #ifdef CONFIG_X86_64 1300 #ifdef CONFIG_X86_64
1287 1301
/*
 * Regsets exported for 64-bit tasks (core dumps and the
 * PTRACE_GETREGSET/SETREGSET interface).
 */
static struct user_regset x86_64_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		/* .n is filled in at runtime by update_regset_xstate_info() */
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};
1314 1328
/* Regset view presented for native 64-bit tasks. */
static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};
1319 1333
1320 #else /* CONFIG_X86_32 */ 1334 #else /* CONFIG_X86_32 */
1321 1335
1322 #define user_regs_struct32 user_regs_struct 1336 #define user_regs_struct32 user_regs_struct
1323 #define genregs32_get genregs_get 1337 #define genregs32_get genregs_get
1324 #define genregs32_set genregs_set 1338 #define genregs32_set genregs_set
1325 1339
1326 #endif /* CONFIG_X86_64 */ 1340 #endif /* CONFIG_X86_64 */
1327 1341
1328 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION 1342 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
 * Regsets exported for 32-bit tasks (native 32-bit kernels and ia32
 * emulation on 64-bit kernels).
 */
static struct user_regset x86_32_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		/* .n is filled in at runtime by update_regset_xstate_info() */
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};
1369 1383
/* Regset view presented for 32-bit (i386-ABI) tasks. */
static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
1374 #endif 1388 #endif
1375 1389
/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 *
 * The XCR0 word is stored here by update_regset_xstate_info().
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
1381 1395
/*
 * Set the element count of the REGSET_XSTATE regsets from @size (in
 * bytes) and record @xstate_mask in the exported fx_sw area.
 */
void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}
1392 1406
/*
 * Return the regset view matching @task's ABI: the 32-bit view for
 * ia32 tasks (or on 32-bit kernels), the 64-bit view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	/* On 64-bit kernels only TIF_IA32 tasks get the 32-bit view. */
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
1405 1419
1406 static void fill_sigtrap_info(struct task_struct *tsk, 1420 static void fill_sigtrap_info(struct task_struct *tsk,
1407 struct pt_regs *regs, 1421 struct pt_regs *regs,
1408 int error_code, int si_code, 1422 int error_code, int si_code,
1409 struct siginfo *info) 1423 struct siginfo *info)
1410 { 1424 {
1411 tsk->thread.trap_nr = X86_TRAP_DB; 1425 tsk->thread.trap_nr = X86_TRAP_DB;
1412 tsk->thread.error_code = error_code; 1426 tsk->thread.error_code = error_code;
1413 1427
1414 memset(info, 0, sizeof(*info)); 1428 memset(info, 0, sizeof(*info));
1415 info->si_signo = SIGTRAP; 1429 info->si_signo = SIGTRAP;
1416 info->si_code = si_code; 1430 info->si_code = si_code;
1417 info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; 1431 info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
1418 } 1432 }
1419 1433
/*
 * Fill @info with the SIGTRAP siginfo reported for a single-step trap
 * (TRAP_BRKPT, no error code).
 */
void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs,
				struct siginfo *info)
{
	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}
1426 1440
1427 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, 1441 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1428 int error_code, int si_code) 1442 int error_code, int si_code)
1429 { 1443 {
1430 struct siginfo info; 1444 struct siginfo info;
1431 1445
1432 fill_sigtrap_info(tsk, regs, error_code, si_code, &info); 1446 fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
1433 /* Send us the fake SIGTRAP */ 1447 /* Send us the fake SIGTRAP */
1434 force_sig_info(SIGTRAP, &info, tsk); 1448 force_sig_info(SIGTRAP, &info, tsk);
1435 } 1449 }
1436 1450
1437 1451
/*
 * IS_IA32: does the current task use the 32-bit syscall ABI?
 * Constant on pure 32-bit/64-bit kernels; a runtime check when ia32
 * emulation is compiled in.
 */
#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif
1445 1459
/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Leave the RCU extended quiescent state on kernel entry. */
	user_exit();

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state. If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	if (secure_computing(regs->orig_ax)) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1L;
		goto out;
	}

	/* PTRACE_SYSEMU: skip the syscall, the tracer emulates it. */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	/* Notify the tracer; it may also ask us to skip the syscall. */
	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	/* Audit with the ABI-appropriate arch tag and argument registers. */
	if (IS_IA32)
		audit_syscall_entry(AUDIT_ARCH_I386,
				    regs->orig_ax,
				    regs->bx, regs->cx,
				    regs->dx, regs->si);
#ifdef CONFIG_X86_64
	else
		audit_syscall_entry(AUDIT_ARCH_X86_64,
				    regs->orig_ax,
				    regs->di, regs->si,
				    regs->dx, regs->r10);
#endif

out:
	/* ret != 0 means "skip the syscall" (-1L); else run orig_ax. */
	return ret ?: regs->orig_ax;
}
1499 1513
1500 void syscall_trace_leave(struct pt_regs *regs) 1514 void syscall_trace_leave(struct pt_regs *regs)
1501 { 1515 {
1502 bool step; 1516 bool step;
1503 1517
1504 /* 1518 /*
1505 * We may come here right after calling schedule_user() 1519 * We may come here right after calling schedule_user()
1506 * or do_notify_resume(), in which case we can be in RCU 1520 * or do_notify_resume(), in which case we can be in RCU
1507 * user mode. 1521 * user mode.
1508 */ 1522 */
1509 user_exit(); 1523 user_exit();
1510 1524
1511 audit_syscall_exit(regs); 1525 audit_syscall_exit(regs);
1512 1526