Blame view
kernel/ptrace.c
20.2 KB
1da177e4c
|
1 2 3 4 5 6 7 8 |
/* * linux/kernel/ptrace.c * * (C) Copyright 1999 Linus Torvalds * * Common interfaces for "ptrace()" which we do not want * to continually duplicate across every architecture. */ |
c59ede7b7
|
9 |
#include <linux/capability.h> |
1da177e4c
|
10 11 12 13 14 15 |
#include <linux/module.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/pagemap.h> |
1da177e4c
|
16 17 |
#include <linux/ptrace.h> #include <linux/security.h> |
7ed20e1ad
|
18 |
#include <linux/signal.h> |
a5cb013da
|
19 |
#include <linux/audit.h> |
b488893a3
|
20 |
#include <linux/pid_namespace.h> |
f17d30a80
|
21 |
#include <linux/syscalls.h> |
3a7097035
|
22 |
#include <linux/uaccess.h> |
2225a122a
|
23 |
#include <linux/regset.h> |
1da177e4c
|
24 |
|
bf53de907
|
25 26 |
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	/* The child must not already be on any tracer's ->ptraced list. */
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	/* Re-parent: the tracer now receives the child's wait/notify events. */
	child->parent = new_parent;
}
3a7097035
|
38 |
|
1da177e4c
|
39 40 41 42 43 44 45 |
/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		/*
		 * If the group stop is completed or in progress,
		 * this thread was already counted as stopped.
		 */
		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
		    child->signal->group_stop_count)
			__set_task_state(child, TASK_STOPPED);
		else
			/* No group stop pending: let the child run again. */
			signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	/* A child left in TASK_TRACED with no tracer could never wake up. */
	if (task_is_traced(child))
		ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 *
 * @kill: non-zero when the caller is about to kill the tracee, in which
 *        case the "child is stopped in TASK_TRACED" requirement is waived.
 * Returns 0 when @child is our traced child (and, for !@kill, is known to
 * be off the runqueue in TASK_TRACED); -ESRCH otherwise.
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		if (task_is_stopped(child))
			/* Promote a group stop into a ptrace stop. */
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	/* Wait for the child to actually leave the runqueue. */
	if (!ret && !kill)
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;
}
006ebb40d
|
115 |
/*
 * Core permission check for ptrace-style access to @task.
 * Caller must hold task_lock(task) — see ptrace_may_access().
 * Returns 0 if access is allowed, -EPERM otherwise (or whatever the
 * security module returns).
 */
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;
	/* All of the target's real/effective/saved ids must match ours,
	 * unless we carry CAP_SYS_PTRACE. */
	rcu_read_lock();
	tcred = __task_cred(task);
	if ((cred->uid != tcred->euid ||
	     cred->uid != tcred->suid ||
	     cred->uid != tcred->uid  ||
	     cred->gid != tcred->egid ||
	     cred->gid != tcred->sgid ||
	     cred->gid != tcred->gid) &&
	    !capable(CAP_SYS_PTRACE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();
	/* NOTE(review): smp_rmb() presumably pairs with a write barrier in
	 * the path that updates the mm's dumpable state — confirm. */
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	/* Non-dumpable targets (e.g. setuid execs) need CAP_SYS_PTRACE. */
	if (!dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	/* Finally give the security module a veto. */
	return security_ptrace_access_check(task, mode);
}
006ebb40d
|
151 |
/*
 * Convenience wrapper around __ptrace_may_access() that takes the
 * required task_lock() itself.  Returns true when access is permitted.
 */
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int denied;

	task_lock(task);
	denied = __ptrace_may_access(task, mode);
	task_unlock(task);

	return !denied;
}
e3e89cc53
|
159 |
/*
 * Attach the current task as tracer of @task (PTRACE_ATTACH).
 * Performs permission checks, marks the task PT_PTRACED, re-parents it
 * to us and queues a SIGSTOP.  Returns 0 on success or a -errno.
 */
static int ptrace_attach(struct task_struct *task)
{
	int retval;

	audit_ptrace(task);

	retval = -EPERM;
	/* Kernel threads are never ptraceable. */
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	/* A thread may not attach to a sibling of its own group. */
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our
	 * interference; SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	/* Can't attach to an exiting task or one already being traced. */
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = PT_PTRACED;
	/* Remember privileged attaches so later checks can skip perm tests. */
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);
	/* Stop the freshly-attached tracee; SEND_SIG_FORCED can't be blocked. */
	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	return retval;
}
f2f0b00ad
|
206 207 208 209 210 211 |
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
39c626ae4
|
232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 |
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 * True when the tracer's SIGCHLD disposition (SIG_IGN or SA_NOCLDWAIT)
 * means its children auto-reap.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;

	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);

	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	__ptrace_unlink(p);

	if (p->exit_state == EXIT_ZOMBIE) {
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, tracer))
				do_notify_parent(p, p->exit_signal);
			else if (ignoring_children(tracer->sighand)) {
				__wake_up_parent(p, tracer);
				p->exit_signal = -1;
			}
		}
		if (task_detached(p)) {
			/* Mark it as in the process of being reaped. */
			p->exit_state = EXIT_DEAD;
			return true;
		}
	}

	return false;
}
e3e89cc53
|
282 |
/*
 * PTRACE_DETACH: stop tracing @child and let it continue, optionally
 * delivering signal @data.  Always returns 0 except for an invalid
 * signal number (-EIO).
 */
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed. Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
		if (!child->exit_state)
			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
	}
	write_unlock_irq(&tasklist_lock);

	/* release_task() cannot be called with tasklist_lock held. */
	if (unlikely(dead))
		release_task(child);

	return 0;
}
39c626ae4
|
309 |
/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	/* Zombies that need release_task(), collected under the lock. */
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	/* Drop the lock: release_task() must run without it. */
	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}
1da177e4c
|
337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 |
/*
 * Copy @len bytes from the tracee's address space at @src into the
 * tracer's user buffer @dst, in chunks of up to 128 bytes.
 * Returns the number of bytes copied; -EIO if nothing could be read,
 * -EFAULT if @dst is unwritable.
 */
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			/* Report a partial copy; only total failure is -EIO. */
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

/*
 * Mirror of ptrace_readdata(): copy @len bytes from the tracer's user
 * buffer @src into the tracee's address space at @dst.
 */
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
4abf98696
|
386 |
static int ptrace_setoptions(struct task_struct *child, unsigned long data) |
1da177e4c
|
387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 |
{ child->ptrace &= ~PT_TRACE_MASK; if (data & PTRACE_O_TRACESYSGOOD) child->ptrace |= PT_TRACESYSGOOD; if (data & PTRACE_O_TRACEFORK) child->ptrace |= PT_TRACE_FORK; if (data & PTRACE_O_TRACEVFORK) child->ptrace |= PT_TRACE_VFORK; if (data & PTRACE_O_TRACECLONE) child->ptrace |= PT_TRACE_CLONE; if (data & PTRACE_O_TRACEEXEC) child->ptrace |= PT_TRACE_EXEC; if (data & PTRACE_O_TRACEVFORKDONE) child->ptrace |= PT_TRACE_VFORK_DONE; if (data & PTRACE_O_TRACEEXIT) child->ptrace |= PT_TRACE_EXIT; return (data & ~PTRACE_O_MASK) ? -EINVAL : 0; } |
e16b27816
|
413 |
/*
 * PTRACE_GETSIGINFO helper: copy the siginfo of the signal the tracee is
 * currently stopped on into @info.  Returns -ESRCH if the sighand is
 * gone, -EINVAL if the tracee is not stopped on a signal.
 */
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error;

	if (!lock_task_sighand(child, &flags))
		return -ESRCH;

	if (likely(child->last_siginfo != NULL)) {
		*info = *child->last_siginfo;
		error = 0;
	} else {
		error = -EINVAL;
	}

	unlock_task_sighand(child, &flags);
	return error;
}
e16b27816
|
427 |
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info) |
1da177e4c
|
428 |
{ |
e49612544
|
429 |
unsigned long flags; |
1da177e4c
|
430 |
int error = -ESRCH; |
e49612544
|
431 |
if (lock_task_sighand(child, &flags)) { |
1da177e4c
|
432 |
error = -EINVAL; |
1da177e4c
|
433 |
if (likely(child->last_siginfo != NULL)) { |
e16b27816
|
434 |
*child->last_siginfo = *info; |
1da177e4c
|
435 436 |
error = 0; } |
e49612544
|
437 |
unlock_task_sighand(child, &flags); |
1da177e4c
|
438 |
} |
1da177e4c
|
439 440 |
return error; } |
36df29d79
|
441 442 443 444 445 446 |
/*
 * Per-architecture availability of the optional resume requests.  Each
 * helper evaluates to 0 when the architecture does not define the
 * corresponding request, so ptrace_resume() can test them unconditionally.
 */
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
4abf98696
|
457 458 |
/*
 * Common worker for all "resume the tracee" requests (CONT, SYSCALL,
 * single-step/block-step and sysemu variants).  @data is the signal
 * number to deliver on resumption; 0 means none.  Returns 0 or -EIO.
 */
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Only PTRACE_SYSCALL traps at syscall entry/exit. */
	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		/* Plain CONT/SYSCALL: make sure stepping is off. */
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_process(child);

	return 0;
}
2225a122a
|
491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 |
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
/*
 * Find the regset in @view whose ELF core note type matches @type,
 * or NULL if the architecture view has no such regset.
 */
static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

/*
 * Common worker for PTRACE_GETREGSET/PTRACE_SETREGSET.  @type is the
 * ELF note type identifying the regset; @kiov describes the user buffer
 * (its iov_len is clamped to the regset size on return).
 */
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	/* The buffer must hold a whole number of register slots. */
	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

#endif
1da177e4c
|
531 |
/*
 * Generic dispatch for the architecture-independent ptrace requests.
 * Called by arch_ptrace() implementations for requests they do not
 * handle themselves.  Returns a -errno or a request-specific value.
 */
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	int ret = -EIO;
	siginfo_t siginfo;
	/* @data doubles as a user pointer for many requests. */
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		/* Report the FDPIC loadmap address selected by @addr. */
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			/* Report back how much was actually transferred. */
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}
481bed454
|
640 |
|
8053bdd5c
|
641 |
/*
 * Look up the task for @pid in the caller's pid namespace and take a
 * reference on it.  Returns ERR_PTR(-ESRCH) when no such task exists.
 */
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child = NULL;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	return child ? child : ERR_PTR(-ESRCH);
}
0ac155591
|
655 656 657 |
/* Default no-op; architectures may define this for post-attach book-keeping. */
#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif
4abf98696
|
658 659 |
/*
 * The ptrace(2) system call entry point.  Handles TRACEME and ATTACH
 * itself, verifies attachment for everything else, then hands the
 * request to the architecture's arch_ptrace().
 */
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	/* PTRACE_KILL is allowed even if the tracee isn't in TASK_TRACED. */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
766473231
|
698 |
|
4abf98696
|
699 700 |
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, unsigned long data) |
766473231
|
701 702 703 704 705 706 707 708 709 |
{ unsigned long tmp; int copied; copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); if (copied != sizeof(tmp)) return -EIO; return put_user(tmp, (unsigned long __user *)data); } |
f284ce726
|
710 |
|
4abf98696
|
711 712 |
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, unsigned long data) |
f284ce726
|
713 714 715 716 717 718 |
{ int copied; copied = access_process_vm(tsk, addr, &data, sizeof(data), 1); return (copied == sizeof(data)) ? 0 : -EIO; } |
032d82d90
|
719 |
|
96b8936a9
|
720 |
#if defined CONFIG_COMPAT |
032d82d90
|
721 722 723 724 725 726 727 |
#include <linux/compat.h>

/*
 * Compat (32-bit-on-64-bit) counterpart of ptrace_request().  Handles
 * the requests whose user-space layout differs under compat; everything
 * else falls through to ptrace_request().
 */
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* Transfer a compat-sized word, not a native one. */
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		/* Widen the compat iovec into a native one. */
		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}
c269f1961
|
799 |
|
c269f1961
|
800 801 802 803 804 |
/*
 * Compat ptrace(2) entry point; mirrors sys_ptrace() but dispatches to
 * compat_arch_ptrace() for the architecture-specific work.
 */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
96b8936a9
|
836 |
#endif /* CONFIG_COMPAT */ |