Commit 9026843952adac5b123c7b8dc961e5c15828d9e1
1 parent: 6bf9adfc90
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
generic compat_sys_sigaltstack()
Again, conditional on CONFIG_GENERIC_SIGALTSTACK.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Showing 10 changed files with 67 additions and 67 deletions (inline diff)
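This excerpt shows the arch/x86 ia32 side; the core of the change lands in kernel/signal.c, which is not part of this excerpt: a generic compat_sys_sigaltstack() plus a compat_restore_altstack() helper, both guarded by CONFIG_GENERIC_SIGALTSTACK, so each architecture can drop its private 32-bit sigaltstack wrapper. A sketch of their shape, reconstructed from kernels of this era; treat the exact names and fields as approximate rather than as the verbatim patch:

	#ifdef CONFIG_GENERIC_SIGALTSTACK
	COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
	{
		stack_t uss, uoss;
		int ret;
		mm_segment_t seg;

		if (uss_ptr) {
			compat_stack_t uss32;

			memset(&uss, 0, sizeof(stack_t));
			if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
				return -EFAULT;
			uss.ss_sp = compat_ptr(uss32.ss_sp);
			uss.ss_flags = uss32.ss_flags;
			uss.ss_size = uss32.ss_size;
		}
		seg = get_fs();
		set_fs(KERNEL_DS);
		/* same set_fs() trick the deleted x86 wrapper used, now in one place */
		ret = do_sigaltstack((stack_t __force __user *)(uss_ptr ? &uss : NULL),
				     (stack_t __force __user *)&uoss,
				     compat_user_stack_pointer());
		set_fs(seg);
		if (ret >= 0 && uoss_ptr) {
			if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
			    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
			    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
			    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
				ret = -EFAULT;
		}
		return ret;
	}

	int compat_restore_altstack(const compat_stack_t __user *uss)
	{
		int err = compat_sys_sigaltstack(uss, NULL);
		/* squash all but -EFAULT for now */
		return err == -EFAULT ? err : 0;
	}
	#endif

With that in place, the ia32 changes below are pure deletion plus a one-line call: sys32_sigaltstack() goes away, and sys32_rt_sigreturn() restores the alternate stack via compat_restore_altstack(&frame->uc.uc_stack).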
arch/x86/ia32/ia32_signal.c
1 | /* | 1 | /* |
2 | * linux/arch/x86_64/ia32/ia32_signal.c | 2 | * linux/arch/x86_64/ia32/ia32_signal.c |
3 | * | 3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | 4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | * | 5 | * |
6 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson | 6 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson |
7 | * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes | 7 | * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes |
8 | * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen | 8 | * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/wait.h> | 16 | #include <linux/wait.h> |
17 | #include <linux/unistd.h> | 17 | #include <linux/unistd.h> |
18 | #include <linux/stddef.h> | 18 | #include <linux/stddef.h> |
19 | #include <linux/personality.h> | 19 | #include <linux/personality.h> |
20 | #include <linux/compat.h> | 20 | #include <linux/compat.h> |
21 | #include <linux/binfmts.h> | 21 | #include <linux/binfmts.h> |
22 | #include <asm/ucontext.h> | 22 | #include <asm/ucontext.h> |
23 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
24 | #include <asm/i387.h> | 24 | #include <asm/i387.h> |
25 | #include <asm/fpu-internal.h> | 25 | #include <asm/fpu-internal.h> |
26 | #include <asm/ptrace.h> | 26 | #include <asm/ptrace.h> |
27 | #include <asm/ia32_unistd.h> | 27 | #include <asm/ia32_unistd.h> |
28 | #include <asm/user32.h> | 28 | #include <asm/user32.h> |
29 | #include <asm/sigcontext32.h> | 29 | #include <asm/sigcontext32.h> |
30 | #include <asm/proto.h> | 30 | #include <asm/proto.h> |
31 | #include <asm/vdso.h> | 31 | #include <asm/vdso.h> |
32 | #include <asm/sigframe.h> | 32 | #include <asm/sigframe.h> |
33 | #include <asm/sighandling.h> | 33 | #include <asm/sighandling.h> |
34 | #include <asm/sys_ia32.h> | 34 | #include <asm/sys_ia32.h> |
35 | #include <asm/smap.h> | 35 | #include <asm/smap.h> |
36 | 36 | ||
37 | #define FIX_EFLAGS __FIX_EFLAGS | 37 | #define FIX_EFLAGS __FIX_EFLAGS |
38 | 38 | ||
39 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) | 39 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) |
40 | { | 40 | { |
41 | int err = 0; | 41 | int err = 0; |
42 | bool ia32 = test_thread_flag(TIF_IA32); | 42 | bool ia32 = test_thread_flag(TIF_IA32); |
43 | 43 | ||
44 | if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) | 44 | if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) |
45 | return -EFAULT; | 45 | return -EFAULT; |
46 | 46 | ||
47 | put_user_try { | 47 | put_user_try { |
48 | /* If you change siginfo_t structure, please make sure that | 48 | /* If you change siginfo_t structure, please make sure that |
49 | this code is fixed accordingly. | 49 | this code is fixed accordingly. |
50 | It should never copy any pad contained in the structure | 50 | It should never copy any pad contained in the structure |
51 | to avoid security leaks, but must copy the generic | 51 | to avoid security leaks, but must copy the generic |
52 | 3 ints plus the relevant union member. */ | 52 | 3 ints plus the relevant union member. */ |
53 | put_user_ex(from->si_signo, &to->si_signo); | 53 | put_user_ex(from->si_signo, &to->si_signo); |
54 | put_user_ex(from->si_errno, &to->si_errno); | 54 | put_user_ex(from->si_errno, &to->si_errno); |
55 | put_user_ex((short)from->si_code, &to->si_code); | 55 | put_user_ex((short)from->si_code, &to->si_code); |
56 | 56 | ||
57 | if (from->si_code < 0) { | 57 | if (from->si_code < 0) { |
58 | put_user_ex(from->si_pid, &to->si_pid); | 58 | put_user_ex(from->si_pid, &to->si_pid); |
59 | put_user_ex(from->si_uid, &to->si_uid); | 59 | put_user_ex(from->si_uid, &to->si_uid); |
60 | put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr); | 60 | put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr); |
61 | } else { | 61 | } else { |
62 | /* | 62 | /* |
63 | * First 32bits of unions are always present: | 63 | * First 32bits of unions are always present: |
64 | * si_pid === si_band === si_tid === si_addr(LS half) | 64 | * si_pid === si_band === si_tid === si_addr(LS half) |
65 | */ | 65 | */ |
66 | put_user_ex(from->_sifields._pad[0], | 66 | put_user_ex(from->_sifields._pad[0], |
67 | &to->_sifields._pad[0]); | 67 | &to->_sifields._pad[0]); |
68 | switch (from->si_code >> 16) { | 68 | switch (from->si_code >> 16) { |
69 | case __SI_FAULT >> 16: | 69 | case __SI_FAULT >> 16: |
70 | break; | 70 | break; |
71 | case __SI_SYS >> 16: | 71 | case __SI_SYS >> 16: |
72 | put_user_ex(from->si_syscall, &to->si_syscall); | 72 | put_user_ex(from->si_syscall, &to->si_syscall); |
73 | put_user_ex(from->si_arch, &to->si_arch); | 73 | put_user_ex(from->si_arch, &to->si_arch); |
74 | break; | 74 | break; |
75 | case __SI_CHLD >> 16: | 75 | case __SI_CHLD >> 16: |
76 | if (ia32) { | 76 | if (ia32) { |
77 | put_user_ex(from->si_utime, &to->si_utime); | 77 | put_user_ex(from->si_utime, &to->si_utime); |
78 | put_user_ex(from->si_stime, &to->si_stime); | 78 | put_user_ex(from->si_stime, &to->si_stime); |
79 | } else { | 79 | } else { |
80 | put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime); | 80 | put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime); |
81 | put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime); | 81 | put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime); |
82 | } | 82 | } |
83 | put_user_ex(from->si_status, &to->si_status); | 83 | put_user_ex(from->si_status, &to->si_status); |
84 | /* FALL THROUGH */ | 84 | /* FALL THROUGH */ |
85 | default: | 85 | default: |
86 | case __SI_KILL >> 16: | 86 | case __SI_KILL >> 16: |
87 | put_user_ex(from->si_uid, &to->si_uid); | 87 | put_user_ex(from->si_uid, &to->si_uid); |
88 | break; | 88 | break; |
89 | case __SI_POLL >> 16: | 89 | case __SI_POLL >> 16: |
90 | put_user_ex(from->si_fd, &to->si_fd); | 90 | put_user_ex(from->si_fd, &to->si_fd); |
91 | break; | 91 | break; |
92 | case __SI_TIMER >> 16: | 92 | case __SI_TIMER >> 16: |
93 | put_user_ex(from->si_overrun, &to->si_overrun); | 93 | put_user_ex(from->si_overrun, &to->si_overrun); |
94 | put_user_ex(ptr_to_compat(from->si_ptr), | 94 | put_user_ex(ptr_to_compat(from->si_ptr), |
95 | &to->si_ptr); | 95 | &to->si_ptr); |
96 | break; | 96 | break; |
97 | /* This is not generated by the kernel as of now. */ | 97 | /* This is not generated by the kernel as of now. */ |
98 | case __SI_RT >> 16: | 98 | case __SI_RT >> 16: |
99 | case __SI_MESGQ >> 16: | 99 | case __SI_MESGQ >> 16: |
100 | put_user_ex(from->si_uid, &to->si_uid); | 100 | put_user_ex(from->si_uid, &to->si_uid); |
101 | put_user_ex(from->si_int, &to->si_int); | 101 | put_user_ex(from->si_int, &to->si_int); |
102 | break; | 102 | break; |
103 | } | 103 | } |
104 | } | 104 | } |
105 | } put_user_catch(err); | 105 | } put_user_catch(err); |
106 | 106 | ||
107 | return err; | 107 | return err; |
108 | } | 108 | } |
109 | 109 | ||
110 | int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | 110 | int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) |
111 | { | 111 | { |
112 | int err = 0; | 112 | int err = 0; |
113 | u32 ptr32; | 113 | u32 ptr32; |
114 | 114 | ||
115 | if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) | 115 | if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) |
116 | return -EFAULT; | 116 | return -EFAULT; |
117 | 117 | ||
118 | get_user_try { | 118 | get_user_try { |
119 | get_user_ex(to->si_signo, &from->si_signo); | 119 | get_user_ex(to->si_signo, &from->si_signo); |
120 | get_user_ex(to->si_errno, &from->si_errno); | 120 | get_user_ex(to->si_errno, &from->si_errno); |
121 | get_user_ex(to->si_code, &from->si_code); | 121 | get_user_ex(to->si_code, &from->si_code); |
122 | 122 | ||
123 | get_user_ex(to->si_pid, &from->si_pid); | 123 | get_user_ex(to->si_pid, &from->si_pid); |
124 | get_user_ex(to->si_uid, &from->si_uid); | 124 | get_user_ex(to->si_uid, &from->si_uid); |
125 | get_user_ex(ptr32, &from->si_ptr); | 125 | get_user_ex(ptr32, &from->si_ptr); |
126 | to->si_ptr = compat_ptr(ptr32); | 126 | to->si_ptr = compat_ptr(ptr32); |
127 | } get_user_catch(err); | 127 | } get_user_catch(err); |
128 | 128 | ||
129 | return err; | 129 | return err; |
130 | } | 130 | } |
131 | 131 | ||
132 | asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask) | 132 | asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask) |
133 | { | 133 | { |
134 | sigset_t blocked; | 134 | sigset_t blocked; |
135 | siginitset(&blocked, mask); | 135 | siginitset(&blocked, mask); |
136 | return sigsuspend(&blocked); | 136 | return sigsuspend(&blocked); |
137 | } | 137 | } |
138 | 138 | ||
139 | asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, | ||
140 | stack_ia32_t __user *uoss_ptr, | ||
141 | struct pt_regs *regs) | ||
142 | { | ||
143 | stack_t uss, uoss; | ||
144 | int ret, err = 0; | ||
145 | mm_segment_t seg; | ||
146 | |||
147 | if (uss_ptr) { | ||
148 | u32 ptr; | ||
149 | |||
150 | memset(&uss, 0, sizeof(stack_t)); | ||
151 | if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t))) | ||
152 | return -EFAULT; | ||
153 | |||
154 | get_user_try { | ||
155 | get_user_ex(ptr, &uss_ptr->ss_sp); | ||
156 | get_user_ex(uss.ss_flags, &uss_ptr->ss_flags); | ||
157 | get_user_ex(uss.ss_size, &uss_ptr->ss_size); | ||
158 | } get_user_catch(err); | ||
159 | |||
160 | if (err) | ||
161 | return -EFAULT; | ||
162 | uss.ss_sp = compat_ptr(ptr); | ||
163 | } | ||
164 | seg = get_fs(); | ||
165 | set_fs(KERNEL_DS); | ||
166 | ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL), | ||
167 | (stack_t __force __user *) &uoss, regs->sp); | ||
168 | set_fs(seg); | ||
169 | if (ret >= 0 && uoss_ptr) { | ||
170 | if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t))) | ||
171 | return -EFAULT; | ||
172 | |||
173 | put_user_try { | ||
174 | put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp); | ||
175 | put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags); | ||
176 | put_user_ex(uoss.ss_size, &uoss_ptr->ss_size); | ||
177 | } put_user_catch(err); | ||
178 | |||
179 | if (err) | ||
180 | ret = -EFAULT; | ||
181 | } | ||
182 | return ret; | ||
183 | } | ||
184 | |||
185 | /* | 139 | /* |
186 | * Do a signal return; undo the signal stack. | 140 | * Do a signal return; undo the signal stack. |
187 | */ | 141 | */ |
188 | #define loadsegment_gs(v) load_gs_index(v) | 142 | #define loadsegment_gs(v) load_gs_index(v) |
189 | #define loadsegment_fs(v) loadsegment(fs, v) | 143 | #define loadsegment_fs(v) loadsegment(fs, v) |
190 | #define loadsegment_ds(v) loadsegment(ds, v) | 144 | #define loadsegment_ds(v) loadsegment(ds, v) |
191 | #define loadsegment_es(v) loadsegment(es, v) | 145 | #define loadsegment_es(v) loadsegment(es, v) |
192 | 146 | ||
193 | #define get_user_seg(seg) ({ unsigned int v; savesegment(seg, v); v; }) | 147 | #define get_user_seg(seg) ({ unsigned int v; savesegment(seg, v); v; }) |
194 | #define set_user_seg(seg, v) loadsegment_##seg(v) | 148 | #define set_user_seg(seg, v) loadsegment_##seg(v) |
195 | 149 | ||
196 | #define COPY(x) { \ | 150 | #define COPY(x) { \ |
197 | get_user_ex(regs->x, &sc->x); \ | 151 | get_user_ex(regs->x, &sc->x); \ |
198 | } | 152 | } |
199 | 153 | ||
200 | #define GET_SEG(seg) ({ \ | 154 | #define GET_SEG(seg) ({ \ |
201 | unsigned short tmp; \ | 155 | unsigned short tmp; \ |
202 | get_user_ex(tmp, &sc->seg); \ | 156 | get_user_ex(tmp, &sc->seg); \ |
203 | tmp; \ | 157 | tmp; \ |
204 | }) | 158 | }) |
205 | 159 | ||
206 | #define COPY_SEG_CPL3(seg) do { \ | 160 | #define COPY_SEG_CPL3(seg) do { \ |
207 | regs->seg = GET_SEG(seg) | 3; \ | 161 | regs->seg = GET_SEG(seg) | 3; \ |
208 | } while (0) | 162 | } while (0) |
209 | 163 | ||
210 | #define RELOAD_SEG(seg) { \ | 164 | #define RELOAD_SEG(seg) { \ |
211 | unsigned int pre = GET_SEG(seg); \ | 165 | unsigned int pre = GET_SEG(seg); \ |
212 | unsigned int cur = get_user_seg(seg); \ | 166 | unsigned int cur = get_user_seg(seg); \ |
213 | pre |= 3; \ | 167 | pre |= 3; \ |
214 | if (pre != cur) \ | 168 | if (pre != cur) \ |
215 | set_user_seg(seg, pre); \ | 169 | set_user_seg(seg, pre); \ |
216 | } | 170 | } |
217 | 171 | ||
218 | static int ia32_restore_sigcontext(struct pt_regs *regs, | 172 | static int ia32_restore_sigcontext(struct pt_regs *regs, |
219 | struct sigcontext_ia32 __user *sc, | 173 | struct sigcontext_ia32 __user *sc, |
220 | unsigned int *pax) | 174 | unsigned int *pax) |
221 | { | 175 | { |
222 | unsigned int tmpflags, err = 0; | 176 | unsigned int tmpflags, err = 0; |
223 | void __user *buf; | 177 | void __user *buf; |
224 | u32 tmp; | 178 | u32 tmp; |
225 | 179 | ||
226 | /* Always make any pending restarted system calls return -EINTR */ | 180 | /* Always make any pending restarted system calls return -EINTR */ |
227 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 181 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
228 | 182 | ||
229 | get_user_try { | 183 | get_user_try { |
230 | /* | 184 | /* |
231 | * Reload fs and gs if they have changed in the signal | 185 | * Reload fs and gs if they have changed in the signal |
232 | * handler. This does not handle long fs/gs base changes in | 186 | * handler. This does not handle long fs/gs base changes in |
233 | * the handler, but does not clobber them at least in the | 187 | * the handler, but does not clobber them at least in the |
234 | * normal case. | 188 | * normal case. |
235 | */ | 189 | */ |
236 | RELOAD_SEG(gs); | 190 | RELOAD_SEG(gs); |
237 | RELOAD_SEG(fs); | 191 | RELOAD_SEG(fs); |
238 | RELOAD_SEG(ds); | 192 | RELOAD_SEG(ds); |
239 | RELOAD_SEG(es); | 193 | RELOAD_SEG(es); |
240 | 194 | ||
241 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); | 195 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); |
242 | COPY(dx); COPY(cx); COPY(ip); | 196 | COPY(dx); COPY(cx); COPY(ip); |
243 | /* Don't touch extended registers */ | 197 | /* Don't touch extended registers */ |
244 | 198 | ||
245 | COPY_SEG_CPL3(cs); | 199 | COPY_SEG_CPL3(cs); |
246 | COPY_SEG_CPL3(ss); | 200 | COPY_SEG_CPL3(ss); |
247 | 201 | ||
248 | get_user_ex(tmpflags, &sc->flags); | 202 | get_user_ex(tmpflags, &sc->flags); |
249 | regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); | 203 | regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); |
250 | /* disable syscall checks */ | 204 | /* disable syscall checks */ |
251 | regs->orig_ax = -1; | 205 | regs->orig_ax = -1; |
252 | 206 | ||
253 | get_user_ex(tmp, &sc->fpstate); | 207 | get_user_ex(tmp, &sc->fpstate); |
254 | buf = compat_ptr(tmp); | 208 | buf = compat_ptr(tmp); |
255 | 209 | ||
256 | get_user_ex(*pax, &sc->ax); | 210 | get_user_ex(*pax, &sc->ax); |
257 | } get_user_catch(err); | 211 | } get_user_catch(err); |
258 | 212 | ||
259 | err |= restore_xstate_sig(buf, 1); | 213 | err |= restore_xstate_sig(buf, 1); |
260 | 214 | ||
261 | return err; | 215 | return err; |
262 | } | 216 | } |
263 | 217 | ||
264 | asmlinkage long sys32_sigreturn(struct pt_regs *regs) | 218 | asmlinkage long sys32_sigreturn(struct pt_regs *regs) |
265 | { | 219 | { |
266 | struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); | 220 | struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); |
267 | sigset_t set; | 221 | sigset_t set; |
268 | unsigned int ax; | 222 | unsigned int ax; |
269 | 223 | ||
270 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 224 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
271 | goto badframe; | 225 | goto badframe; |
272 | if (__get_user(set.sig[0], &frame->sc.oldmask) | 226 | if (__get_user(set.sig[0], &frame->sc.oldmask) |
273 | || (_COMPAT_NSIG_WORDS > 1 | 227 | || (_COMPAT_NSIG_WORDS > 1 |
274 | && __copy_from_user((((char *) &set.sig) + 4), | 228 | && __copy_from_user((((char *) &set.sig) + 4), |
275 | &frame->extramask, | 229 | &frame->extramask, |
276 | sizeof(frame->extramask)))) | 230 | sizeof(frame->extramask)))) |
277 | goto badframe; | 231 | goto badframe; |
278 | 232 | ||
279 | set_current_blocked(&set); | 233 | set_current_blocked(&set); |
280 | 234 | ||
281 | if (ia32_restore_sigcontext(regs, &frame->sc, &ax)) | 235 | if (ia32_restore_sigcontext(regs, &frame->sc, &ax)) |
282 | goto badframe; | 236 | goto badframe; |
283 | return ax; | 237 | return ax; |
284 | 238 | ||
285 | badframe: | 239 | badframe: |
286 | signal_fault(regs, frame, "32bit sigreturn"); | 240 | signal_fault(regs, frame, "32bit sigreturn"); |
287 | return 0; | 241 | return 0; |
288 | } | 242 | } |
289 | 243 | ||
290 | asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) | 244 | asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) |
291 | { | 245 | { |
292 | struct rt_sigframe_ia32 __user *frame; | 246 | struct rt_sigframe_ia32 __user *frame; |
293 | sigset_t set; | 247 | sigset_t set; |
294 | unsigned int ax; | 248 | unsigned int ax; |
295 | struct pt_regs tregs; | ||
296 | 249 | ||
297 | frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4); | 250 | frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4); |
298 | 251 | ||
299 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 252 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
300 | goto badframe; | 253 | goto badframe; |
301 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | 254 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) |
302 | goto badframe; | 255 | goto badframe; |
303 | 256 | ||
304 | set_current_blocked(&set); | 257 | set_current_blocked(&set); |
305 | 258 | ||
306 | if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) | 259 | if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) |
307 | goto badframe; | 260 | goto badframe; |
308 | 261 | ||
309 | tregs = *regs; | 262 | if (compat_restore_altstack(&frame->uc.uc_stack)) |
310 | if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT) | ||
311 | goto badframe; | 263 | goto badframe; |
312 | 264 | ||
313 | return ax; | 265 | return ax; |
314 | 266 | ||
315 | badframe: | 267 | badframe: |
316 | signal_fault(regs, frame, "32bit rt sigreturn"); | 268 | signal_fault(regs, frame, "32bit rt sigreturn"); |
317 | return 0; | 269 | return 0; |
318 | } | 270 | } |
319 | 271 | ||
320 | /* | 272 | /* |
321 | * Set up a signal frame. | 273 | * Set up a signal frame. |
322 | */ | 274 | */ |
323 | 275 | ||
324 | static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, | 276 | static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, |
325 | void __user *fpstate, | 277 | void __user *fpstate, |
326 | struct pt_regs *regs, unsigned int mask) | 278 | struct pt_regs *regs, unsigned int mask) |
327 | { | 279 | { |
328 | int err = 0; | 280 | int err = 0; |
329 | 281 | ||
330 | put_user_try { | 282 | put_user_try { |
331 | put_user_ex(get_user_seg(gs), (unsigned int __user *)&sc->gs); | 283 | put_user_ex(get_user_seg(gs), (unsigned int __user *)&sc->gs); |
332 | put_user_ex(get_user_seg(fs), (unsigned int __user *)&sc->fs); | 284 | put_user_ex(get_user_seg(fs), (unsigned int __user *)&sc->fs); |
333 | put_user_ex(get_user_seg(ds), (unsigned int __user *)&sc->ds); | 285 | put_user_ex(get_user_seg(ds), (unsigned int __user *)&sc->ds); |
334 | put_user_ex(get_user_seg(es), (unsigned int __user *)&sc->es); | 286 | put_user_ex(get_user_seg(es), (unsigned int __user *)&sc->es); |
335 | 287 | ||
336 | put_user_ex(regs->di, &sc->di); | 288 | put_user_ex(regs->di, &sc->di); |
337 | put_user_ex(regs->si, &sc->si); | 289 | put_user_ex(regs->si, &sc->si); |
338 | put_user_ex(regs->bp, &sc->bp); | 290 | put_user_ex(regs->bp, &sc->bp); |
339 | put_user_ex(regs->sp, &sc->sp); | 291 | put_user_ex(regs->sp, &sc->sp); |
340 | put_user_ex(regs->bx, &sc->bx); | 292 | put_user_ex(regs->bx, &sc->bx); |
341 | put_user_ex(regs->dx, &sc->dx); | 293 | put_user_ex(regs->dx, &sc->dx); |
342 | put_user_ex(regs->cx, &sc->cx); | 294 | put_user_ex(regs->cx, &sc->cx); |
343 | put_user_ex(regs->ax, &sc->ax); | 295 | put_user_ex(regs->ax, &sc->ax); |
344 | put_user_ex(current->thread.trap_nr, &sc->trapno); | 296 | put_user_ex(current->thread.trap_nr, &sc->trapno); |
345 | put_user_ex(current->thread.error_code, &sc->err); | 297 | put_user_ex(current->thread.error_code, &sc->err); |
346 | put_user_ex(regs->ip, &sc->ip); | 298 | put_user_ex(regs->ip, &sc->ip); |
347 | put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); | 299 | put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); |
348 | put_user_ex(regs->flags, &sc->flags); | 300 | put_user_ex(regs->flags, &sc->flags); |
349 | put_user_ex(regs->sp, &sc->sp_at_signal); | 301 | put_user_ex(regs->sp, &sc->sp_at_signal); |
350 | put_user_ex(regs->ss, (unsigned int __user *)&sc->ss); | 302 | put_user_ex(regs->ss, (unsigned int __user *)&sc->ss); |
351 | 303 | ||
352 | put_user_ex(ptr_to_compat(fpstate), &sc->fpstate); | 304 | put_user_ex(ptr_to_compat(fpstate), &sc->fpstate); |
353 | 305 | ||
354 | /* non-iBCS2 extensions.. */ | 306 | /* non-iBCS2 extensions.. */ |
355 | put_user_ex(mask, &sc->oldmask); | 307 | put_user_ex(mask, &sc->oldmask); |
356 | put_user_ex(current->thread.cr2, &sc->cr2); | 308 | put_user_ex(current->thread.cr2, &sc->cr2); |
357 | } put_user_catch(err); | 309 | } put_user_catch(err); |
358 | 310 | ||
359 | return err; | 311 | return err; |
360 | } | 312 | } |
361 | 313 | ||
362 | /* | 314 | /* |
363 | * Determine which stack to use.. | 315 | * Determine which stack to use.. |
364 | */ | 316 | */ |
365 | static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | 317 | static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, |
366 | size_t frame_size, | 318 | size_t frame_size, |
367 | void __user **fpstate) | 319 | void __user **fpstate) |
368 | { | 320 | { |
369 | unsigned long sp; | 321 | unsigned long sp; |
370 | 322 | ||
371 | /* Default to using normal stack */ | 323 | /* Default to using normal stack */ |
372 | sp = regs->sp; | 324 | sp = regs->sp; |
373 | 325 | ||
374 | /* This is the X/Open sanctioned signal stack switching. */ | 326 | /* This is the X/Open sanctioned signal stack switching. */ |
375 | if (ka->sa.sa_flags & SA_ONSTACK) { | 327 | if (ka->sa.sa_flags & SA_ONSTACK) { |
376 | if (sas_ss_flags(sp) == 0) | 328 | if (sas_ss_flags(sp) == 0) |
377 | sp = current->sas_ss_sp + current->sas_ss_size; | 329 | sp = current->sas_ss_sp + current->sas_ss_size; |
378 | } | 330 | } |
379 | 331 | ||
380 | /* This is the legacy signal stack switching. */ | 332 | /* This is the legacy signal stack switching. */ |
381 | else if ((regs->ss & 0xffff) != __USER32_DS && | 333 | else if ((regs->ss & 0xffff) != __USER32_DS && |
382 | !(ka->sa.sa_flags & SA_RESTORER) && | 334 | !(ka->sa.sa_flags & SA_RESTORER) && |
383 | ka->sa.sa_restorer) | 335 | ka->sa.sa_restorer) |
384 | sp = (unsigned long) ka->sa.sa_restorer; | 336 | sp = (unsigned long) ka->sa.sa_restorer; |
385 | 337 | ||
386 | if (used_math()) { | 338 | if (used_math()) { |
387 | unsigned long fx_aligned, math_size; | 339 | unsigned long fx_aligned, math_size; |
388 | 340 | ||
389 | sp = alloc_mathframe(sp, 1, &fx_aligned, &math_size); | 341 | sp = alloc_mathframe(sp, 1, &fx_aligned, &math_size); |
390 | *fpstate = (struct _fpstate_ia32 __user *) sp; | 342 | *fpstate = (struct _fpstate_ia32 __user *) sp; |
391 | if (save_xstate_sig(*fpstate, (void __user *)fx_aligned, | 343 | if (save_xstate_sig(*fpstate, (void __user *)fx_aligned, |
392 | math_size) < 0) | 344 | math_size) < 0) |
393 | return (void __user *) -1L; | 345 | return (void __user *) -1L; |
394 | } | 346 | } |
395 | 347 | ||
396 | sp -= frame_size; | 348 | sp -= frame_size; |
397 | /* Align the stack pointer according to the i386 ABI, | 349 | /* Align the stack pointer according to the i386 ABI, |
398 | * i.e. so that on function entry ((sp + 4) & 15) == 0. */ | 350 | * i.e. so that on function entry ((sp + 4) & 15) == 0. */ |
399 | sp = ((sp + 4) & -16ul) - 4; | 351 | sp = ((sp + 4) & -16ul) - 4; |
400 | return (void __user *) sp; | 352 | return (void __user *) sp; |
401 | } | 353 | } |
402 | 354 | ||
403 | int ia32_setup_frame(int sig, struct k_sigaction *ka, | 355 | int ia32_setup_frame(int sig, struct k_sigaction *ka, |
404 | compat_sigset_t *set, struct pt_regs *regs) | 356 | compat_sigset_t *set, struct pt_regs *regs) |
405 | { | 357 | { |
406 | struct sigframe_ia32 __user *frame; | 358 | struct sigframe_ia32 __user *frame; |
407 | void __user *restorer; | 359 | void __user *restorer; |
408 | int err = 0; | 360 | int err = 0; |
409 | void __user *fpstate = NULL; | 361 | void __user *fpstate = NULL; |
410 | 362 | ||
411 | /* copy_to_user optimizes that into a single 8 byte store */ | 363 | /* copy_to_user optimizes that into a single 8 byte store */ |
412 | static const struct { | 364 | static const struct { |
413 | u16 poplmovl; | 365 | u16 poplmovl; |
414 | u32 val; | 366 | u32 val; |
415 | u16 int80; | 367 | u16 int80; |
416 | } __attribute__((packed)) code = { | 368 | } __attribute__((packed)) code = { |
417 | 0xb858, /* popl %eax ; movl $...,%eax */ | 369 | 0xb858, /* popl %eax ; movl $...,%eax */ |
418 | __NR_ia32_sigreturn, | 370 | __NR_ia32_sigreturn, |
419 | 0x80cd, /* int $0x80 */ | 371 | 0x80cd, /* int $0x80 */ |
420 | }; | 372 | }; |
421 | 373 | ||
422 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); | 374 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
423 | 375 | ||
424 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 376 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
425 | return -EFAULT; | 377 | return -EFAULT; |
426 | 378 | ||
427 | if (__put_user(sig, &frame->sig)) | 379 | if (__put_user(sig, &frame->sig)) |
428 | return -EFAULT; | 380 | return -EFAULT; |
429 | 381 | ||
430 | if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0])) | 382 | if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0])) |
431 | return -EFAULT; | 383 | return -EFAULT; |
432 | 384 | ||
433 | if (_COMPAT_NSIG_WORDS > 1) { | 385 | if (_COMPAT_NSIG_WORDS > 1) { |
434 | if (__copy_to_user(frame->extramask, &set->sig[1], | 386 | if (__copy_to_user(frame->extramask, &set->sig[1], |
435 | sizeof(frame->extramask))) | 387 | sizeof(frame->extramask))) |
436 | return -EFAULT; | 388 | return -EFAULT; |
437 | } | 389 | } |
438 | 390 | ||
439 | if (ka->sa.sa_flags & SA_RESTORER) { | 391 | if (ka->sa.sa_flags & SA_RESTORER) { |
440 | restorer = ka->sa.sa_restorer; | 392 | restorer = ka->sa.sa_restorer; |
441 | } else { | 393 | } else { |
442 | /* Return stub is in 32bit vsyscall page */ | 394 | /* Return stub is in 32bit vsyscall page */ |
443 | if (current->mm->context.vdso) | 395 | if (current->mm->context.vdso) |
444 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, | 396 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, |
445 | sigreturn); | 397 | sigreturn); |
446 | else | 398 | else |
447 | restorer = &frame->retcode; | 399 | restorer = &frame->retcode; |
448 | } | 400 | } |
449 | 401 | ||
450 | put_user_try { | 402 | put_user_try { |
451 | put_user_ex(ptr_to_compat(restorer), &frame->pretcode); | 403 | put_user_ex(ptr_to_compat(restorer), &frame->pretcode); |
452 | 404 | ||
453 | /* | 405 | /* |
454 | * These are actually not used anymore, but left because some | 406 | * These are actually not used anymore, but left because some |
455 | * gdb versions depend on them as a marker. | 407 | * gdb versions depend on them as a marker. |
456 | */ | 408 | */ |
457 | put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); | 409 | put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); |
458 | } put_user_catch(err); | 410 | } put_user_catch(err); |
459 | 411 | ||
460 | if (err) | 412 | if (err) |
461 | return -EFAULT; | 413 | return -EFAULT; |
462 | 414 | ||
463 | /* Set up registers for signal handler */ | 415 | /* Set up registers for signal handler */ |
464 | regs->sp = (unsigned long) frame; | 416 | regs->sp = (unsigned long) frame; |
465 | regs->ip = (unsigned long) ka->sa.sa_handler; | 417 | regs->ip = (unsigned long) ka->sa.sa_handler; |
466 | 418 | ||
467 | /* Make -mregparm=3 work */ | 419 | /* Make -mregparm=3 work */ |
468 | regs->ax = sig; | 420 | regs->ax = sig; |
469 | regs->dx = 0; | 421 | regs->dx = 0; |
470 | regs->cx = 0; | 422 | regs->cx = 0; |
471 | 423 | ||
472 | loadsegment(ds, __USER32_DS); | 424 | loadsegment(ds, __USER32_DS); |
473 | loadsegment(es, __USER32_DS); | 425 | loadsegment(es, __USER32_DS); |
474 | 426 | ||
475 | regs->cs = __USER32_CS; | 427 | regs->cs = __USER32_CS; |
476 | regs->ss = __USER32_DS; | 428 | regs->ss = __USER32_DS; |
477 | 429 | ||
478 | return 0; | 430 | return 0; |
479 | } | 431 | } |
480 | 432 | ||
481 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 433 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
482 | compat_sigset_t *set, struct pt_regs *regs) | 434 | compat_sigset_t *set, struct pt_regs *regs) |
483 | { | 435 | { |
484 | struct rt_sigframe_ia32 __user *frame; | 436 | struct rt_sigframe_ia32 __user *frame; |
485 | void __user *restorer; | 437 | void __user *restorer; |
486 | int err = 0; | 438 | int err = 0; |
487 | void __user *fpstate = NULL; | 439 | void __user *fpstate = NULL; |
488 | 440 | ||
489 | /* __copy_to_user optimizes that into a single 8 byte store */ | 441 | /* __copy_to_user optimizes that into a single 8 byte store */ |
490 | static const struct { | 442 | static const struct { |
491 | u8 movl; | 443 | u8 movl; |
492 | u32 val; | 444 | u32 val; |
493 | u16 int80; | 445 | u16 int80; |
494 | u8 pad; | 446 | u8 pad; |
495 | } __attribute__((packed)) code = { | 447 | } __attribute__((packed)) code = { |
496 | 0xb8, | 448 | 0xb8, |
497 | __NR_ia32_rt_sigreturn, | 449 | __NR_ia32_rt_sigreturn, |
498 | 0x80cd, | 450 | 0x80cd, |
499 | 0, | 451 | 0, |
500 | }; | 452 | }; |
501 | 453 | ||
502 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); | 454 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
503 | 455 | ||
504 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 456 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
505 | return -EFAULT; | 457 | return -EFAULT; |
506 | 458 | ||
507 | put_user_try { | 459 | put_user_try { |
508 | put_user_ex(sig, &frame->sig); | 460 | put_user_ex(sig, &frame->sig); |
509 | put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo); | 461 | put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo); |
510 | put_user_ex(ptr_to_compat(&frame->uc), &frame->puc); | 462 | put_user_ex(ptr_to_compat(&frame->uc), &frame->puc); |
511 | 463 | ||
512 | /* Create the ucontext. */ | 464 | /* Create the ucontext. */ |
513 | if (cpu_has_xsave) | 465 | if (cpu_has_xsave) |
514 | put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); | 466 | put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); |
515 | else | 467 | else |
516 | put_user_ex(0, &frame->uc.uc_flags); | 468 | put_user_ex(0, &frame->uc.uc_flags); |
517 | put_user_ex(0, &frame->uc.uc_link); | 469 | put_user_ex(0, &frame->uc.uc_link); |
518 | put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 470 | put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
519 | put_user_ex(sas_ss_flags(regs->sp), | 471 | put_user_ex(sas_ss_flags(regs->sp), |
520 | &frame->uc.uc_stack.ss_flags); | 472 | &frame->uc.uc_stack.ss_flags); |
521 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 473 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
522 | 474 | ||
523 | if (ka->sa.sa_flags & SA_RESTORER) | 475 | if (ka->sa.sa_flags & SA_RESTORER) |
524 | restorer = ka->sa.sa_restorer; | 476 | restorer = ka->sa.sa_restorer; |
525 | else | 477 | else |
526 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, | 478 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, |
527 | rt_sigreturn); | 479 | rt_sigreturn); |
528 | put_user_ex(ptr_to_compat(restorer), &frame->pretcode); | 480 | put_user_ex(ptr_to_compat(restorer), &frame->pretcode); |
529 | 481 | ||
530 | /* | 482 | /* |
531 | * Not actually used anymore, but left because some gdb | 483 | * Not actually used anymore, but left because some gdb |
532 | * versions need it. | 484 | * versions need it. |
533 | */ | 485 | */ |
534 | put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); | 486 | put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); |
535 | } put_user_catch(err); | 487 | } put_user_catch(err); |
536 | 488 | ||
537 | err |= copy_siginfo_to_user32(&frame->info, info); | 489 | err |= copy_siginfo_to_user32(&frame->info, info); |
538 | err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, | 490 | err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, |
539 | regs, set->sig[0]); | 491 | regs, set->sig[0]); |
540 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 492 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
541 | 493 | ||
542 | if (err) | 494 | if (err) |
543 | return -EFAULT; | 495 | return -EFAULT; |
544 | 496 | ||
545 | /* Set up registers for signal handler */ | 497 | /* Set up registers for signal handler */ |
546 | regs->sp = (unsigned long) frame; | 498 | regs->sp = (unsigned long) frame; |
547 | regs->ip = (unsigned long) ka->sa.sa_handler; | 499 | regs->ip = (unsigned long) ka->sa.sa_handler; |
548 | 500 | ||
549 | /* Make -mregparm=3 work */ | 501 | /* Make -mregparm=3 work */ |
550 | regs->ax = sig; | 502 | regs->ax = sig; |
551 | regs->dx = (unsigned long) &frame->info; | 503 | regs->dx = (unsigned long) &frame->info; |
552 | regs->cx = (unsigned long) &frame->uc; | 504 | regs->cx = (unsigned long) &frame->uc; |
553 | 505 | ||
554 | loadsegment(ds, __USER32_DS); | 506 | loadsegment(ds, __USER32_DS); |
555 | loadsegment(es, __USER32_DS); | 507 | loadsegment(es, __USER32_DS); |
556 | 508 | ||
557 | regs->cs = __USER32_CS; | 509 | regs->cs = __USER32_CS; |
558 | regs->ss = __USER32_DS; | 510 | regs->ss = __USER32_DS; |
559 | 511 | ||
560 | return 0; | 512 | return 0; |
561 | } | 513 | } |
562 | 514 |
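That ends the ia32_signal.c hunks. The net change in this file: the hand-rolled sys32_sigaltstack() wrapper, with its set_fs(KERNEL_DS) dance around do_sigaltstack(), is deleted outright, and sys32_rt_sigreturn() no longer has to fabricate a pt_regs copy just to reuse it; it calls compat_restore_altstack(&frame->uc.uc_stack) instead, dropping the tregs local.

One detail in the unchanged code above is worth pinning down: get_sigframe() aligns the frame with sp = ((sp + 4) & -16ul) - 4 so that ((sp + 4) & 15) == 0 on handler entry, i.e. the stack is 16-byte aligned before the emulated call pushes the 4-byte return address, as the i386 ABI expects. A standalone sketch of that invariant (the sample stack value is made up):

	#include <assert.h>
	#include <stdint.h>

	/* Same arithmetic as get_sigframe() above: round sp down so that
	 * (sp + 4) is 16-byte aligned when the signal handler is entered. */
	static uintptr_t align_sigframe(uintptr_t sp)
	{
		return ((sp + 4) & -16ul) - 4;
	}

	int main(void)
	{
		uintptr_t sp = align_sigframe(0xffffd123u);	/* hypothetical raw sp */
		assert(((sp + 4) & 15) == 0);			/* holds for any input */
		return 0;
	}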
arch/x86/ia32/ia32entry.S
1 | /* | 1 | /* |
2 | * Compatibility mode system call entry point for x86-64. | 2 | * Compatibility mode system call entry point for x86-64. |
3 | * | 3 | * |
4 | * Copyright 2000-2002 Andi Kleen, SuSE Labs. | 4 | * Copyright 2000-2002 Andi Kleen, SuSE Labs. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <asm/dwarf2.h> | 7 | #include <asm/dwarf2.h> |
8 | #include <asm/calling.h> | 8 | #include <asm/calling.h> |
9 | #include <asm/asm-offsets.h> | 9 | #include <asm/asm-offsets.h> |
10 | #include <asm/current.h> | 10 | #include <asm/current.h> |
11 | #include <asm/errno.h> | 11 | #include <asm/errno.h> |
12 | #include <asm/ia32_unistd.h> | 12 | #include <asm/ia32_unistd.h> |
13 | #include <asm/thread_info.h> | 13 | #include <asm/thread_info.h> |
14 | #include <asm/segment.h> | 14 | #include <asm/segment.h> |
15 | #include <asm/irqflags.h> | 15 | #include <asm/irqflags.h> |
16 | #include <asm/asm.h> | 16 | #include <asm/asm.h> |
17 | #include <asm/smap.h> | 17 | #include <asm/smap.h> |
18 | #include <linux/linkage.h> | 18 | #include <linux/linkage.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | 20 | ||
21 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ | 21 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ |
22 | #include <linux/elf-em.h> | 22 | #include <linux/elf-em.h> |
23 | #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) | 23 | #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) |
24 | #define __AUDIT_ARCH_LE 0x40000000 | 24 | #define __AUDIT_ARCH_LE 0x40000000 |
25 | 25 | ||
26 | #ifndef CONFIG_AUDITSYSCALL | 26 | #ifndef CONFIG_AUDITSYSCALL |
27 | #define sysexit_audit ia32_ret_from_sys_call | 27 | #define sysexit_audit ia32_ret_from_sys_call |
28 | #define sysretl_audit ia32_ret_from_sys_call | 28 | #define sysretl_audit ia32_ret_from_sys_call |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | .section .entry.text, "ax" | 31 | .section .entry.text, "ax" |
32 | 32 | ||
33 | .macro IA32_ARG_FIXUP noebp=0 | 33 | .macro IA32_ARG_FIXUP noebp=0 |
34 | movl %edi,%r8d | 34 | movl %edi,%r8d |
35 | .if \noebp | 35 | .if \noebp |
36 | .else | 36 | .else |
37 | movl %ebp,%r9d | 37 | movl %ebp,%r9d |
38 | .endif | 38 | .endif |
39 | xchg %ecx,%esi | 39 | xchg %ecx,%esi |
40 | movl %ebx,%edi | 40 | movl %ebx,%edi |
41 | movl %edx,%edx /* zero extension */ | 41 | movl %edx,%edx /* zero extension */ |
42 | .endm | 42 | .endm |
43 | 43 | ||
44 | /* clobbers %eax */ | 44 | /* clobbers %eax */ |
45 | .macro CLEAR_RREGS offset=0, _r9=rax | 45 | .macro CLEAR_RREGS offset=0, _r9=rax |
46 | xorl %eax,%eax | 46 | xorl %eax,%eax |
47 | movq %rax,\offset+R11(%rsp) | 47 | movq %rax,\offset+R11(%rsp) |
48 | movq %rax,\offset+R10(%rsp) | 48 | movq %rax,\offset+R10(%rsp) |
49 | movq %\_r9,\offset+R9(%rsp) | 49 | movq %\_r9,\offset+R9(%rsp) |
50 | movq %rax,\offset+R8(%rsp) | 50 | movq %rax,\offset+R8(%rsp) |
51 | .endm | 51 | .endm |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Reload arg registers from stack in case ptrace changed them. | 54 | * Reload arg registers from stack in case ptrace changed them. |
55 | * We don't reload %eax because syscall_trace_enter() returned | 55 | * We don't reload %eax because syscall_trace_enter() returned |
56 | * the %rax value we should see. Instead, we just truncate that | 56 | * the %rax value we should see. Instead, we just truncate that |
57 | * value to 32 bits again as we did on entry from user mode. | 57 | * value to 32 bits again as we did on entry from user mode. |
58 | * If it's a new value set by user_regset during entry tracing, | 58 | * If it's a new value set by user_regset during entry tracing, |
59 | * this matches the normal truncation of the user-mode value. | 59 | * this matches the normal truncation of the user-mode value. |
60 | * If it's -1 to make us punt the syscall, then (u32)-1 is still | 60 | * If it's -1 to make us punt the syscall, then (u32)-1 is still |
61 | * an appropriately invalid value. | 61 | * an appropriately invalid value. |
62 | */ | 62 | */ |
63 | .macro LOAD_ARGS32 offset, _r9=0 | 63 | .macro LOAD_ARGS32 offset, _r9=0 |
64 | .if \_r9 | 64 | .if \_r9 |
65 | movl \offset+16(%rsp),%r9d | 65 | movl \offset+16(%rsp),%r9d |
66 | .endif | 66 | .endif |
67 | movl \offset+40(%rsp),%ecx | 67 | movl \offset+40(%rsp),%ecx |
68 | movl \offset+48(%rsp),%edx | 68 | movl \offset+48(%rsp),%edx |
69 | movl \offset+56(%rsp),%esi | 69 | movl \offset+56(%rsp),%esi |
70 | movl \offset+64(%rsp),%edi | 70 | movl \offset+64(%rsp),%edi |
71 | movl %eax,%eax /* zero extension */ | 71 | movl %eax,%eax /* zero extension */ |
72 | .endm | 72 | .endm |
73 | 73 | ||
74 | .macro CFI_STARTPROC32 simple | 74 | .macro CFI_STARTPROC32 simple |
75 | CFI_STARTPROC \simple | 75 | CFI_STARTPROC \simple |
76 | CFI_UNDEFINED r8 | 76 | CFI_UNDEFINED r8 |
77 | CFI_UNDEFINED r9 | 77 | CFI_UNDEFINED r9 |
78 | CFI_UNDEFINED r10 | 78 | CFI_UNDEFINED r10 |
79 | CFI_UNDEFINED r11 | 79 | CFI_UNDEFINED r11 |
80 | CFI_UNDEFINED r12 | 80 | CFI_UNDEFINED r12 |
81 | CFI_UNDEFINED r13 | 81 | CFI_UNDEFINED r13 |
82 | CFI_UNDEFINED r14 | 82 | CFI_UNDEFINED r14 |
83 | CFI_UNDEFINED r15 | 83 | CFI_UNDEFINED r15 |
84 | .endm | 84 | .endm |
85 | 85 | ||
86 | #ifdef CONFIG_PARAVIRT | 86 | #ifdef CONFIG_PARAVIRT |
87 | ENTRY(native_usergs_sysret32) | 87 | ENTRY(native_usergs_sysret32) |
88 | swapgs | 88 | swapgs |
89 | sysretl | 89 | sysretl |
90 | ENDPROC(native_usergs_sysret32) | 90 | ENDPROC(native_usergs_sysret32) |
91 | 91 | ||
92 | ENTRY(native_irq_enable_sysexit) | 92 | ENTRY(native_irq_enable_sysexit) |
93 | swapgs | 93 | swapgs |
94 | sti | 94 | sti |
95 | sysexit | 95 | sysexit |
96 | ENDPROC(native_irq_enable_sysexit) | 96 | ENDPROC(native_irq_enable_sysexit) |
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | /* | 99 | /* |
100 | * 32bit SYSENTER instruction entry. | 100 | * 32bit SYSENTER instruction entry. |
101 | * | 101 | * |
102 | * Arguments: | 102 | * Arguments: |
103 | * %eax System call number. | 103 | * %eax System call number. |
104 | * %ebx Arg1 | 104 | * %ebx Arg1 |
105 | * %ecx Arg2 | 105 | * %ecx Arg2 |
106 | * %edx Arg3 | 106 | * %edx Arg3 |
107 | * %esi Arg4 | 107 | * %esi Arg4 |
108 | * %edi Arg5 | 108 | * %edi Arg5 |
109 | * %ebp user stack | 109 | * %ebp user stack |
110 | * 0(%ebp) Arg6 | 110 | * 0(%ebp) Arg6 |
111 | * | 111 | * |
112 | * Interrupts off. | 112 | * Interrupts off. |
113 | * | 113 | * |
114 | * This is purely a fast path. For anything complicated we use the int 0x80 | 114 | * This is purely a fast path. For anything complicated we use the int 0x80 |
115 | * path below. Set up a complete hardware stack frame to share code | 115 | * path below. Set up a complete hardware stack frame to share code |
116 | * with the int 0x80 path. | 116 | * with the int 0x80 path. |
117 | */ | 117 | */ |
118 | ENTRY(ia32_sysenter_target) | 118 | ENTRY(ia32_sysenter_target) |
119 | CFI_STARTPROC32 simple | 119 | CFI_STARTPROC32 simple |
120 | CFI_SIGNAL_FRAME | 120 | CFI_SIGNAL_FRAME |
121 | CFI_DEF_CFA rsp,0 | 121 | CFI_DEF_CFA rsp,0 |
122 | CFI_REGISTER rsp,rbp | 122 | CFI_REGISTER rsp,rbp |
123 | SWAPGS_UNSAFE_STACK | 123 | SWAPGS_UNSAFE_STACK |
124 | movq PER_CPU_VAR(kernel_stack), %rsp | 124 | movq PER_CPU_VAR(kernel_stack), %rsp |
125 | addq $(KERNEL_STACK_OFFSET),%rsp | 125 | addq $(KERNEL_STACK_OFFSET),%rsp |
126 | /* | 126 | /* |
127 | * No need to follow this irqs on/off section: the syscall | 127 | * No need to follow this irqs on/off section: the syscall |
128 | * disabled irqs, here we enable it straight after entry: | 128 | * disabled irqs, here we enable it straight after entry: |
129 | */ | 129 | */ |
130 | ENABLE_INTERRUPTS(CLBR_NONE) | 130 | ENABLE_INTERRUPTS(CLBR_NONE) |
131 | movl %ebp,%ebp /* zero extension */ | 131 | movl %ebp,%ebp /* zero extension */ |
132 | pushq_cfi $__USER32_DS | 132 | pushq_cfi $__USER32_DS |
133 | /*CFI_REL_OFFSET ss,0*/ | 133 | /*CFI_REL_OFFSET ss,0*/ |
134 | pushq_cfi %rbp | 134 | pushq_cfi %rbp |
135 | CFI_REL_OFFSET rsp,0 | 135 | CFI_REL_OFFSET rsp,0 |
136 | pushfq_cfi | 136 | pushfq_cfi |
137 | /*CFI_REL_OFFSET rflags,0*/ | 137 | /*CFI_REL_OFFSET rflags,0*/ |
138 | movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d | 138 | movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d |
139 | CFI_REGISTER rip,r10 | 139 | CFI_REGISTER rip,r10 |
140 | pushq_cfi $__USER32_CS | 140 | pushq_cfi $__USER32_CS |
141 | /*CFI_REL_OFFSET cs,0*/ | 141 | /*CFI_REL_OFFSET cs,0*/ |
142 | movl %eax, %eax | 142 | movl %eax, %eax |
143 | pushq_cfi %r10 | 143 | pushq_cfi %r10 |
144 | CFI_REL_OFFSET rip,0 | 144 | CFI_REL_OFFSET rip,0 |
145 | pushq_cfi %rax | 145 | pushq_cfi %rax |
146 | cld | 146 | cld |
147 | SAVE_ARGS 0,1,0 | 147 | SAVE_ARGS 0,1,0 |
148 | /* no need to do an access_ok check here because rbp has been | 148 | /* no need to do an access_ok check here because rbp has been |
149 | 32bit zero extended */ | 149 | 32bit zero extended */ |
150 | ASM_STAC | 150 | ASM_STAC |
151 | 1: movl (%rbp),%ebp | 151 | 1: movl (%rbp),%ebp |
152 | _ASM_EXTABLE(1b,ia32_badarg) | 152 | _ASM_EXTABLE(1b,ia32_badarg) |
153 | ASM_CLAC | 153 | ASM_CLAC |
154 | orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 154 | orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
155 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 155 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
156 | CFI_REMEMBER_STATE | 156 | CFI_REMEMBER_STATE |
157 | jnz sysenter_tracesys | 157 | jnz sysenter_tracesys |
158 | cmpq $(IA32_NR_syscalls-1),%rax | 158 | cmpq $(IA32_NR_syscalls-1),%rax |
159 | ja ia32_badsys | 159 | ja ia32_badsys |
160 | sysenter_do_call: | 160 | sysenter_do_call: |
161 | IA32_ARG_FIXUP | 161 | IA32_ARG_FIXUP |
162 | sysenter_dispatch: | 162 | sysenter_dispatch: |
163 | call *ia32_sys_call_table(,%rax,8) | 163 | call *ia32_sys_call_table(,%rax,8) |
164 | movq %rax,RAX-ARGOFFSET(%rsp) | 164 | movq %rax,RAX-ARGOFFSET(%rsp) |
165 | DISABLE_INTERRUPTS(CLBR_NONE) | 165 | DISABLE_INTERRUPTS(CLBR_NONE) |
166 | TRACE_IRQS_OFF | 166 | TRACE_IRQS_OFF |
167 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 167 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
168 | jnz sysexit_audit | 168 | jnz sysexit_audit |
169 | sysexit_from_sys_call: | 169 | sysexit_from_sys_call: |
170 | andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 170 | andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
171 | /* clear IF, that popfq doesn't enable interrupts early */ | 171 | /* clear IF, that popfq doesn't enable interrupts early */ |
172 | andl $~0x200,EFLAGS-R11(%rsp) | 172 | andl $~0x200,EFLAGS-R11(%rsp) |
173 | movl RIP-R11(%rsp),%edx /* User %eip */ | 173 | movl RIP-R11(%rsp),%edx /* User %eip */ |
174 | CFI_REGISTER rip,rdx | 174 | CFI_REGISTER rip,rdx |
175 | RESTORE_ARGS 0,24,0,0,0,0 | 175 | RESTORE_ARGS 0,24,0,0,0,0 |
176 | xorq %r8,%r8 | 176 | xorq %r8,%r8 |
177 | xorq %r9,%r9 | 177 | xorq %r9,%r9 |
178 | xorq %r10,%r10 | 178 | xorq %r10,%r10 |
179 | xorq %r11,%r11 | 179 | xorq %r11,%r11 |
180 | popfq_cfi | 180 | popfq_cfi |
181 | /*CFI_RESTORE rflags*/ | 181 | /*CFI_RESTORE rflags*/ |
182 | popq_cfi %rcx /* User %esp */ | 182 | popq_cfi %rcx /* User %esp */ |
183 | CFI_REGISTER rsp,rcx | 183 | CFI_REGISTER rsp,rcx |
184 | TRACE_IRQS_ON | 184 | TRACE_IRQS_ON |
185 | ENABLE_INTERRUPTS_SYSEXIT32 | 185 | ENABLE_INTERRUPTS_SYSEXIT32 |
186 | 186 | ||
187 | #ifdef CONFIG_AUDITSYSCALL | 187 | #ifdef CONFIG_AUDITSYSCALL |
188 | .macro auditsys_entry_common | 188 | .macro auditsys_entry_common |
189 | movl %esi,%r9d /* 6th arg: 4th syscall arg */ | 189 | movl %esi,%r9d /* 6th arg: 4th syscall arg */ |
190 | movl %edx,%r8d /* 5th arg: 3rd syscall arg */ | 190 | movl %edx,%r8d /* 5th arg: 3rd syscall arg */ |
191 | /* (already in %ecx) 4th arg: 2nd syscall arg */ | 191 | /* (already in %ecx) 4th arg: 2nd syscall arg */ |
192 | movl %ebx,%edx /* 3rd arg: 1st syscall arg */ | 192 | movl %ebx,%edx /* 3rd arg: 1st syscall arg */ |
193 | movl %eax,%esi /* 2nd arg: syscall number */ | 193 | movl %eax,%esi /* 2nd arg: syscall number */ |
194 | movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ | 194 | movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ |
195 | call __audit_syscall_entry | 195 | call __audit_syscall_entry |
196 | movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ | 196 | movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ |
197 | cmpq $(IA32_NR_syscalls-1),%rax | 197 | cmpq $(IA32_NR_syscalls-1),%rax |
198 | ja ia32_badsys | 198 | ja ia32_badsys |
199 | movl %ebx,%edi /* reload 1st syscall arg */ | 199 | movl %ebx,%edi /* reload 1st syscall arg */ |
200 | movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ | 200 | movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ |
201 | movl RDX-ARGOFFSET(%rsp),%edx /* reload 3rd syscall arg */ | 201 | movl RDX-ARGOFFSET(%rsp),%edx /* reload 3rd syscall arg */ |
202 | movl RSI-ARGOFFSET(%rsp),%ecx /* reload 4th syscall arg */ | 202 | movl RSI-ARGOFFSET(%rsp),%ecx /* reload 4th syscall arg */ |
203 | movl RDI-ARGOFFSET(%rsp),%r8d /* reload 5th syscall arg */ | 203 | movl RDI-ARGOFFSET(%rsp),%r8d /* reload 5th syscall arg */ |
204 | .endm | 204 | .endm |
205 | 205 | ||
206 | .macro auditsys_exit exit | 206 | .macro auditsys_exit exit |
207 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 207 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
208 | jnz ia32_ret_from_sys_call | 208 | jnz ia32_ret_from_sys_call |
209 | TRACE_IRQS_ON | 209 | TRACE_IRQS_ON |
210 | sti | 210 | sti |
211 | movl %eax,%esi /* second arg, syscall return value */ | 211 | movl %eax,%esi /* second arg, syscall return value */ |
212 | cmpl $-MAX_ERRNO,%eax /* is it an error ? */ | 212 | cmpl $-MAX_ERRNO,%eax /* is it an error ? */ |
213 | jbe 1f | 213 | jbe 1f |
214 | movslq %eax, %rsi /* if error sign extend to 64 bits */ | 214 | movslq %eax, %rsi /* if error sign extend to 64 bits */ |
215 | 1: setbe %al /* 1 if error, 0 if not */ | 215 | 1: setbe %al /* 1 if error, 0 if not */ |
216 | movzbl %al,%edi /* zero-extend that into %edi */ | 216 | movzbl %al,%edi /* zero-extend that into %edi */ |
217 | call __audit_syscall_exit | 217 | call __audit_syscall_exit |
218 | movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ | 218 | movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ |
219 | movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi | 219 | movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi |
220 | cli | 220 | cli |
221 | TRACE_IRQS_OFF | 221 | TRACE_IRQS_OFF |
222 | testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 222 | testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
223 | jz \exit | 223 | jz \exit |
224 | CLEAR_RREGS -ARGOFFSET | 224 | CLEAR_RREGS -ARGOFFSET |
225 | jmp int_with_check | 225 | jmp int_with_check |
226 | .endm | 226 | .endm |
227 | 227 | ||
228 | sysenter_auditsys: | 228 | sysenter_auditsys: |
229 | CFI_RESTORE_STATE | 229 | CFI_RESTORE_STATE |
230 | auditsys_entry_common | 230 | auditsys_entry_common |
231 | movl %ebp,%r9d /* reload 6th syscall arg */ | 231 | movl %ebp,%r9d /* reload 6th syscall arg */ |
232 | jmp sysenter_dispatch | 232 | jmp sysenter_dispatch |
233 | 233 | ||
234 | sysexit_audit: | 234 | sysexit_audit: |
235 | auditsys_exit sysexit_from_sys_call | 235 | auditsys_exit sysexit_from_sys_call |
236 | #endif | 236 | #endif |
237 | 237 | ||
238 | sysenter_tracesys: | 238 | sysenter_tracesys: |
239 | #ifdef CONFIG_AUDITSYSCALL | 239 | #ifdef CONFIG_AUDITSYSCALL |
240 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 240 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
241 | jz sysenter_auditsys | 241 | jz sysenter_auditsys |
242 | #endif | 242 | #endif |
243 | SAVE_REST | 243 | SAVE_REST |
244 | CLEAR_RREGS | 244 | CLEAR_RREGS |
245 | movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */ | 245 | movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */ |
246 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 246 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
247 | call syscall_trace_enter | 247 | call syscall_trace_enter |
248 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ | 248 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
249 | RESTORE_REST | 249 | RESTORE_REST |
250 | cmpq $(IA32_NR_syscalls-1),%rax | 250 | cmpq $(IA32_NR_syscalls-1),%rax |
251 | ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ | 251 | ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ |
252 | jmp sysenter_do_call | 252 | jmp sysenter_do_call |
253 | CFI_ENDPROC | 253 | CFI_ENDPROC |
254 | ENDPROC(ia32_sysenter_target) | 254 | ENDPROC(ia32_sysenter_target) |
255 | 255 | ||
256 | /* | 256 | /* |
257 | * 32bit SYSCALL instruction entry. | 257 | * 32bit SYSCALL instruction entry. |
258 | * | 258 | * |
259 | * Arguments: | 259 | * Arguments: |
260 | * %eax System call number. | 260 | * %eax System call number. |
261 | * %ebx Arg1 | 261 | * %ebx Arg1 |
262 | * %ecx return EIP | 262 | * %ecx return EIP |
263 | * %edx Arg3 | 263 | * %edx Arg3 |
264 | * %esi Arg4 | 264 | * %esi Arg4 |
265 | * %edi Arg5 | 265 | * %edi Arg5 |
266 | * %ebp Arg2 [note: not saved in the stack frame, should not be touched] | 266 | * %ebp Arg2 [note: not saved in the stack frame, should not be touched] |
267 | * %esp user stack | 267 | * %esp user stack |
268 | * 0(%esp) Arg6 | 268 | * 0(%esp) Arg6 |
269 | * | 269 | * |
270 | * Interrupts off. | 270 | * Interrupts off. |
271 | * | 271 | * |
272 | * This is purely a fast path. For anything complicated we use the int 0x80 | 272 | * This is purely a fast path. For anything complicated we use the int 0x80 |
273 | * path below. Set up a complete hardware stack frame to share code | 273 | * path below. Set up a complete hardware stack frame to share code |
274 | * with the int 0x80 path. | 274 | * with the int 0x80 path. |
275 | */ | 275 | */ |
276 | ENTRY(ia32_cstar_target) | 276 | ENTRY(ia32_cstar_target) |
277 | CFI_STARTPROC32 simple | 277 | CFI_STARTPROC32 simple |
278 | CFI_SIGNAL_FRAME | 278 | CFI_SIGNAL_FRAME |
279 | CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET | 279 | CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET |
280 | CFI_REGISTER rip,rcx | 280 | CFI_REGISTER rip,rcx |
281 | /*CFI_REGISTER rflags,r11*/ | 281 | /*CFI_REGISTER rflags,r11*/ |
282 | SWAPGS_UNSAFE_STACK | 282 | SWAPGS_UNSAFE_STACK |
283 | movl %esp,%r8d | 283 | movl %esp,%r8d |
284 | CFI_REGISTER rsp,r8 | 284 | CFI_REGISTER rsp,r8 |
285 | movq PER_CPU_VAR(kernel_stack),%rsp | 285 | movq PER_CPU_VAR(kernel_stack),%rsp |
286 | /* | 286 | /* |
287 | * No need to follow this irqs on/off section: the syscall | 287 | * No need to follow this irqs on/off section: the syscall |
288 | * disabled irqs and here we enable it straight after entry: | 288 | * disabled irqs and here we enable it straight after entry: |
289 | */ | 289 | */ |
290 | ENABLE_INTERRUPTS(CLBR_NONE) | 290 | ENABLE_INTERRUPTS(CLBR_NONE) |
291 | SAVE_ARGS 8,0,0 | 291 | SAVE_ARGS 8,0,0 |
292 | movl %eax,%eax /* zero extension */ | 292 | movl %eax,%eax /* zero extension */ |
293 | movq %rax,ORIG_RAX-ARGOFFSET(%rsp) | 293 | movq %rax,ORIG_RAX-ARGOFFSET(%rsp) |
294 | movq %rcx,RIP-ARGOFFSET(%rsp) | 294 | movq %rcx,RIP-ARGOFFSET(%rsp) |
295 | CFI_REL_OFFSET rip,RIP-ARGOFFSET | 295 | CFI_REL_OFFSET rip,RIP-ARGOFFSET |
296 | movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */ | 296 | movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */ |
297 | movl %ebp,%ecx | 297 | movl %ebp,%ecx |
298 | movq $__USER32_CS,CS-ARGOFFSET(%rsp) | 298 | movq $__USER32_CS,CS-ARGOFFSET(%rsp) |
299 | movq $__USER32_DS,SS-ARGOFFSET(%rsp) | 299 | movq $__USER32_DS,SS-ARGOFFSET(%rsp) |
300 | movq %r11,EFLAGS-ARGOFFSET(%rsp) | 300 | movq %r11,EFLAGS-ARGOFFSET(%rsp) |
301 | /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/ | 301 | /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/ |
302 | movq %r8,RSP-ARGOFFSET(%rsp) | 302 | movq %r8,RSP-ARGOFFSET(%rsp) |
303 | CFI_REL_OFFSET rsp,RSP-ARGOFFSET | 303 | CFI_REL_OFFSET rsp,RSP-ARGOFFSET |
304 | /* no need to do an access_ok check here because r8 has been | 304 | /* no need to do an access_ok check here because r8 has been |
305 | 32bit zero extended */ | 305 | 32bit zero extended */ |
306 | /* hardware stack frame is complete now */ | 306 | /* hardware stack frame is complete now */ |
307 | ASM_STAC | 307 | ASM_STAC |
308 | 1: movl (%r8),%r9d | 308 | 1: movl (%r8),%r9d |
309 | _ASM_EXTABLE(1b,ia32_badarg) | 309 | _ASM_EXTABLE(1b,ia32_badarg) |
310 | ASM_CLAC | 310 | ASM_CLAC |
311 | orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 311 | orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
312 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 312 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
313 | CFI_REMEMBER_STATE | 313 | CFI_REMEMBER_STATE |
314 | jnz cstar_tracesys | 314 | jnz cstar_tracesys |
315 | cmpq $IA32_NR_syscalls-1,%rax | 315 | cmpq $IA32_NR_syscalls-1,%rax |
316 | ja ia32_badsys | 316 | ja ia32_badsys |
317 | cstar_do_call: | 317 | cstar_do_call: |
318 | IA32_ARG_FIXUP 1 | 318 | IA32_ARG_FIXUP 1 |
319 | cstar_dispatch: | 319 | cstar_dispatch: |
320 | call *ia32_sys_call_table(,%rax,8) | 320 | call *ia32_sys_call_table(,%rax,8) |
321 | movq %rax,RAX-ARGOFFSET(%rsp) | 321 | movq %rax,RAX-ARGOFFSET(%rsp) |
322 | DISABLE_INTERRUPTS(CLBR_NONE) | 322 | DISABLE_INTERRUPTS(CLBR_NONE) |
323 | TRACE_IRQS_OFF | 323 | TRACE_IRQS_OFF |
324 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 324 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
325 | jnz sysretl_audit | 325 | jnz sysretl_audit |
326 | sysretl_from_sys_call: | 326 | sysretl_from_sys_call: |
327 | andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 327 | andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
328 | RESTORE_ARGS 0,-ARG_SKIP,0,0,0 | 328 | RESTORE_ARGS 0,-ARG_SKIP,0,0,0 |
329 | movl RIP-ARGOFFSET(%rsp),%ecx | 329 | movl RIP-ARGOFFSET(%rsp),%ecx |
330 | CFI_REGISTER rip,rcx | 330 | CFI_REGISTER rip,rcx |
331 | movl EFLAGS-ARGOFFSET(%rsp),%r11d | 331 | movl EFLAGS-ARGOFFSET(%rsp),%r11d |
332 | /*CFI_REGISTER rflags,r11*/ | 332 | /*CFI_REGISTER rflags,r11*/ |
333 | xorq %r10,%r10 /* clear regs not restored; don't leak kernel values */ | 333 | xorq %r10,%r10 /* clear regs not restored; don't leak kernel values */ |
334 | xorq %r9,%r9 | 334 | xorq %r9,%r9 |
335 | xorq %r8,%r8 | 335 | xorq %r8,%r8 |
336 | TRACE_IRQS_ON | 336 | TRACE_IRQS_ON |
337 | movl RSP-ARGOFFSET(%rsp),%esp | 337 | movl RSP-ARGOFFSET(%rsp),%esp |
338 | CFI_RESTORE rsp | 338 | CFI_RESTORE rsp |
339 | USERGS_SYSRET32 | 339 | USERGS_SYSRET32 |
340 | 340 | ||
341 | #ifdef CONFIG_AUDITSYSCALL | 341 | #ifdef CONFIG_AUDITSYSCALL |
342 | cstar_auditsys: | 342 | cstar_auditsys: |
343 | CFI_RESTORE_STATE | 343 | CFI_RESTORE_STATE |
344 | movl %r9d,R9-ARGOFFSET(%rsp) /* register to be clobbered by call */ | 344 | movl %r9d,R9-ARGOFFSET(%rsp) /* register to be clobbered by call */ |
345 | auditsys_entry_common | 345 | auditsys_entry_common |
346 | movl R9-ARGOFFSET(%rsp),%r9d /* reload 6th syscall arg */ | 346 | movl R9-ARGOFFSET(%rsp),%r9d /* reload 6th syscall arg */ |
347 | jmp cstar_dispatch | 347 | jmp cstar_dispatch |
348 | 348 | ||
349 | sysretl_audit: | 349 | sysretl_audit: |
350 | auditsys_exit sysretl_from_sys_call | 350 | auditsys_exit sysretl_from_sys_call |
351 | #endif | 351 | #endif |
352 | 352 | ||
353 | cstar_tracesys: | 353 | cstar_tracesys: |
354 | #ifdef CONFIG_AUDITSYSCALL | 354 | #ifdef CONFIG_AUDITSYSCALL |
355 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 355 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
356 | jz cstar_auditsys | 356 | jz cstar_auditsys |
357 | #endif | 357 | #endif |
358 | xchgl %r9d,%ebp | 358 | xchgl %r9d,%ebp |
359 | SAVE_REST | 359 | SAVE_REST |
360 | CLEAR_RREGS 0, r9 | 360 | CLEAR_RREGS 0, r9 |
361 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ | 361 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
362 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 362 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
363 | call syscall_trace_enter | 363 | call syscall_trace_enter |
364 | LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ | 364 | LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ |
365 | RESTORE_REST | 365 | RESTORE_REST |
366 | xchgl %ebp,%r9d | 366 | xchgl %ebp,%r9d |
367 | cmpq $(IA32_NR_syscalls-1),%rax | 367 | cmpq $(IA32_NR_syscalls-1),%rax |
368 | ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ | 368 | ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ |
369 | jmp cstar_do_call | 369 | jmp cstar_do_call |
370 | END(ia32_cstar_target) | 370 | END(ia32_cstar_target) |
371 | 371 | ||
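The %ebp/(%r8) juggling in the cstar path above exists because SYSCALL from compat mode clobbers %ecx (return EIP) and %r11 (EFLAGS), so arg2 cannot arrive in %ecx and arg6 has to be fetched from the top of the user stack. A sketch of the matching 32-bit vDSO trampoline of this era (hypothetical reconstruction, not part of this diff):

	__kernel_vsyscall:
		push	%ebp		/* arg6 lands at (%esp); read above via (%r8) */
		movl	%ecx, %ebp	/* park arg2 in %ebp; SYSCALL clobbers %ecx */
		syscall
		movl	%ebp, %ecx	/* restore arg2 on the way out */
		popl	%ebp
		ret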
372 | ia32_badarg: | 372 | ia32_badarg: |
373 | ASM_CLAC | 373 | ASM_CLAC |
374 | movq $-EFAULT,%rax | 374 | movq $-EFAULT,%rax |
375 | jmp ia32_sysret | 375 | jmp ia32_sysret |
376 | CFI_ENDPROC | 376 | CFI_ENDPROC |
377 | 377 | ||
378 | /* | 378 | /* |
379 | * Emulated IA32 system calls via int 0x80. | 379 | * Emulated IA32 system calls via int 0x80. |
380 | * | 380 | * |
381 | * Arguments: | 381 | * Arguments: |
382 | * %eax System call number. | 382 | * %eax System call number. |
383 | * %ebx Arg1 | 383 | * %ebx Arg1 |
384 | * %ecx Arg2 | 384 | * %ecx Arg2 |
385 | * %edx Arg3 | 385 | * %edx Arg3 |
386 | * %esi Arg4 | 386 | * %esi Arg4 |
387 | * %edi Arg5 | 387 | * %edi Arg5 |
388 | * %ebp Arg6 [note: not saved in the stack frame, should not be touched] | 388 | * %ebp Arg6 [note: not saved in the stack frame, should not be touched] |
389 | * | 389 | * |
390 | * Notes: | 390 | * Notes: |
391 | * Uses the same stack frame as the x86-64 version. | 391 | * Uses the same stack frame as the x86-64 version. |
392 | * All registers except %eax must be saved (but ptrace may violate that) | 392 | * All registers except %eax must be saved (but ptrace may violate that) |
393 | * Arguments are zero-extended. For system calls that want sign extension and | 393 | * Arguments are zero-extended. For system calls that want sign extension and |
394 | * take long arguments, a wrapper is needed. Most calls can just be called | 394 | * take long arguments, a wrapper is needed. Most calls can just be called |
395 | * directly. | 395 | * directly. |
396 | * Assumes it is only called from user space and entered with interrupts off. | 396 | * Assumes it is only called from user space and entered with interrupts off. |
397 | */ | 397 | */ |
398 | 398 | ||
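As a usage sketch of the register convention just described, a minimal 32-bit program can issue write(2) through int $0x80 directly (assumes i386's __NR_write == 4; build with gcc -m32):

	static long int80_write(int fd, const void *buf, unsigned long len)
	{
		long ret;
		/* %eax = syscall number, %ebx/%ecx/%edx = Arg1..Arg3 as listed above */
		asm volatile("int $0x80"
			     : "=a" (ret)
			     : "a" (4), "b" (fd), "c" (buf), "d" (len)
			     : "memory");
		return ret;	/* negative values encode -errno */
	}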
399 | ENTRY(ia32_syscall) | 399 | ENTRY(ia32_syscall) |
400 | CFI_STARTPROC32 simple | 400 | CFI_STARTPROC32 simple |
401 | CFI_SIGNAL_FRAME | 401 | CFI_SIGNAL_FRAME |
402 | CFI_DEF_CFA rsp,SS+8-RIP | 402 | CFI_DEF_CFA rsp,SS+8-RIP |
403 | /*CFI_REL_OFFSET ss,SS-RIP*/ | 403 | /*CFI_REL_OFFSET ss,SS-RIP*/ |
404 | CFI_REL_OFFSET rsp,RSP-RIP | 404 | CFI_REL_OFFSET rsp,RSP-RIP |
405 | /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/ | 405 | /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/ |
406 | /*CFI_REL_OFFSET cs,CS-RIP*/ | 406 | /*CFI_REL_OFFSET cs,CS-RIP*/ |
407 | CFI_REL_OFFSET rip,RIP-RIP | 407 | CFI_REL_OFFSET rip,RIP-RIP |
408 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 408 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
409 | SWAPGS | 409 | SWAPGS |
410 | /* | 410 | /* |
411 | * No need to follow this irqs on/off section: the syscall | 411 | * No need to follow this irqs on/off section: the syscall |
412 | * disabled irqs and here we enable them straight after entry: | 412 | * disabled irqs and here we enable them straight after entry: |
413 | */ | 413 | */ |
414 | ENABLE_INTERRUPTS(CLBR_NONE) | 414 | ENABLE_INTERRUPTS(CLBR_NONE) |
415 | movl %eax,%eax /* zero extension */ | 415 | movl %eax,%eax /* zero extension */ |
416 | pushq_cfi %rax | 416 | pushq_cfi %rax |
417 | cld | 417 | cld |
418 | /* note the registers are not zero-extended to the stack frame; | 418 | /* note the registers are not zero-extended to the stack frame; |
419 | this could be a problem. */ | 419 | this could be a problem. */ |
420 | SAVE_ARGS 0,1,0 | 420 | SAVE_ARGS 0,1,0 |
421 | orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 421 | orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
422 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 422 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
423 | jnz ia32_tracesys | 423 | jnz ia32_tracesys |
424 | cmpq $(IA32_NR_syscalls-1),%rax | 424 | cmpq $(IA32_NR_syscalls-1),%rax |
425 | ja ia32_badsys | 425 | ja ia32_badsys |
426 | ia32_do_call: | 426 | ia32_do_call: |
427 | IA32_ARG_FIXUP | 427 | IA32_ARG_FIXUP |
428 | call *ia32_sys_call_table(,%rax,8) # xxx: rip relative | 428 | call *ia32_sys_call_table(,%rax,8) # xxx: rip relative |
429 | ia32_sysret: | 429 | ia32_sysret: |
430 | movq %rax,RAX-ARGOFFSET(%rsp) | 430 | movq %rax,RAX-ARGOFFSET(%rsp) |
431 | ia32_ret_from_sys_call: | 431 | ia32_ret_from_sys_call: |
432 | CLEAR_RREGS -ARGOFFSET | 432 | CLEAR_RREGS -ARGOFFSET |
433 | jmp int_ret_from_sys_call | 433 | jmp int_ret_from_sys_call |
434 | 434 | ||
435 | ia32_tracesys: | 435 | ia32_tracesys: |
436 | SAVE_REST | 436 | SAVE_REST |
437 | CLEAR_RREGS | 437 | CLEAR_RREGS |
438 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ | 438 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
439 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 439 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
440 | call syscall_trace_enter | 440 | call syscall_trace_enter |
441 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ | 441 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
442 | RESTORE_REST | 442 | RESTORE_REST |
443 | cmpq $(IA32_NR_syscalls-1),%rax | 443 | cmpq $(IA32_NR_syscalls-1),%rax |
444 | ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ | 444 | ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ |
445 | jmp ia32_do_call | 445 | jmp ia32_do_call |
446 | END(ia32_syscall) | 446 | END(ia32_syscall) |
447 | 447 | ||
448 | ia32_badsys: | 448 | ia32_badsys: |
449 | movq $0,ORIG_RAX-ARGOFFSET(%rsp) | 449 | movq $0,ORIG_RAX-ARGOFFSET(%rsp) |
450 | movq $-ENOSYS,%rax | 450 | movq $-ENOSYS,%rax |
451 | jmp ia32_sysret | 451 | jmp ia32_sysret |
452 | 452 | ||
453 | CFI_ENDPROC | 453 | CFI_ENDPROC |
454 | 454 | ||
455 | .macro PTREGSCALL label, func, arg | 455 | .macro PTREGSCALL label, func, arg |
456 | ALIGN | 456 | ALIGN |
457 | GLOBAL(\label) | 457 | GLOBAL(\label) |
458 | leaq \func(%rip),%rax | 458 | leaq \func(%rip),%rax |
459 | leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */ | 459 | leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */ |
460 | jmp ia32_ptregs_common | 460 | jmp ia32_ptregs_common |
461 | .endm | 461 | .endm |
462 | 462 | ||
463 | CFI_STARTPROC32 | 463 | CFI_STARTPROC32 |
464 | 464 | ||
465 | PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi | 465 | PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi |
466 | PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi | 466 | PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi |
467 | PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx | ||
468 | PTREGSCALL stub32_execve, compat_sys_execve, %rcx | 467 | PTREGSCALL stub32_execve, compat_sys_execve, %rcx |
469 | PTREGSCALL stub32_fork, sys_fork, %rdi | 468 | PTREGSCALL stub32_fork, sys_fork, %rdi |
470 | PTREGSCALL stub32_vfork, sys_vfork, %rdi | 469 | PTREGSCALL stub32_vfork, sys_vfork, %rdi |
471 | PTREGSCALL stub32_iopl, sys_iopl, %rsi | 470 | PTREGSCALL stub32_iopl, sys_iopl, %rsi |
472 | 471 | ||
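For reference, expanding the macro by hand for one instantiation, stub32_fork above becomes roughly the following (ALIGN omitted):

	GLOBAL(stub32_fork)
		leaq	sys_fork(%rip), %rax		/* C handler, invoked via call *%rax */
		leaq	-ARGOFFSET+8(%rsp), %rdi	/* pt_regs pointer; 8 for return address */
		jmp	ia32_ptregs_common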
473 | ALIGN | 472 | ALIGN |
474 | GLOBAL(stub32_clone) | 473 | GLOBAL(stub32_clone) |
475 | leaq sys_clone(%rip),%rax | 474 | leaq sys_clone(%rip),%rax |
476 | mov %r8, %rcx | 475 | mov %r8, %rcx |
477 | jmp ia32_ptregs_common | 476 | jmp ia32_ptregs_common |
478 | 477 | ||
479 | ALIGN | 478 | ALIGN |
480 | ia32_ptregs_common: | 479 | ia32_ptregs_common: |
481 | popq %r11 | 480 | popq %r11 |
482 | CFI_ENDPROC | 481 | CFI_ENDPROC |
483 | CFI_STARTPROC32 simple | 482 | CFI_STARTPROC32 simple |
484 | CFI_SIGNAL_FRAME | 483 | CFI_SIGNAL_FRAME |
485 | CFI_DEF_CFA rsp,SS+8-ARGOFFSET | 484 | CFI_DEF_CFA rsp,SS+8-ARGOFFSET |
486 | CFI_REL_OFFSET rax,RAX-ARGOFFSET | 485 | CFI_REL_OFFSET rax,RAX-ARGOFFSET |
487 | CFI_REL_OFFSET rcx,RCX-ARGOFFSET | 486 | CFI_REL_OFFSET rcx,RCX-ARGOFFSET |
488 | CFI_REL_OFFSET rdx,RDX-ARGOFFSET | 487 | CFI_REL_OFFSET rdx,RDX-ARGOFFSET |
489 | CFI_REL_OFFSET rsi,RSI-ARGOFFSET | 488 | CFI_REL_OFFSET rsi,RSI-ARGOFFSET |
490 | CFI_REL_OFFSET rdi,RDI-ARGOFFSET | 489 | CFI_REL_OFFSET rdi,RDI-ARGOFFSET |
491 | CFI_REL_OFFSET rip,RIP-ARGOFFSET | 490 | CFI_REL_OFFSET rip,RIP-ARGOFFSET |
492 | /* CFI_REL_OFFSET cs,CS-ARGOFFSET*/ | 491 | /* CFI_REL_OFFSET cs,CS-ARGOFFSET*/ |
493 | /* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/ | 492 | /* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/ |
494 | CFI_REL_OFFSET rsp,RSP-ARGOFFSET | 493 | CFI_REL_OFFSET rsp,RSP-ARGOFFSET |
495 | /* CFI_REL_OFFSET ss,SS-ARGOFFSET*/ | 494 | /* CFI_REL_OFFSET ss,SS-ARGOFFSET*/ |
496 | SAVE_REST | 495 | SAVE_REST |
497 | call *%rax | 496 | call *%rax |
498 | RESTORE_REST | 497 | RESTORE_REST |
499 | jmp ia32_sysret /* misbalances the return cache */ | 498 | jmp ia32_sysret /* misbalances the return cache */ |
500 | CFI_ENDPROC | 499 | CFI_ENDPROC |
501 | END(ia32_ptregs_common) | 500 | END(ia32_ptregs_common) |
502 | 501 |
arch/x86/include/asm/ia32.h
1 | #ifndef _ASM_X86_IA32_H | 1 | #ifndef _ASM_X86_IA32_H |
2 | #define _ASM_X86_IA32_H | 2 | #define _ASM_X86_IA32_H |
3 | 3 | ||
4 | 4 | ||
5 | #ifdef CONFIG_IA32_EMULATION | 5 | #ifdef CONFIG_IA32_EMULATION |
6 | 6 | ||
7 | #include <linux/compat.h> | 7 | #include <linux/compat.h> |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * 32 bit structures for IA32 support. | 10 | * 32 bit structures for IA32 support. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <asm/sigcontext32.h> | 13 | #include <asm/sigcontext32.h> |
14 | 14 | ||
15 | /* signal.h */ | 15 | /* signal.h */ |
16 | struct sigaction32 { | 16 | struct sigaction32 { |
17 | unsigned int sa_handler; /* Really a pointer, but need to deal | 17 | unsigned int sa_handler; /* Really a pointer, but need to deal |
18 | with 32 bits */ | 18 | with 32 bits */ |
19 | unsigned int sa_flags; | 19 | unsigned int sa_flags; |
20 | unsigned int sa_restorer; /* Another 32 bit pointer */ | 20 | unsigned int sa_restorer; /* Another 32 bit pointer */ |
21 | compat_sigset_t sa_mask; /* A 32 bit mask */ | 21 | compat_sigset_t sa_mask; /* A 32 bit mask */ |
22 | }; | 22 | }; |
23 | 23 | ||
24 | struct old_sigaction32 { | 24 | struct old_sigaction32 { |
25 | unsigned int sa_handler; /* Really a pointer, but need to deal | 25 | unsigned int sa_handler; /* Really a pointer, but need to deal |
26 | with 32 bits */ | 26 | with 32 bits */ |
27 | compat_old_sigset_t sa_mask; /* A 32 bit mask */ | 27 | compat_old_sigset_t sa_mask; /* A 32 bit mask */ |
28 | unsigned int sa_flags; | 28 | unsigned int sa_flags; |
29 | unsigned int sa_restorer; /* Another 32 bit pointer */ | 29 | unsigned int sa_restorer; /* Another 32 bit pointer */ |
30 | }; | 30 | }; |
31 | 31 | ||
32 | typedef struct sigaltstack_ia32 { | ||
33 | unsigned int ss_sp; | ||
34 | int ss_flags; | ||
35 | unsigned int ss_size; | ||
36 | } stack_ia32_t; | ||
37 | |||
38 | struct ucontext_ia32 { | 32 | struct ucontext_ia32 { |
39 | unsigned int uc_flags; | 33 | unsigned int uc_flags; |
40 | unsigned int uc_link; | 34 | unsigned int uc_link; |
41 | stack_ia32_t uc_stack; | 35 | compat_stack_t uc_stack; |
42 | struct sigcontext_ia32 uc_mcontext; | 36 | struct sigcontext_ia32 uc_mcontext; |
43 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | 37 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ |
44 | }; | 38 | }; |
45 | 39 | ||
46 | struct ucontext_x32 { | 40 | struct ucontext_x32 { |
47 | unsigned int uc_flags; | 41 | unsigned int uc_flags; |
48 | unsigned int uc_link; | 42 | unsigned int uc_link; |
49 | stack_ia32_t uc_stack; | 43 | compat_stack_t uc_stack; |
50 | unsigned int uc__pad0; /* needed for alignment */ | 44 | unsigned int uc__pad0; /* needed for alignment */ |
51 | struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */ | 45 | struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */ |
52 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | 46 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ |
53 | }; | 47 | }; |
54 | 48 | ||
55 | /* This matches struct stat64 in glibc2.2, hence the absolutely | 49 | /* This matches struct stat64 in glibc2.2, hence the absolutely |
56 | * insane amounts of padding around dev_t's. | 50 | * insane amounts of padding around dev_t's. |
57 | */ | 51 | */ |
58 | struct stat64 { | 52 | struct stat64 { |
59 | unsigned long long st_dev; | 53 | unsigned long long st_dev; |
60 | unsigned char __pad0[4]; | 54 | unsigned char __pad0[4]; |
61 | 55 | ||
62 | #define STAT64_HAS_BROKEN_ST_INO 1 | 56 | #define STAT64_HAS_BROKEN_ST_INO 1 |
63 | unsigned int __st_ino; | 57 | unsigned int __st_ino; |
64 | 58 | ||
65 | unsigned int st_mode; | 59 | unsigned int st_mode; |
66 | unsigned int st_nlink; | 60 | unsigned int st_nlink; |
67 | 61 | ||
68 | unsigned int st_uid; | 62 | unsigned int st_uid; |
69 | unsigned int st_gid; | 63 | unsigned int st_gid; |
70 | 64 | ||
71 | unsigned long long st_rdev; | 65 | unsigned long long st_rdev; |
72 | unsigned char __pad3[4]; | 66 | unsigned char __pad3[4]; |
73 | 67 | ||
74 | long long st_size; | 68 | long long st_size; |
75 | unsigned int st_blksize; | 69 | unsigned int st_blksize; |
76 | 70 | ||
77 | long long st_blocks;/* Number 512-byte blocks allocated */ | 71 | long long st_blocks;/* Number 512-byte blocks allocated */ |
78 | 72 | ||
79 | unsigned st_atime; | 73 | unsigned st_atime; |
80 | unsigned st_atime_nsec; | 74 | unsigned st_atime_nsec; |
81 | unsigned st_mtime; | 75 | unsigned st_mtime; |
82 | unsigned st_mtime_nsec; | 76 | unsigned st_mtime_nsec; |
83 | unsigned st_ctime; | 77 | unsigned st_ctime; |
84 | unsigned st_ctime_nsec; | 78 | unsigned st_ctime_nsec; |
85 | 79 | ||
86 | unsigned long long st_ino; | 80 | unsigned long long st_ino; |
87 | } __attribute__((packed)); | 81 | } __attribute__((packed)); |
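Because this layout must match glibc byte for byte, a compile-time size check is cheap insurance; a sketch (the 96-byte total is derived by summing the fields above, so treat it as an assumption):

	static void __init check_stat64_layout(void)
	{
		/* packed + explicit pads should pin the i386 ABI size exactly */
		BUILD_BUG_ON(sizeof(struct stat64) != 96);
	}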
88 | 82 | ||
89 | #define IA32_STACK_TOP IA32_PAGE_OFFSET | 83 | #define IA32_STACK_TOP IA32_PAGE_OFFSET |
90 | 84 | ||
91 | #ifdef __KERNEL__ | 85 | #ifdef __KERNEL__ |
92 | struct linux_binprm; | 86 | struct linux_binprm; |
93 | extern int ia32_setup_arg_pages(struct linux_binprm *bprm, | 87 | extern int ia32_setup_arg_pages(struct linux_binprm *bprm, |
94 | unsigned long stack_top, int exec_stack); | 88 | unsigned long stack_top, int exec_stack); |
95 | struct mm_struct; | 89 | struct mm_struct; |
96 | extern void ia32_pick_mmap_layout(struct mm_struct *mm); | 90 | extern void ia32_pick_mmap_layout(struct mm_struct *mm); |
97 | 91 | ||
98 | #endif | 92 | #endif |
99 | 93 | ||
100 | #endif /* CONFIG_IA32_EMULATION */ | 94 | #endif /* CONFIG_IA32_EMULATION */ |
101 | 95 | ||
102 | #endif /* _ASM_X86_IA32_H */ | 96 | #endif /* _ASM_X86_IA32_H */ |
103 | 97 |
arch/x86/include/asm/sys_ia32.h
1 | /* | 1 | /* |
2 | * sys_ia32.h - Linux ia32 syscall interfaces | 2 | * sys_ia32.h - Linux ia32 syscall interfaces |
3 | * | 3 | * |
4 | * Copyright (c) 2008 Jaswinder Singh Rajput | 4 | * Copyright (c) 2008 Jaswinder Singh Rajput |
5 | * | 5 | * |
6 | * This file is released under the GPLv2. | 6 | * This file is released under the GPLv2. |
7 | * See the file COPYING for more details. | 7 | * See the file COPYING for more details. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef _ASM_X86_SYS_IA32_H | 10 | #ifndef _ASM_X86_SYS_IA32_H |
11 | #define _ASM_X86_SYS_IA32_H | 11 | #define _ASM_X86_SYS_IA32_H |
12 | 12 | ||
13 | #ifdef CONFIG_COMPAT | 13 | #ifdef CONFIG_COMPAT |
14 | 14 | ||
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/signal.h> | 18 | #include <linux/signal.h> |
19 | #include <asm/compat.h> | 19 | #include <asm/compat.h> |
20 | #include <asm/ia32.h> | 20 | #include <asm/ia32.h> |
21 | 21 | ||
22 | /* ia32/sys_ia32.c */ | 22 | /* ia32/sys_ia32.c */ |
23 | asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long); | 23 | asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long); |
24 | asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long); | 24 | asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long); |
25 | 25 | ||
26 | asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *); | 26 | asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *); |
27 | asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *); | 27 | asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *); |
28 | asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *); | 28 | asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *); |
29 | asmlinkage long sys32_fstatat(unsigned int, const char __user *, | 29 | asmlinkage long sys32_fstatat(unsigned int, const char __user *, |
30 | struct stat64 __user *, int); | 30 | struct stat64 __user *, int); |
31 | struct mmap_arg_struct32; | 31 | struct mmap_arg_struct32; |
32 | asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *); | 32 | asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *); |
33 | asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long); | 33 | asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long); |
34 | 34 | ||
35 | struct sigaction32; | 35 | struct sigaction32; |
36 | struct old_sigaction32; | 36 | struct old_sigaction32; |
37 | asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *, | 37 | asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *, |
38 | struct sigaction32 __user *, unsigned int); | 38 | struct sigaction32 __user *, unsigned int); |
39 | asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, | 39 | asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, |
40 | struct old_sigaction32 __user *); | 40 | struct old_sigaction32 __user *); |
41 | asmlinkage long sys32_alarm(unsigned int); | 41 | asmlinkage long sys32_alarm(unsigned int); |
42 | 42 | ||
43 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); | 43 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); |
44 | asmlinkage long sys32_sysfs(int, u32, u32); | 44 | asmlinkage long sys32_sysfs(int, u32, u32); |
45 | 45 | ||
46 | asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, | 46 | asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, |
47 | struct compat_timespec __user *); | 47 | struct compat_timespec __user *); |
48 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); | 48 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); |
49 | asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); | 49 | asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); |
50 | 50 | ||
51 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); | 51 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); |
52 | asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32); | 52 | asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32); |
53 | 53 | ||
54 | asmlinkage long sys32_personality(unsigned long); | 54 | asmlinkage long sys32_personality(unsigned long); |
55 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); | 55 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); |
56 | 56 | ||
57 | long sys32_lseek(unsigned int, int, unsigned int); | 57 | long sys32_lseek(unsigned int, int, unsigned int); |
58 | long sys32_kill(int, int); | 58 | long sys32_kill(int, int); |
59 | long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int); | 59 | long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int); |
60 | long sys32_vm86_warning(void); | 60 | long sys32_vm86_warning(void); |
61 | long sys32_lookup_dcookie(u32, u32, char __user *, size_t); | 61 | long sys32_lookup_dcookie(u32, u32, char __user *, size_t); |
62 | 62 | ||
63 | asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t); | 63 | asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t); |
64 | asmlinkage long sys32_sync_file_range(int, unsigned, unsigned, | 64 | asmlinkage long sys32_sync_file_range(int, unsigned, unsigned, |
65 | unsigned, unsigned, int); | 65 | unsigned, unsigned, int); |
66 | asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int); | 66 | asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int); |
67 | asmlinkage long sys32_fallocate(int, int, unsigned, | 67 | asmlinkage long sys32_fallocate(int, int, unsigned, |
68 | unsigned, unsigned, unsigned); | 68 | unsigned, unsigned, unsigned); |
69 | 69 | ||
70 | /* ia32/ia32_signal.c */ | 70 | /* ia32/ia32_signal.c */ |
71 | asmlinkage long sys32_sigsuspend(int, int, old_sigset_t); | 71 | asmlinkage long sys32_sigsuspend(int, int, old_sigset_t); |
72 | asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *, | ||
73 | stack_ia32_t __user *, struct pt_regs *); | ||
74 | asmlinkage long sys32_sigreturn(struct pt_regs *); | 72 | asmlinkage long sys32_sigreturn(struct pt_regs *); |
75 | asmlinkage long sys32_rt_sigreturn(struct pt_regs *); | 73 | asmlinkage long sys32_rt_sigreturn(struct pt_regs *); |
76 | 74 | ||
77 | /* ia32/ipc32.c */ | 75 | /* ia32/ipc32.c */ |
78 | asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32); | 76 | asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32); |
79 | 77 | ||
80 | asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int, | 78 | asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int, |
81 | const char __user *); | 79 | const char __user *); |
82 | 80 | ||
83 | #endif /* CONFIG_COMPAT */ | 81 | #endif /* CONFIG_COMPAT */ |
84 | 82 | ||
85 | #endif /* _ASM_X86_SYS_IA32_H */ | 83 | #endif /* _ASM_X86_SYS_IA32_H */ |
86 | 84 |
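The sys32_sigaltstack declaration disappears above because the handler now lives in generic code. A sketch of the CONFIG_GENERIC_SIGALTSTACK version this commit introduces in kernel/signal.c, abridged from memory (names and details are assumptions, not taken from this hunk):

	#ifdef CONFIG_GENERIC_SIGALTSTACK
	COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
	{
		compat_stack_t uss32;
		stack_t uss, uoss;
		mm_segment_t seg;
		int ret;

		if (uss_ptr) {
			if (copy_from_user(&uss32, uss_ptr, sizeof(uss32)))
				return -EFAULT;
			uss.ss_sp = compat_ptr(uss32.ss_sp);	/* widen the 32-bit pointer */
			uss.ss_flags = uss32.ss_flags;
			uss.ss_size = uss32.ss_size;
		}
		seg = get_fs();
		set_fs(KERNEL_DS);	/* let do_sigaltstack() work on kernel copies */
		ret = do_sigaltstack((stack_t __force __user *)(uss_ptr ? &uss : NULL),
				     (stack_t __force __user *)&uoss,
				     compat_user_stack_pointer());
		set_fs(seg);
		if (ret >= 0 && uoss_ptr) {
			if (put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
			    put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
			    put_user(uoss.ss_size, &uoss_ptr->ss_size))
				ret = -EFAULT;
		}
		return ret;
	}
	#endif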
arch/x86/kernel/entry_64.S
1 | /* | 1 | /* |
2 | * linux/arch/x86_64/entry.S | 2 | * linux/arch/x86_64/entry.S |
3 | * | 3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | 4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs | 5 | * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs |
6 | * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> | 6 | * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * entry.S contains the system-call and fault low-level handling routines. | 10 | * entry.S contains the system-call and fault low-level handling routines. |
11 | * | 11 | * |
12 | * Some of this is documented in Documentation/x86/entry_64.txt | 12 | * Some of this is documented in Documentation/x86/entry_64.txt |
13 | * | 13 | * |
14 | * NOTE: This code handles signal-recognition, which happens every time | 14 | * NOTE: This code handles signal-recognition, which happens every time |
15 | * after an interrupt and after each system call. | 15 | * after an interrupt and after each system call. |
16 | * | 16 | * |
17 | * Normal syscalls and interrupts don't save a full stack frame; this is | 17 | * Normal syscalls and interrupts don't save a full stack frame; this is |
18 | * only done for syscall tracing, signals, or fork/exec et al. | 18 | * only done for syscall tracing, signals, or fork/exec et al. |
19 | * | 19 | * |
20 | * A note on terminology: | 20 | * A note on terminology: |
21 | * - top of stack: Architecture defined interrupt frame from SS to RIP | 21 | * - top of stack: Architecture defined interrupt frame from SS to RIP |
22 | * at the top of the kernel process stack. | 22 | * at the top of the kernel process stack. |
23 | * - partial stack frame: partially saved registers up to R11. | 23 | * - partial stack frame: partially saved registers up to R11. |
24 | * - full stack frame: Like partial stack frame, but all registers saved. | 24 | * - full stack frame: Like partial stack frame, but all registers saved. |
25 | * | 25 | * |
26 | * Some macro usage: | 26 | * Some macro usage: |
27 | * - CFI macros are used to generate dwarf2 unwind information for better | 27 | * - CFI macros are used to generate dwarf2 unwind information for better |
28 | * backtraces. They don't change any code. | 28 | * backtraces. They don't change any code. |
29 | * - SAVE_ALL/RESTORE_ALL - Save/restore all registers | 29 | * - SAVE_ALL/RESTORE_ALL - Save/restore all registers |
30 | * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify. | 30 | * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify. |
31 | * There are unfortunately lots of special cases where some registers | 31 | * There are unfortunately lots of special cases where some registers |
32 | * are not touched. The macro is a big mess that should be cleaned up. | 32 | * are not touched. The macro is a big mess that should be cleaned up. |
33 | * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS. | 33 | * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS. |
34 | * Gives a full stack frame. | 34 | * Gives a full stack frame. |
35 | * - ENTRY/END Define functions in the symbol table. | 35 | * - ENTRY/END Define functions in the symbol table. |
36 | * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack | 36 | * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack |
37 | * frame that is otherwise undefined after a SYSCALL | 37 | * frame that is otherwise undefined after a SYSCALL |
38 | * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging. | 38 | * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging. |
39 | * - errorentry/paranoidentry/zeroentry - Define exception entry points. | 39 | * - errorentry/paranoidentry/zeroentry - Define exception entry points. |
40 | */ | 40 | */ |
41 | 41 | ||
42 | #include <linux/linkage.h> | 42 | #include <linux/linkage.h> |
43 | #include <asm/segment.h> | 43 | #include <asm/segment.h> |
44 | #include <asm/cache.h> | 44 | #include <asm/cache.h> |
45 | #include <asm/errno.h> | 45 | #include <asm/errno.h> |
46 | #include <asm/dwarf2.h> | 46 | #include <asm/dwarf2.h> |
47 | #include <asm/calling.h> | 47 | #include <asm/calling.h> |
48 | #include <asm/asm-offsets.h> | 48 | #include <asm/asm-offsets.h> |
49 | #include <asm/msr.h> | 49 | #include <asm/msr.h> |
50 | #include <asm/unistd.h> | 50 | #include <asm/unistd.h> |
51 | #include <asm/thread_info.h> | 51 | #include <asm/thread_info.h> |
52 | #include <asm/hw_irq.h> | 52 | #include <asm/hw_irq.h> |
53 | #include <asm/page_types.h> | 53 | #include <asm/page_types.h> |
54 | #include <asm/irqflags.h> | 54 | #include <asm/irqflags.h> |
55 | #include <asm/paravirt.h> | 55 | #include <asm/paravirt.h> |
56 | #include <asm/ftrace.h> | 56 | #include <asm/ftrace.h> |
57 | #include <asm/percpu.h> | 57 | #include <asm/percpu.h> |
58 | #include <asm/asm.h> | 58 | #include <asm/asm.h> |
59 | #include <asm/rcu.h> | 59 | #include <asm/rcu.h> |
60 | #include <asm/smap.h> | 60 | #include <asm/smap.h> |
61 | #include <linux/err.h> | 61 | #include <linux/err.h> |
62 | 62 | ||
63 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ | 63 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ |
64 | #include <linux/elf-em.h> | 64 | #include <linux/elf-em.h> |
65 | #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) | 65 | #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) |
66 | #define __AUDIT_ARCH_64BIT 0x80000000 | 66 | #define __AUDIT_ARCH_64BIT 0x80000000 |
67 | #define __AUDIT_ARCH_LE 0x40000000 | 67 | #define __AUDIT_ARCH_LE 0x40000000 |
68 | 68 | ||
69 | .code64 | 69 | .code64 |
70 | .section .entry.text, "ax" | 70 | .section .entry.text, "ax" |
71 | 71 | ||
72 | #ifdef CONFIG_FUNCTION_TRACER | 72 | #ifdef CONFIG_FUNCTION_TRACER |
73 | 73 | ||
74 | #ifdef CC_USING_FENTRY | 74 | #ifdef CC_USING_FENTRY |
75 | # define function_hook __fentry__ | 75 | # define function_hook __fentry__ |
76 | #else | 76 | #else |
77 | # define function_hook mcount | 77 | # define function_hook mcount |
78 | #endif | 78 | #endif |
79 | 79 | ||
80 | #ifdef CONFIG_DYNAMIC_FTRACE | 80 | #ifdef CONFIG_DYNAMIC_FTRACE |
81 | 81 | ||
82 | ENTRY(function_hook) | 82 | ENTRY(function_hook) |
83 | retq | 83 | retq |
84 | END(function_hook) | 84 | END(function_hook) |
85 | 85 | ||
86 | /* skip is set if stack has been adjusted */ | 86 | /* skip is set if stack has been adjusted */ |
87 | .macro ftrace_caller_setup skip=0 | 87 | .macro ftrace_caller_setup skip=0 |
88 | MCOUNT_SAVE_FRAME \skip | 88 | MCOUNT_SAVE_FRAME \skip |
89 | 89 | ||
90 | /* Load the ftrace_ops into the 3rd parameter */ | 90 | /* Load the ftrace_ops into the 3rd parameter */ |
91 | leaq function_trace_op, %rdx | 91 | leaq function_trace_op, %rdx |
92 | 92 | ||
93 | /* Load ip into the first parameter */ | 93 | /* Load ip into the first parameter */ |
94 | movq RIP(%rsp), %rdi | 94 | movq RIP(%rsp), %rdi |
95 | subq $MCOUNT_INSN_SIZE, %rdi | 95 | subq $MCOUNT_INSN_SIZE, %rdi |
96 | /* Load the parent_ip into the second parameter */ | 96 | /* Load the parent_ip into the second parameter */ |
97 | #ifdef CC_USING_FENTRY | 97 | #ifdef CC_USING_FENTRY |
98 | movq SS+16(%rsp), %rsi | 98 | movq SS+16(%rsp), %rsi |
99 | #else | 99 | #else |
100 | movq 8(%rbp), %rsi | 100 | movq 8(%rbp), %rsi |
101 | #endif | 101 | #endif |
102 | .endm | 102 | .endm |
103 | 103 | ||
104 | ENTRY(ftrace_caller) | 104 | ENTRY(ftrace_caller) |
105 | /* Check if tracing was disabled (quick check) */ | 105 | /* Check if tracing was disabled (quick check) */ |
106 | cmpl $0, function_trace_stop | 106 | cmpl $0, function_trace_stop |
107 | jne ftrace_stub | 107 | jne ftrace_stub |
108 | 108 | ||
109 | ftrace_caller_setup | 109 | ftrace_caller_setup |
110 | /* regs go into 4th parameter (but make it NULL) */ | 110 | /* regs go into 4th parameter (but make it NULL) */ |
111 | movq $0, %rcx | 111 | movq $0, %rcx |
112 | 112 | ||
113 | GLOBAL(ftrace_call) | 113 | GLOBAL(ftrace_call) |
114 | call ftrace_stub | 114 | call ftrace_stub |
115 | 115 | ||
116 | MCOUNT_RESTORE_FRAME | 116 | MCOUNT_RESTORE_FRAME |
117 | ftrace_return: | 117 | ftrace_return: |
118 | 118 | ||
119 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 119 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
120 | GLOBAL(ftrace_graph_call) | 120 | GLOBAL(ftrace_graph_call) |
121 | jmp ftrace_stub | 121 | jmp ftrace_stub |
122 | #endif | 122 | #endif |
123 | 123 | ||
124 | GLOBAL(ftrace_stub) | 124 | GLOBAL(ftrace_stub) |
125 | retq | 125 | retq |
126 | END(ftrace_caller) | 126 | END(ftrace_caller) |
127 | 127 | ||
128 | ENTRY(ftrace_regs_caller) | 128 | ENTRY(ftrace_regs_caller) |
129 | /* Save the current flags before the compare (in the SS location) */ | 129 | /* Save the current flags before the compare (in the SS location) */ |
130 | pushfq | 130 | pushfq |
131 | 131 | ||
132 | /* Check if tracing was disabled (quick check) */ | 132 | /* Check if tracing was disabled (quick check) */ |
133 | cmpl $0, function_trace_stop | 133 | cmpl $0, function_trace_stop |
134 | jne ftrace_restore_flags | 134 | jne ftrace_restore_flags |
135 | 135 | ||
136 | /* skip=8 to skip flags saved in SS */ | 136 | /* skip=8 to skip flags saved in SS */ |
137 | ftrace_caller_setup 8 | 137 | ftrace_caller_setup 8 |
138 | 138 | ||
139 | /* Save the rest of pt_regs */ | 139 | /* Save the rest of pt_regs */ |
140 | movq %r15, R15(%rsp) | 140 | movq %r15, R15(%rsp) |
141 | movq %r14, R14(%rsp) | 141 | movq %r14, R14(%rsp) |
142 | movq %r13, R13(%rsp) | 142 | movq %r13, R13(%rsp) |
143 | movq %r12, R12(%rsp) | 143 | movq %r12, R12(%rsp) |
144 | movq %r11, R11(%rsp) | 144 | movq %r11, R11(%rsp) |
145 | movq %r10, R10(%rsp) | 145 | movq %r10, R10(%rsp) |
146 | movq %rbp, RBP(%rsp) | 146 | movq %rbp, RBP(%rsp) |
147 | movq %rbx, RBX(%rsp) | 147 | movq %rbx, RBX(%rsp) |
148 | /* Copy saved flags */ | 148 | /* Copy saved flags */ |
149 | movq SS(%rsp), %rcx | 149 | movq SS(%rsp), %rcx |
150 | movq %rcx, EFLAGS(%rsp) | 150 | movq %rcx, EFLAGS(%rsp) |
151 | /* Kernel segments */ | 151 | /* Kernel segments */ |
152 | movq $__KERNEL_DS, %rcx | 152 | movq $__KERNEL_DS, %rcx |
153 | movq %rcx, SS(%rsp) | 153 | movq %rcx, SS(%rsp) |
154 | movq $__KERNEL_CS, %rcx | 154 | movq $__KERNEL_CS, %rcx |
155 | movq %rcx, CS(%rsp) | 155 | movq %rcx, CS(%rsp) |
156 | /* Stack - skipping return address */ | 156 | /* Stack - skipping return address */ |
157 | leaq SS+16(%rsp), %rcx | 157 | leaq SS+16(%rsp), %rcx |
158 | movq %rcx, RSP(%rsp) | 158 | movq %rcx, RSP(%rsp) |
159 | 159 | ||
160 | /* regs go into 4th parameter */ | 160 | /* regs go into 4th parameter */ |
161 | leaq (%rsp), %rcx | 161 | leaq (%rsp), %rcx |
162 | 162 | ||
163 | GLOBAL(ftrace_regs_call) | 163 | GLOBAL(ftrace_regs_call) |
164 | call ftrace_stub | 164 | call ftrace_stub |
165 | 165 | ||
166 | /* Copy flags back to SS, to restore them */ | 166 | /* Copy flags back to SS, to restore them */ |
167 | movq EFLAGS(%rsp), %rax | 167 | movq EFLAGS(%rsp), %rax |
168 | movq %rax, SS(%rsp) | 168 | movq %rax, SS(%rsp) |
169 | 169 | ||
170 | /* Handlers can change the RIP */ | 170 | /* Handlers can change the RIP */ |
171 | movq RIP(%rsp), %rax | 171 | movq RIP(%rsp), %rax |
172 | movq %rax, SS+8(%rsp) | 172 | movq %rax, SS+8(%rsp) |
173 | 173 | ||
174 | /* restore the rest of pt_regs */ | 174 | /* restore the rest of pt_regs */ |
175 | movq R15(%rsp), %r15 | 175 | movq R15(%rsp), %r15 |
176 | movq R14(%rsp), %r14 | 176 | movq R14(%rsp), %r14 |
177 | movq R13(%rsp), %r13 | 177 | movq R13(%rsp), %r13 |
178 | movq R12(%rsp), %r12 | 178 | movq R12(%rsp), %r12 |
179 | movq R10(%rsp), %r10 | 179 | movq R10(%rsp), %r10 |
180 | movq RBP(%rsp), %rbp | 180 | movq RBP(%rsp), %rbp |
181 | movq RBX(%rsp), %rbx | 181 | movq RBX(%rsp), %rbx |
182 | 182 | ||
183 | /* skip=8 to skip flags saved in SS */ | 183 | /* skip=8 to skip flags saved in SS */ |
184 | MCOUNT_RESTORE_FRAME 8 | 184 | MCOUNT_RESTORE_FRAME 8 |
185 | 185 | ||
186 | /* Restore flags */ | 186 | /* Restore flags */ |
187 | popfq | 187 | popfq |
188 | 188 | ||
189 | jmp ftrace_return | 189 | jmp ftrace_return |
190 | ftrace_restore_flags: | 190 | ftrace_restore_flags: |
191 | popfq | 191 | popfq |
192 | jmp ftrace_stub | 192 | jmp ftrace_stub |
193 | 193 | ||
194 | END(ftrace_regs_caller) | 194 | END(ftrace_regs_caller) |
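ftrace_regs_caller above serves callbacks that asked for the full pt_regs. A minimal registration sketch, assuming the four-argument callback signature of this kernel generation (my_probe/my_ops are placeholder names):

	#include <linux/ftrace.h>

	static void my_probe(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* regs is populated here because SAVE_REGS was requested */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_probe,
		.flags	= FTRACE_OPS_FL_SAVE_REGS,
	};

	/* in module init: register_ftrace_function(&my_ops); */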
195 | 195 | ||
196 | 196 | ||
197 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | 197 | #else /* ! CONFIG_DYNAMIC_FTRACE */ |
198 | 198 | ||
199 | ENTRY(function_hook) | 199 | ENTRY(function_hook) |
200 | cmpl $0, function_trace_stop | 200 | cmpl $0, function_trace_stop |
201 | jne ftrace_stub | 201 | jne ftrace_stub |
202 | 202 | ||
203 | cmpq $ftrace_stub, ftrace_trace_function | 203 | cmpq $ftrace_stub, ftrace_trace_function |
204 | jnz trace | 204 | jnz trace |
205 | 205 | ||
206 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 206 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
207 | cmpq $ftrace_stub, ftrace_graph_return | 207 | cmpq $ftrace_stub, ftrace_graph_return |
208 | jnz ftrace_graph_caller | 208 | jnz ftrace_graph_caller |
209 | 209 | ||
210 | cmpq $ftrace_graph_entry_stub, ftrace_graph_entry | 210 | cmpq $ftrace_graph_entry_stub, ftrace_graph_entry |
211 | jnz ftrace_graph_caller | 211 | jnz ftrace_graph_caller |
212 | #endif | 212 | #endif |
213 | 213 | ||
214 | GLOBAL(ftrace_stub) | 214 | GLOBAL(ftrace_stub) |
215 | retq | 215 | retq |
216 | 216 | ||
217 | trace: | 217 | trace: |
218 | MCOUNT_SAVE_FRAME | 218 | MCOUNT_SAVE_FRAME |
219 | 219 | ||
220 | movq RIP(%rsp), %rdi | 220 | movq RIP(%rsp), %rdi |
221 | #ifdef CC_USING_FENTRY | 221 | #ifdef CC_USING_FENTRY |
222 | movq SS+16(%rsp), %rsi | 222 | movq SS+16(%rsp), %rsi |
223 | #else | 223 | #else |
224 | movq 8(%rbp), %rsi | 224 | movq 8(%rbp), %rsi |
225 | #endif | 225 | #endif |
226 | subq $MCOUNT_INSN_SIZE, %rdi | 226 | subq $MCOUNT_INSN_SIZE, %rdi |
227 | 227 | ||
228 | call *ftrace_trace_function | 228 | call *ftrace_trace_function |
229 | 229 | ||
230 | MCOUNT_RESTORE_FRAME | 230 | MCOUNT_RESTORE_FRAME |
231 | 231 | ||
232 | jmp ftrace_stub | 232 | jmp ftrace_stub |
233 | END(function_hook) | 233 | END(function_hook) |
234 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 234 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
235 | #endif /* CONFIG_FUNCTION_TRACER */ | 235 | #endif /* CONFIG_FUNCTION_TRACER */ |
236 | 236 | ||
237 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 237 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
238 | ENTRY(ftrace_graph_caller) | 238 | ENTRY(ftrace_graph_caller) |
239 | MCOUNT_SAVE_FRAME | 239 | MCOUNT_SAVE_FRAME |
240 | 240 | ||
241 | #ifdef CC_USING_FENTRY | 241 | #ifdef CC_USING_FENTRY |
242 | leaq SS+16(%rsp), %rdi | 242 | leaq SS+16(%rsp), %rdi |
243 | movq $0, %rdx /* No framepointers needed */ | 243 | movq $0, %rdx /* No framepointers needed */ |
244 | #else | 244 | #else |
245 | leaq 8(%rbp), %rdi | 245 | leaq 8(%rbp), %rdi |
246 | movq (%rbp), %rdx | 246 | movq (%rbp), %rdx |
247 | #endif | 247 | #endif |
248 | movq RIP(%rsp), %rsi | 248 | movq RIP(%rsp), %rsi |
249 | subq $MCOUNT_INSN_SIZE, %rsi | 249 | subq $MCOUNT_INSN_SIZE, %rsi |
250 | 250 | ||
251 | call prepare_ftrace_return | 251 | call prepare_ftrace_return |
252 | 252 | ||
253 | MCOUNT_RESTORE_FRAME | 253 | MCOUNT_RESTORE_FRAME |
254 | 254 | ||
255 | retq | 255 | retq |
256 | END(ftrace_graph_caller) | 256 | END(ftrace_graph_caller) |
257 | 257 | ||
258 | GLOBAL(return_to_handler) | 258 | GLOBAL(return_to_handler) |
259 | subq $24, %rsp | 259 | subq $24, %rsp |
260 | 260 | ||
261 | /* Save the return values */ | 261 | /* Save the return values */ |
262 | movq %rax, (%rsp) | 262 | movq %rax, (%rsp) |
263 | movq %rdx, 8(%rsp) | 263 | movq %rdx, 8(%rsp) |
264 | movq %rbp, %rdi | 264 | movq %rbp, %rdi |
265 | 265 | ||
266 | call ftrace_return_to_handler | 266 | call ftrace_return_to_handler |
267 | 267 | ||
268 | movq %rax, %rdi | 268 | movq %rax, %rdi |
269 | movq 8(%rsp), %rdx | 269 | movq 8(%rsp), %rdx |
270 | movq (%rsp), %rax | 270 | movq (%rsp), %rax |
271 | addq $24, %rsp | 271 | addq $24, %rsp |
272 | jmp *%rdi | 272 | jmp *%rdi |
273 | #endif | 273 | #endif |
274 | 274 | ||
275 | 275 | ||
276 | #ifndef CONFIG_PREEMPT | 276 | #ifndef CONFIG_PREEMPT |
277 | #define retint_kernel retint_restore_args | 277 | #define retint_kernel retint_restore_args |
278 | #endif | 278 | #endif |
279 | 279 | ||
280 | #ifdef CONFIG_PARAVIRT | 280 | #ifdef CONFIG_PARAVIRT |
281 | ENTRY(native_usergs_sysret64) | 281 | ENTRY(native_usergs_sysret64) |
282 | swapgs | 282 | swapgs |
283 | sysretq | 283 | sysretq |
284 | ENDPROC(native_usergs_sysret64) | 284 | ENDPROC(native_usergs_sysret64) |
285 | #endif /* CONFIG_PARAVIRT */ | 285 | #endif /* CONFIG_PARAVIRT */ |
286 | 286 | ||
287 | 287 | ||
288 | .macro TRACE_IRQS_IRETQ offset=ARGOFFSET | 288 | .macro TRACE_IRQS_IRETQ offset=ARGOFFSET |
289 | #ifdef CONFIG_TRACE_IRQFLAGS | 289 | #ifdef CONFIG_TRACE_IRQFLAGS |
290 | bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */ | 290 | bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */ |
291 | jnc 1f | 291 | jnc 1f |
292 | TRACE_IRQS_ON | 292 | TRACE_IRQS_ON |
293 | 1: | 293 | 1: |
294 | #endif | 294 | #endif |
295 | .endm | 295 | .endm |
296 | 296 | ||
297 | /* | 297 | /* |
298 | * When dynamic function tracer is enabled it will add a breakpoint | 298 | * When dynamic function tracer is enabled it will add a breakpoint |
299 | * to all locations that it is about to modify, sync CPUs, update | 299 | * to all locations that it is about to modify, sync CPUs, update |
300 | * all the code, sync CPUs, then remove the breakpoints. During this | 300 | * all the code, sync CPUs, then remove the breakpoints. During this |
301 | * window, if lockdep is enabled, it might jump back into the debug | 301 | * window, if lockdep is enabled, it might jump back into the debug |
302 | * handler outside of the IST protection update (TRACE_IRQS_ON/OFF). | 302 | * handler outside of the IST protection update (TRACE_IRQS_ON/OFF). |
303 | * | 303 | * |
304 | * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to | 304 | * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to |
305 | * make sure the stack pointer does not get reset back to the top | 305 | * make sure the stack pointer does not get reset back to the top |
306 | * of the debug stack, and instead just reuses the current stack. | 306 | * of the debug stack, and instead just reuses the current stack. |
307 | */ | 307 | */ |
308 | #if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS) | 308 | #if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS) |
309 | 309 | ||
310 | .macro TRACE_IRQS_OFF_DEBUG | 310 | .macro TRACE_IRQS_OFF_DEBUG |
311 | call debug_stack_set_zero | 311 | call debug_stack_set_zero |
312 | TRACE_IRQS_OFF | 312 | TRACE_IRQS_OFF |
313 | call debug_stack_reset | 313 | call debug_stack_reset |
314 | .endm | 314 | .endm |
315 | 315 | ||
316 | .macro TRACE_IRQS_ON_DEBUG | 316 | .macro TRACE_IRQS_ON_DEBUG |
317 | call debug_stack_set_zero | 317 | call debug_stack_set_zero |
318 | TRACE_IRQS_ON | 318 | TRACE_IRQS_ON |
319 | call debug_stack_reset | 319 | call debug_stack_reset |
320 | .endm | 320 | .endm |
321 | 321 | ||
322 | .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET | 322 | .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET |
323 | bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */ | 323 | bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */ |
324 | jnc 1f | 324 | jnc 1f |
325 | TRACE_IRQS_ON_DEBUG | 325 | TRACE_IRQS_ON_DEBUG |
326 | 1: | 326 | 1: |
327 | .endm | 327 | .endm |
328 | 328 | ||
329 | #else | 329 | #else |
330 | # define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF | 330 | # define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF |
331 | # define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON | 331 | # define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON |
332 | # define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ | 332 | # define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ |
333 | #endif | 333 | #endif |
334 | 334 | ||
335 | /* | 335 | /* |
336 | * C code is not supposed to know about undefined top of stack. Every time | 336 | * C code is not supposed to know about undefined top of stack. Every time |
337 | * a C function with a pt_regs argument is called from the SYSCALL-based | 337 | * a C function with a pt_regs argument is called from the SYSCALL-based |
338 | * fast path, FIXUP_TOP_OF_STACK is needed. | 338 | * fast path, FIXUP_TOP_OF_STACK is needed. |
339 | * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs | 339 | * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs |
340 | * manipulation. | 340 | * manipulation. |
341 | */ | 341 | */ |
342 | 342 | ||
343 | /* %rsp:at FRAMEEND */ | 343 | /* %rsp:at FRAMEEND */ |
344 | .macro FIXUP_TOP_OF_STACK tmp offset=0 | 344 | .macro FIXUP_TOP_OF_STACK tmp offset=0 |
345 | movq PER_CPU_VAR(old_rsp),\tmp | 345 | movq PER_CPU_VAR(old_rsp),\tmp |
346 | movq \tmp,RSP+\offset(%rsp) | 346 | movq \tmp,RSP+\offset(%rsp) |
347 | movq $__USER_DS,SS+\offset(%rsp) | 347 | movq $__USER_DS,SS+\offset(%rsp) |
348 | movq $__USER_CS,CS+\offset(%rsp) | 348 | movq $__USER_CS,CS+\offset(%rsp) |
349 | movq $-1,RCX+\offset(%rsp) | 349 | movq $-1,RCX+\offset(%rsp) |
350 | movq R11+\offset(%rsp),\tmp /* get eflags */ | 350 | movq R11+\offset(%rsp),\tmp /* get eflags */ |
351 | movq \tmp,EFLAGS+\offset(%rsp) | 351 | movq \tmp,EFLAGS+\offset(%rsp) |
352 | .endm | 352 | .endm |
353 | 353 | ||
354 | .macro RESTORE_TOP_OF_STACK tmp offset=0 | 354 | .macro RESTORE_TOP_OF_STACK tmp offset=0 |
355 | movq RSP+\offset(%rsp),\tmp | 355 | movq RSP+\offset(%rsp),\tmp |
356 | movq \tmp,PER_CPU_VAR(old_rsp) | 356 | movq \tmp,PER_CPU_VAR(old_rsp) |
357 | movq EFLAGS+\offset(%rsp),\tmp | 357 | movq EFLAGS+\offset(%rsp),\tmp |
358 | movq \tmp,R11+\offset(%rsp) | 358 | movq \tmp,R11+\offset(%rsp) |
359 | .endm | 359 | .endm |
360 | 360 | ||
361 | .macro FAKE_STACK_FRAME child_rip | 361 | .macro FAKE_STACK_FRAME child_rip |
362 | /* push in order ss, rsp, eflags, cs, rip */ | 362 | /* push in order ss, rsp, eflags, cs, rip */ |
363 | xorl %eax, %eax | 363 | xorl %eax, %eax |
364 | pushq_cfi $__KERNEL_DS /* ss */ | 364 | pushq_cfi $__KERNEL_DS /* ss */ |
365 | /*CFI_REL_OFFSET ss,0*/ | 365 | /*CFI_REL_OFFSET ss,0*/ |
366 | pushq_cfi %rax /* rsp */ | 366 | pushq_cfi %rax /* rsp */ |
367 | CFI_REL_OFFSET rsp,0 | 367 | CFI_REL_OFFSET rsp,0 |
368 | pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */ | 368 | pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */ |
369 | /*CFI_REL_OFFSET rflags,0*/ | 369 | /*CFI_REL_OFFSET rflags,0*/ |
370 | pushq_cfi $__KERNEL_CS /* cs */ | 370 | pushq_cfi $__KERNEL_CS /* cs */ |
371 | /*CFI_REL_OFFSET cs,0*/ | 371 | /*CFI_REL_OFFSET cs,0*/ |
372 | pushq_cfi \child_rip /* rip */ | 372 | pushq_cfi \child_rip /* rip */ |
373 | CFI_REL_OFFSET rip,0 | 373 | CFI_REL_OFFSET rip,0 |
374 | pushq_cfi %rax /* orig rax */ | 374 | pushq_cfi %rax /* orig rax */ |
375 | .endm | 375 | .endm |
376 | 376 | ||
377 | .macro UNFAKE_STACK_FRAME | 377 | .macro UNFAKE_STACK_FRAME |
378 | addq $8*6, %rsp | 378 | addq $8*6, %rsp |
379 | CFI_ADJUST_CFA_OFFSET -(6*8) | 379 | CFI_ADJUST_CFA_OFFSET -(6*8) |
380 | .endm | 380 | .endm |
381 | 381 | ||
382 | /* | 382 | /* |
383 | * initial frame state for interrupts (and exceptions without error code) | 383 | * initial frame state for interrupts (and exceptions without error code) |
384 | */ | 384 | */ |
385 | .macro EMPTY_FRAME start=1 offset=0 | 385 | .macro EMPTY_FRAME start=1 offset=0 |
386 | .if \start | 386 | .if \start |
387 | CFI_STARTPROC simple | 387 | CFI_STARTPROC simple |
388 | CFI_SIGNAL_FRAME | 388 | CFI_SIGNAL_FRAME |
389 | CFI_DEF_CFA rsp,8+\offset | 389 | CFI_DEF_CFA rsp,8+\offset |
390 | .else | 390 | .else |
391 | CFI_DEF_CFA_OFFSET 8+\offset | 391 | CFI_DEF_CFA_OFFSET 8+\offset |
392 | .endif | 392 | .endif |
393 | .endm | 393 | .endm |
394 | 394 | ||
395 | /* | 395 | /* |
396 | * initial frame state for interrupts (and exceptions without error code) | 396 | * initial frame state for interrupts (and exceptions without error code) |
397 | */ | 397 | */ |
398 | .macro INTR_FRAME start=1 offset=0 | 398 | .macro INTR_FRAME start=1 offset=0 |
399 | EMPTY_FRAME \start, SS+8+\offset-RIP | 399 | EMPTY_FRAME \start, SS+8+\offset-RIP |
400 | /*CFI_REL_OFFSET ss, SS+\offset-RIP*/ | 400 | /*CFI_REL_OFFSET ss, SS+\offset-RIP*/ |
401 | CFI_REL_OFFSET rsp, RSP+\offset-RIP | 401 | CFI_REL_OFFSET rsp, RSP+\offset-RIP |
402 | /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/ | 402 | /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/ |
403 | /*CFI_REL_OFFSET cs, CS+\offset-RIP*/ | 403 | /*CFI_REL_OFFSET cs, CS+\offset-RIP*/ |
404 | CFI_REL_OFFSET rip, RIP+\offset-RIP | 404 | CFI_REL_OFFSET rip, RIP+\offset-RIP |
405 | .endm | 405 | .endm |
406 | 406 | ||
407 | /* | 407 | /* |
408 | * initial frame state for exceptions with error code (and interrupts | 408 | * initial frame state for exceptions with error code (and interrupts |
409 | * with vector already pushed) | 409 | * with vector already pushed) |
410 | */ | 410 | */ |
411 | .macro XCPT_FRAME start=1 offset=0 | 411 | .macro XCPT_FRAME start=1 offset=0 |
412 | INTR_FRAME \start, RIP+\offset-ORIG_RAX | 412 | INTR_FRAME \start, RIP+\offset-ORIG_RAX |
413 | /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/ | 413 | /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/ |
414 | .endm | 414 | .endm |
415 | 415 | ||
416 | /* | 416 | /* |
417 | * frame that enables calling into C. | 417 | * frame that enables calling into C. |
418 | */ | 418 | */ |
419 | .macro PARTIAL_FRAME start=1 offset=0 | 419 | .macro PARTIAL_FRAME start=1 offset=0 |
420 | XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET | 420 | XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET |
421 | CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET | 421 | CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET |
422 | CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET | 422 | CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET |
423 | CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET | 423 | CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET |
424 | CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET | 424 | CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET |
425 | CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET | 425 | CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET |
426 | CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET | 426 | CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET |
427 | CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET | 427 | CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET |
428 | CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET | 428 | CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET |
429 | CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET | 429 | CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET |
430 | .endm | 430 | .endm |
431 | 431 | ||
432 | /* | 432 | /* |
433 | * frame that enables passing a complete pt_regs to a C function. | 433 | * frame that enables passing a complete pt_regs to a C function. |
434 | */ | 434 | */ |
435 | .macro DEFAULT_FRAME start=1 offset=0 | 435 | .macro DEFAULT_FRAME start=1 offset=0 |
436 | PARTIAL_FRAME \start, R11+\offset-R15 | 436 | PARTIAL_FRAME \start, R11+\offset-R15 |
437 | CFI_REL_OFFSET rbx, RBX+\offset | 437 | CFI_REL_OFFSET rbx, RBX+\offset |
438 | CFI_REL_OFFSET rbp, RBP+\offset | 438 | CFI_REL_OFFSET rbp, RBP+\offset |
439 | CFI_REL_OFFSET r12, R12+\offset | 439 | CFI_REL_OFFSET r12, R12+\offset |
440 | CFI_REL_OFFSET r13, R13+\offset | 440 | CFI_REL_OFFSET r13, R13+\offset |
441 | CFI_REL_OFFSET r14, R14+\offset | 441 | CFI_REL_OFFSET r14, R14+\offset |
442 | CFI_REL_OFFSET r15, R15+\offset | 442 | CFI_REL_OFFSET r15, R15+\offset |
443 | .endm | 443 | .endm |
444 | 444 | ||
445 | /* save partial stack frame */ | 445 | /* save partial stack frame */ |
446 | .macro SAVE_ARGS_IRQ | 446 | .macro SAVE_ARGS_IRQ |
447 | cld | 447 | cld |
448 | /* start from rbp in pt_regs and jump over */ | 448 | /* start from rbp in pt_regs and jump over */ |
449 | movq_cfi rdi, (RDI-RBP) | 449 | movq_cfi rdi, (RDI-RBP) |
450 | movq_cfi rsi, (RSI-RBP) | 450 | movq_cfi rsi, (RSI-RBP) |
451 | movq_cfi rdx, (RDX-RBP) | 451 | movq_cfi rdx, (RDX-RBP) |
452 | movq_cfi rcx, (RCX-RBP) | 452 | movq_cfi rcx, (RCX-RBP) |
453 | movq_cfi rax, (RAX-RBP) | 453 | movq_cfi rax, (RAX-RBP) |
454 | movq_cfi r8, (R8-RBP) | 454 | movq_cfi r8, (R8-RBP) |
455 | movq_cfi r9, (R9-RBP) | 455 | movq_cfi r9, (R9-RBP) |
456 | movq_cfi r10, (R10-RBP) | 456 | movq_cfi r10, (R10-RBP) |
457 | movq_cfi r11, (R11-RBP) | 457 | movq_cfi r11, (R11-RBP) |
458 | 458 | ||
459 | /* Save rbp so that we can unwind from get_irq_regs() */ | 459 | /* Save rbp so that we can unwind from get_irq_regs() */ |
460 | movq_cfi rbp, 0 | 460 | movq_cfi rbp, 0 |
461 | 461 | ||
462 | /* Save previous stack value */ | 462 | /* Save previous stack value */ |
463 | movq %rsp, %rsi | 463 | movq %rsp, %rsi |
464 | 464 | ||
465 | leaq -RBP(%rsp),%rdi /* arg1 for handler */ | 465 | leaq -RBP(%rsp),%rdi /* arg1 for handler */ |
466 | testl $3, CS-RBP(%rsi) | 466 | testl $3, CS-RBP(%rsi) |
467 | je 1f | 467 | je 1f |
468 | SWAPGS | 468 | SWAPGS |
469 | /* | 469 | /* |
470 | * irq_count is used to check if a CPU is already on an interrupt stack | 470 | * irq_count is used to check if a CPU is already on an interrupt stack |
471 | * or not. While this is essentially redundant with preempt_count it is | 471 | * or not. While this is essentially redundant with preempt_count it is |
472 | * a little cheaper to use a separate counter in the PDA (short of | 472 | * a little cheaper to use a separate counter in the PDA (short of |
473 | * moving irq_enter into assembly, which would be too much work) | 473 | * moving irq_enter into assembly, which would be too much work) |
474 | */ | 474 | */ |
475 | 1: incl PER_CPU_VAR(irq_count) | 475 | 1: incl PER_CPU_VAR(irq_count) |
476 | cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp | 476 | cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp |
477 | CFI_DEF_CFA_REGISTER rsi | 477 | CFI_DEF_CFA_REGISTER rsi |
478 | 478 | ||
479 | /* Store previous stack value */ | 479 | /* Store previous stack value */ |
480 | pushq %rsi | 480 | pushq %rsi |
481 | CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \ | 481 | CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \ |
482 | 0x77 /* DW_OP_breg7 */, 0, \ | 482 | 0x77 /* DW_OP_breg7 */, 0, \ |
483 | 0x06 /* DW_OP_deref */, \ | 483 | 0x06 /* DW_OP_deref */, \ |
484 | 0x08 /* DW_OP_const1u */, SS+8-RBP, \ | 484 | 0x08 /* DW_OP_const1u */, SS+8-RBP, \ |
485 | 0x22 /* DW_OP_plus */ | 485 | 0x22 /* DW_OP_plus */ |
486 | /* We entered an interrupt context - irqs are off: */ | 486 | /* We entered an interrupt context - irqs are off: */ |
487 | TRACE_IRQS_OFF | 487 | TRACE_IRQS_OFF |
488 | .endm | 488 | .endm |
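The incl/cmovzq pair above is the entire nesting test: per-cpu irq_count starts at -1, so only the outermost interrupt increments it to zero (setting ZF) and takes the irq stack. In C it would read roughly as follows (a sketch, not a drop-in replacement; sp stands for the eventual %rsp):

	/* outermost interrupt only: the increment just hit zero */
	if (this_cpu_inc_return(irq_count) == 0)
		sp = this_cpu_read(irq_stack_ptr);	/* becomes the new %rsp */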
489 | 489 | ||
490 | ENTRY(save_rest) | 490 | ENTRY(save_rest) |
491 | PARTIAL_FRAME 1 (REST_SKIP+8) | 491 | PARTIAL_FRAME 1 (REST_SKIP+8) |
492 | movq 5*8+16(%rsp), %r11 /* save return address */ | 492 | movq 5*8+16(%rsp), %r11 /* save return address */ |
493 | movq_cfi rbx, RBX+16 | 493 | movq_cfi rbx, RBX+16 |
494 | movq_cfi rbp, RBP+16 | 494 | movq_cfi rbp, RBP+16 |
495 | movq_cfi r12, R12+16 | 495 | movq_cfi r12, R12+16 |
496 | movq_cfi r13, R13+16 | 496 | movq_cfi r13, R13+16 |
497 | movq_cfi r14, R14+16 | 497 | movq_cfi r14, R14+16 |
498 | movq_cfi r15, R15+16 | 498 | movq_cfi r15, R15+16 |
499 | movq %r11, 8(%rsp) /* return address */ | 499 | movq %r11, 8(%rsp) /* return address */ |
500 | FIXUP_TOP_OF_STACK %r11, 16 | 500 | FIXUP_TOP_OF_STACK %r11, 16 |
501 | ret | 501 | ret |
502 | CFI_ENDPROC | 502 | CFI_ENDPROC |
503 | END(save_rest) | 503 | END(save_rest) |
504 | 504 | ||
505 | /* save complete stack frame */ | 505 | /* save complete stack frame */ |
506 | .pushsection .kprobes.text, "ax" | 506 | .pushsection .kprobes.text, "ax" |
507 | ENTRY(save_paranoid) | 507 | ENTRY(save_paranoid) |
508 | XCPT_FRAME 1 RDI+8 | 508 | XCPT_FRAME 1 RDI+8 |
509 | cld | 509 | cld |
510 | movq_cfi rdi, RDI+8 | 510 | movq_cfi rdi, RDI+8 |
511 | movq_cfi rsi, RSI+8 | 511 | movq_cfi rsi, RSI+8 |
512 | movq_cfi rdx, RDX+8 | 512 | movq_cfi rdx, RDX+8 |
513 | movq_cfi rcx, RCX+8 | 513 | movq_cfi rcx, RCX+8 |
514 | movq_cfi rax, RAX+8 | 514 | movq_cfi rax, RAX+8 |
515 | movq_cfi r8, R8+8 | 515 | movq_cfi r8, R8+8 |
516 | movq_cfi r9, R9+8 | 516 | movq_cfi r9, R9+8 |
517 | movq_cfi r10, R10+8 | 517 | movq_cfi r10, R10+8 |
518 | movq_cfi r11, R11+8 | 518 | movq_cfi r11, R11+8 |
519 | movq_cfi rbx, RBX+8 | 519 | movq_cfi rbx, RBX+8 |
520 | movq_cfi rbp, RBP+8 | 520 | movq_cfi rbp, RBP+8 |
521 | movq_cfi r12, R12+8 | 521 | movq_cfi r12, R12+8 |
522 | movq_cfi r13, R13+8 | 522 | movq_cfi r13, R13+8 |
523 | movq_cfi r14, R14+8 | 523 | movq_cfi r14, R14+8 |
524 | movq_cfi r15, R15+8 | 524 | movq_cfi r15, R15+8 |
525 | movl $1,%ebx | 525 | movl $1,%ebx |
526 | movl $MSR_GS_BASE,%ecx | 526 | movl $MSR_GS_BASE,%ecx |
527 | rdmsr | 527 | rdmsr |
528 | testl %edx,%edx | 528 | testl %edx,%edx |
529 | js 1f /* negative -> in kernel */ | 529 | js 1f /* negative -> in kernel */ |
530 | SWAPGS | 530 | SWAPGS |
531 | xorl %ebx,%ebx | 531 | xorl %ebx,%ebx |
532 | 1: ret | 532 | 1: ret |
533 | CFI_ENDPROC | 533 | CFI_ENDPROC |
534 | END(save_paranoid) | 534 | END(save_paranoid) |
535 | .popsection | 535 | .popsection |
536 | 536 | ||
537 | /* | 537 | /* |
538 | * A newly forked process directly context switches into this address. | 538 | * A newly forked process directly context switches into this address. |
539 | * | 539 | * |
540 | * rdi: prev task we switched from | 540 | * rdi: prev task we switched from |
541 | */ | 541 | */ |
542 | ENTRY(ret_from_fork) | 542 | ENTRY(ret_from_fork) |
543 | DEFAULT_FRAME | 543 | DEFAULT_FRAME |
544 | 544 | ||
545 | LOCK ; btr $TIF_FORK,TI_flags(%r8) | 545 | LOCK ; btr $TIF_FORK,TI_flags(%r8) |
546 | 546 | ||
547 | pushq_cfi $0x0002 | 547 | pushq_cfi $0x0002 |
548 | popfq_cfi # reset kernel eflags | 548 | popfq_cfi # reset kernel eflags |
549 | 549 | ||
550 | call schedule_tail # rdi: 'prev' task parameter | 550 | call schedule_tail # rdi: 'prev' task parameter |
551 | 551 | ||
552 | GET_THREAD_INFO(%rcx) | 552 | GET_THREAD_INFO(%rcx) |
553 | 553 | ||
554 | RESTORE_REST | 554 | RESTORE_REST |
555 | 555 | ||
556 | testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? | 556 | testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? |
557 | jz 1f | 557 | jz 1f |
558 | 558 | ||
559 | testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET | 559 | testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET |
560 | jnz int_ret_from_sys_call | 560 | jnz int_ret_from_sys_call |
561 | 561 | ||
562 | RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET | 562 | RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET |
563 | jmp ret_from_sys_call # go to the SYSRET fastpath | 563 | jmp ret_from_sys_call # go to the SYSRET fastpath |
564 | 564 | ||
565 | 1: | 565 | 1: |
566 | subq $REST_SKIP, %rsp # leave space for volatiles | 566 | subq $REST_SKIP, %rsp # leave space for volatiles |
567 | CFI_ADJUST_CFA_OFFSET REST_SKIP | 567 | CFI_ADJUST_CFA_OFFSET REST_SKIP |
568 | movq %rbp, %rdi | 568 | movq %rbp, %rdi |
569 | call *%rbx | 569 | call *%rbx |
570 | movl $0, RAX(%rsp) | 570 | movl $0, RAX(%rsp) |
571 | RESTORE_REST | 571 | RESTORE_REST |
572 | jmp int_ret_from_sys_call | 572 | jmp int_ret_from_sys_call |
573 | CFI_ENDPROC | 573 | CFI_ENDPROC |
574 | END(ret_from_fork) | 574 | END(ret_from_fork) |
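For the kernel-thread branch at label 1 above, the working assumption (this
is what this kernel's copy_thread() arranges) is that the thread function
was parked in the pt_regs rbx slot and its argument in the rbp slot, so
after RESTORE_REST the call sequence amounts to:

	movq	%rbp, %rdi	# argument for the thread function
	call	*%rbx		# the thread function itself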
575 | 575 | ||
576 | /* | 576 | /* |
577 | * System call entry. Up to 6 arguments in registers are supported. | 577 | * System call entry. Up to 6 arguments in registers are supported. |
578 | * | 578 | * |
579 | * SYSCALL does not save anything on the stack and does not change the | 579 | * SYSCALL does not save anything on the stack and does not change the |
580 | * stack pointer. However, it does mask the flags register for us, so | 580 | * stack pointer. However, it does mask the flags register for us, so |
581 | * CLD and CLAC are not needed. | 581 | * CLD and CLAC are not needed. |
582 | */ | 582 | */ |
583 | 583 | ||
584 | /* | 584 | /* |
585 | * Register setup: | 585 | * Register setup: |
586 | * rax system call number | 586 | * rax system call number |
587 | * rdi arg0 | 587 | * rdi arg0 |
588 | * rcx return address for syscall/sysret, C arg3 | 588 | * rcx return address for syscall/sysret, C arg3 |
589 | * rsi arg1 | 589 | * rsi arg1 |
590 | * rdx arg2 | 590 | * rdx arg2 |
591 | * r10 arg3 (--> moved to rcx for C) | 591 | * r10 arg3 (--> moved to rcx for C) |
592 | * r8 arg4 | 592 | * r8 arg4 |
593 | * r9 arg5 | 593 | * r9 arg5 |
594 | * r11 eflags for syscall/sysret, temporary for C | 594 | * r11 eflags for syscall/sysret, temporary for C |
595 | * r12-r15,rbp,rbx saved by C code, not touched. | 595 | * r12-r15,rbp,rbx saved by C code, not touched. |
596 | * | 596 | * |
597 | * Interrupts are off on entry. | 597 | * Interrupts are off on entry. |
598 | * Only called from user space. | 598 | * Only called from user space. |
599 | * | 599 | * |
600 | * XXX if we had a free scratch register we could save the RSP into the stack frame | 600 | * XXX if we had a free scratch register we could save the RSP into the stack frame |
601 | * and report it properly in ps. Unfortunately we don't have one. | 601 | * and report it properly in ps. Unfortunately we don't have one. |
602 | * | 602 | * |
603 | * When the user can change the frame, always force IRET. That is because | 603 | * When the user can change the frame, always force IRET. That is because |
604 | * IRET deals with non-canonical addresses better. SYSRET has trouble | 604 | * IRET deals with non-canonical addresses better. SYSRET has trouble |
605 | * with them due to bugs in both AMD and Intel CPUs. | 605 | * with them due to bugs in both AMD and Intel CPUs. |
606 | */ | 606 | */ |
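As an illustration of the register convention documented above, a minimal
user-space write(2) would be issued like this (msg and len are placeholder
symbols, not part of this file):

	movl	$1, %eax		# system call number (__NR_write)
	movl	$1, %edi		# arg0: fd = stdout
	leaq	msg(%rip), %rsi		# arg1: buffer
	movl	$len, %edx		# arg2: byte count
	syscall				# clobbers %rcx (RIP) and %r11 (rflags)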
607 | 607 | ||
608 | ENTRY(system_call) | 608 | ENTRY(system_call) |
609 | CFI_STARTPROC simple | 609 | CFI_STARTPROC simple |
610 | CFI_SIGNAL_FRAME | 610 | CFI_SIGNAL_FRAME |
611 | CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET | 611 | CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET |
612 | CFI_REGISTER rip,rcx | 612 | CFI_REGISTER rip,rcx |
613 | /*CFI_REGISTER rflags,r11*/ | 613 | /*CFI_REGISTER rflags,r11*/ |
614 | SWAPGS_UNSAFE_STACK | 614 | SWAPGS_UNSAFE_STACK |
615 | /* | 615 | /* |
616 | * A hypervisor implementation might want to use a label | 616 | * A hypervisor implementation might want to use a label |
617 | * after the swapgs, so that it can do the swapgs | 617 | * after the swapgs, so that it can do the swapgs |
618 | * for the guest and jump here on syscall. | 618 | * for the guest and jump here on syscall. |
619 | */ | 619 | */ |
620 | GLOBAL(system_call_after_swapgs) | 620 | GLOBAL(system_call_after_swapgs) |
621 | 621 | ||
622 | movq %rsp,PER_CPU_VAR(old_rsp) | 622 | movq %rsp,PER_CPU_VAR(old_rsp) |
623 | movq PER_CPU_VAR(kernel_stack),%rsp | 623 | movq PER_CPU_VAR(kernel_stack),%rsp |
624 | /* | 624 | /* |
625 | * No need to follow this irqs off/on section - it's straight | 625 | * No need to follow this irqs off/on section - it's straight |
626 | * and short: | 626 | * and short: |
627 | */ | 627 | */ |
628 | ENABLE_INTERRUPTS(CLBR_NONE) | 628 | ENABLE_INTERRUPTS(CLBR_NONE) |
629 | SAVE_ARGS 8,0 | 629 | SAVE_ARGS 8,0 |
630 | movq %rax,ORIG_RAX-ARGOFFSET(%rsp) | 630 | movq %rax,ORIG_RAX-ARGOFFSET(%rsp) |
631 | movq %rcx,RIP-ARGOFFSET(%rsp) | 631 | movq %rcx,RIP-ARGOFFSET(%rsp) |
632 | CFI_REL_OFFSET rip,RIP-ARGOFFSET | 632 | CFI_REL_OFFSET rip,RIP-ARGOFFSET |
633 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 633 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
634 | jnz tracesys | 634 | jnz tracesys |
635 | system_call_fastpath: | 635 | system_call_fastpath: |
636 | #if __SYSCALL_MASK == ~0 | 636 | #if __SYSCALL_MASK == ~0 |
637 | cmpq $__NR_syscall_max,%rax | 637 | cmpq $__NR_syscall_max,%rax |
638 | #else | 638 | #else |
639 | andl $__SYSCALL_MASK,%eax | 639 | andl $__SYSCALL_MASK,%eax |
640 | cmpl $__NR_syscall_max,%eax | 640 | cmpl $__NR_syscall_max,%eax |
641 | #endif | 641 | #endif |
642 | ja badsys | 642 | ja badsys |
643 | movq %r10,%rcx | 643 | movq %r10,%rcx |
644 | call *sys_call_table(,%rax,8) # XXX: rip relative | 644 | call *sys_call_table(,%rax,8) # XXX: rip relative |
645 | movq %rax,RAX-ARGOFFSET(%rsp) | 645 | movq %rax,RAX-ARGOFFSET(%rsp) |
646 | /* | 646 | /* |
647 | * Syscall return path ending with SYSRET (fast path) | 647 | * Syscall return path ending with SYSRET (fast path) |
648 | * Has incomplete stack frame and undefined top of stack. | 648 | * Has incomplete stack frame and undefined top of stack. |
649 | */ | 649 | */ |
650 | ret_from_sys_call: | 650 | ret_from_sys_call: |
651 | movl $_TIF_ALLWORK_MASK,%edi | 651 | movl $_TIF_ALLWORK_MASK,%edi |
652 | /* edi: flagmask */ | 652 | /* edi: flagmask */ |
653 | sysret_check: | 653 | sysret_check: |
654 | LOCKDEP_SYS_EXIT | 654 | LOCKDEP_SYS_EXIT |
655 | DISABLE_INTERRUPTS(CLBR_NONE) | 655 | DISABLE_INTERRUPTS(CLBR_NONE) |
656 | TRACE_IRQS_OFF | 656 | TRACE_IRQS_OFF |
657 | movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx | 657 | movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx |
658 | andl %edi,%edx | 658 | andl %edi,%edx |
659 | jnz sysret_careful | 659 | jnz sysret_careful |
660 | CFI_REMEMBER_STATE | 660 | CFI_REMEMBER_STATE |
661 | /* | 661 | /* |
662 | * sysretq will re-enable interrupts: | 662 | * sysretq will re-enable interrupts: |
663 | */ | 663 | */ |
664 | TRACE_IRQS_ON | 664 | TRACE_IRQS_ON |
665 | movq RIP-ARGOFFSET(%rsp),%rcx | 665 | movq RIP-ARGOFFSET(%rsp),%rcx |
666 | CFI_REGISTER rip,rcx | 666 | CFI_REGISTER rip,rcx |
667 | RESTORE_ARGS 1,-ARG_SKIP,0 | 667 | RESTORE_ARGS 1,-ARG_SKIP,0 |
668 | /*CFI_REGISTER rflags,r11*/ | 668 | /*CFI_REGISTER rflags,r11*/ |
669 | movq PER_CPU_VAR(old_rsp), %rsp | 669 | movq PER_CPU_VAR(old_rsp), %rsp |
670 | USERGS_SYSRET64 | 670 | USERGS_SYSRET64 |
671 | 671 | ||
672 | CFI_RESTORE_STATE | 672 | CFI_RESTORE_STATE |
673 | /* Handle reschedules */ | 673 | /* Handle reschedules */ |
674 | /* edx: work, edi: workmask */ | 674 | /* edx: work, edi: workmask */ |
675 | sysret_careful: | 675 | sysret_careful: |
676 | bt $TIF_NEED_RESCHED,%edx | 676 | bt $TIF_NEED_RESCHED,%edx |
677 | jnc sysret_signal | 677 | jnc sysret_signal |
678 | TRACE_IRQS_ON | 678 | TRACE_IRQS_ON |
679 | ENABLE_INTERRUPTS(CLBR_NONE) | 679 | ENABLE_INTERRUPTS(CLBR_NONE) |
680 | pushq_cfi %rdi | 680 | pushq_cfi %rdi |
681 | SCHEDULE_USER | 681 | SCHEDULE_USER |
682 | popq_cfi %rdi | 682 | popq_cfi %rdi |
683 | jmp sysret_check | 683 | jmp sysret_check |
684 | 684 | ||
685 | /* Handle a signal */ | 685 | /* Handle a signal */ |
686 | sysret_signal: | 686 | sysret_signal: |
687 | TRACE_IRQS_ON | 687 | TRACE_IRQS_ON |
688 | ENABLE_INTERRUPTS(CLBR_NONE) | 688 | ENABLE_INTERRUPTS(CLBR_NONE) |
689 | #ifdef CONFIG_AUDITSYSCALL | 689 | #ifdef CONFIG_AUDITSYSCALL |
690 | bt $TIF_SYSCALL_AUDIT,%edx | 690 | bt $TIF_SYSCALL_AUDIT,%edx |
691 | jc sysret_audit | 691 | jc sysret_audit |
692 | #endif | 692 | #endif |
693 | /* | 693 | /* |
694 | * We have a signal, or exit tracing or single-step. | 694 | * We have a signal, or exit tracing or single-step. |
695 | * These all wind up with the iret return path anyway, | 695 | * These all wind up with the iret return path anyway, |
696 | * so just join that path right now. | 696 | * so just join that path right now. |
697 | */ | 697 | */ |
698 | FIXUP_TOP_OF_STACK %r11, -ARGOFFSET | 698 | FIXUP_TOP_OF_STACK %r11, -ARGOFFSET |
699 | jmp int_check_syscall_exit_work | 699 | jmp int_check_syscall_exit_work |
700 | 700 | ||
701 | badsys: | 701 | badsys: |
702 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) | 702 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) |
703 | jmp ret_from_sys_call | 703 | jmp ret_from_sys_call |
704 | 704 | ||
705 | #ifdef CONFIG_AUDITSYSCALL | 705 | #ifdef CONFIG_AUDITSYSCALL |
706 | /* | 706 | /* |
707 | * Fast path for syscall audit without full syscall trace. | 707 | * Fast path for syscall audit without full syscall trace. |
708 | * We just call __audit_syscall_entry() directly, and then | 708 | * We just call __audit_syscall_entry() directly, and then |
709 | * jump back to the normal fast path. | 709 | * jump back to the normal fast path. |
710 | */ | 710 | */ |
711 | auditsys: | 711 | auditsys: |
712 | movq %r10,%r9 /* 6th arg: 4th syscall arg */ | 712 | movq %r10,%r9 /* 6th arg: 4th syscall arg */ |
713 | movq %rdx,%r8 /* 5th arg: 3rd syscall arg */ | 713 | movq %rdx,%r8 /* 5th arg: 3rd syscall arg */ |
714 | movq %rsi,%rcx /* 4th arg: 2nd syscall arg */ | 714 | movq %rsi,%rcx /* 4th arg: 2nd syscall arg */ |
715 | movq %rdi,%rdx /* 3rd arg: 1st syscall arg */ | 715 | movq %rdi,%rdx /* 3rd arg: 1st syscall arg */ |
716 | movq %rax,%rsi /* 2nd arg: syscall number */ | 716 | movq %rax,%rsi /* 2nd arg: syscall number */ |
717 | movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */ | 717 | movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */ |
718 | call __audit_syscall_entry | 718 | call __audit_syscall_entry |
719 | LOAD_ARGS 0 /* reload call-clobbered registers */ | 719 | LOAD_ARGS 0 /* reload call-clobbered registers */ |
720 | jmp system_call_fastpath | 720 | jmp system_call_fastpath |
721 | 721 | ||
722 | /* | 722 | /* |
723 | * Return fast path for syscall audit. Call __audit_syscall_exit() | 723 | * Return fast path for syscall audit. Call __audit_syscall_exit() |
724 | * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT | 724 | * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT |
725 | * masked off. | 725 | * masked off. |
726 | */ | 726 | */ |
727 | sysret_audit: | 727 | sysret_audit: |
728 | movq RAX-ARGOFFSET(%rsp),%rsi /* second arg, syscall return value */ | 728 | movq RAX-ARGOFFSET(%rsp),%rsi /* second arg, syscall return value */ |
729 | cmpq $-MAX_ERRNO,%rsi /* is it < -MAX_ERRNO? */ | 729 | cmpq $-MAX_ERRNO,%rsi /* is it < -MAX_ERRNO? */ |
730 | setbe %al /* 1 if so, 0 if not */ | 730 | setbe %al /* 1 if so, 0 if not */ |
731 | movzbl %al,%edi /* zero-extend that into %edi */ | 731 | movzbl %al,%edi /* zero-extend that into %edi */ |
732 | call __audit_syscall_exit | 732 | call __audit_syscall_exit |
733 | movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi | 733 | movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi |
734 | jmp sysret_check | 734 | jmp sysret_check |
735 | #endif /* CONFIG_AUDITSYSCALL */ | 735 | #endif /* CONFIG_AUDITSYSCALL */ |
736 | 736 | ||
737 | /* Do syscall tracing */ | 737 | /* Do syscall tracing */ |
738 | tracesys: | 738 | tracesys: |
739 | #ifdef CONFIG_AUDITSYSCALL | 739 | #ifdef CONFIG_AUDITSYSCALL |
740 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | 740 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
741 | jz auditsys | 741 | jz auditsys |
742 | #endif | 742 | #endif |
743 | SAVE_REST | 743 | SAVE_REST |
744 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ | 744 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
745 | FIXUP_TOP_OF_STACK %rdi | 745 | FIXUP_TOP_OF_STACK %rdi |
746 | movq %rsp,%rdi | 746 | movq %rsp,%rdi |
747 | call syscall_trace_enter | 747 | call syscall_trace_enter |
748 | /* | 748 | /* |
749 | * Reload arg registers from stack in case ptrace changed them. | 749 | * Reload arg registers from stack in case ptrace changed them. |
750 | * We don't reload %rax because syscall_trace_enter() returned | 750 | * We don't reload %rax because syscall_trace_enter() returned |
751 | * the value it wants us to use in the table lookup. | 751 | * the value it wants us to use in the table lookup. |
752 | */ | 752 | */ |
753 | LOAD_ARGS ARGOFFSET, 1 | 753 | LOAD_ARGS ARGOFFSET, 1 |
754 | RESTORE_REST | 754 | RESTORE_REST |
755 | #if __SYSCALL_MASK == ~0 | 755 | #if __SYSCALL_MASK == ~0 |
756 | cmpq $__NR_syscall_max,%rax | 756 | cmpq $__NR_syscall_max,%rax |
757 | #else | 757 | #else |
758 | andl $__SYSCALL_MASK,%eax | 758 | andl $__SYSCALL_MASK,%eax |
759 | cmpl $__NR_syscall_max,%eax | 759 | cmpl $__NR_syscall_max,%eax |
760 | #endif | 760 | #endif |
761 | ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ | 761 | ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ |
762 | movq %r10,%rcx /* fixup for C */ | 762 | movq %r10,%rcx /* fixup for C */ |
763 | call *sys_call_table(,%rax,8) | 763 | call *sys_call_table(,%rax,8) |
764 | movq %rax,RAX-ARGOFFSET(%rsp) | 764 | movq %rax,RAX-ARGOFFSET(%rsp) |
765 | /* Use IRET because user could have changed frame */ | 765 | /* Use IRET because user could have changed frame */ |
766 | 766 | ||
767 | /* | 767 | /* |
768 | * Syscall return path ending with IRET. | 768 | * Syscall return path ending with IRET. |
769 | * Has correct top of stack, but partial stack frame. | 769 | * Has correct top of stack, but partial stack frame. |
770 | */ | 770 | */ |
771 | GLOBAL(int_ret_from_sys_call) | 771 | GLOBAL(int_ret_from_sys_call) |
772 | DISABLE_INTERRUPTS(CLBR_NONE) | 772 | DISABLE_INTERRUPTS(CLBR_NONE) |
773 | TRACE_IRQS_OFF | 773 | TRACE_IRQS_OFF |
774 | movl $_TIF_ALLWORK_MASK,%edi | 774 | movl $_TIF_ALLWORK_MASK,%edi |
775 | /* edi: mask to check */ | 775 | /* edi: mask to check */ |
776 | GLOBAL(int_with_check) | 776 | GLOBAL(int_with_check) |
777 | LOCKDEP_SYS_EXIT_IRQ | 777 | LOCKDEP_SYS_EXIT_IRQ |
778 | GET_THREAD_INFO(%rcx) | 778 | GET_THREAD_INFO(%rcx) |
779 | movl TI_flags(%rcx),%edx | 779 | movl TI_flags(%rcx),%edx |
780 | andl %edi,%edx | 780 | andl %edi,%edx |
781 | jnz int_careful | 781 | jnz int_careful |
782 | andl $~TS_COMPAT,TI_status(%rcx) | 782 | andl $~TS_COMPAT,TI_status(%rcx) |
783 | jmp retint_swapgs | 783 | jmp retint_swapgs |
784 | 784 | ||
785 | /* Either reschedule or signal or syscall exit tracking needed. */ | 785 | /* Either reschedule or signal or syscall exit tracking needed. */ |
786 | /* First do a reschedule test. */ | 786 | /* First do a reschedule test. */ |
787 | /* edx: work, edi: workmask */ | 787 | /* edx: work, edi: workmask */ |
788 | int_careful: | 788 | int_careful: |
789 | bt $TIF_NEED_RESCHED,%edx | 789 | bt $TIF_NEED_RESCHED,%edx |
790 | jnc int_very_careful | 790 | jnc int_very_careful |
791 | TRACE_IRQS_ON | 791 | TRACE_IRQS_ON |
792 | ENABLE_INTERRUPTS(CLBR_NONE) | 792 | ENABLE_INTERRUPTS(CLBR_NONE) |
793 | pushq_cfi %rdi | 793 | pushq_cfi %rdi |
794 | SCHEDULE_USER | 794 | SCHEDULE_USER |
795 | popq_cfi %rdi | 795 | popq_cfi %rdi |
796 | DISABLE_INTERRUPTS(CLBR_NONE) | 796 | DISABLE_INTERRUPTS(CLBR_NONE) |
797 | TRACE_IRQS_OFF | 797 | TRACE_IRQS_OFF |
798 | jmp int_with_check | 798 | jmp int_with_check |
799 | 799 | ||
800 | /* handle signals and tracing -- both require a full stack frame */ | 800 | /* handle signals and tracing -- both require a full stack frame */ |
801 | int_very_careful: | 801 | int_very_careful: |
802 | TRACE_IRQS_ON | 802 | TRACE_IRQS_ON |
803 | ENABLE_INTERRUPTS(CLBR_NONE) | 803 | ENABLE_INTERRUPTS(CLBR_NONE) |
804 | int_check_syscall_exit_work: | 804 | int_check_syscall_exit_work: |
805 | SAVE_REST | 805 | SAVE_REST |
806 | /* Check for syscall exit trace */ | 806 | /* Check for syscall exit trace */ |
807 | testl $_TIF_WORK_SYSCALL_EXIT,%edx | 807 | testl $_TIF_WORK_SYSCALL_EXIT,%edx |
808 | jz int_signal | 808 | jz int_signal |
809 | pushq_cfi %rdi | 809 | pushq_cfi %rdi |
810 | leaq 8(%rsp),%rdi # &ptregs -> arg1 | 810 | leaq 8(%rsp),%rdi # &ptregs -> arg1 |
811 | call syscall_trace_leave | 811 | call syscall_trace_leave |
812 | popq_cfi %rdi | 812 | popq_cfi %rdi |
813 | andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi | 813 | andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi |
814 | jmp int_restore_rest | 814 | jmp int_restore_rest |
815 | 815 | ||
816 | int_signal: | 816 | int_signal: |
817 | testl $_TIF_DO_NOTIFY_MASK,%edx | 817 | testl $_TIF_DO_NOTIFY_MASK,%edx |
818 | jz 1f | 818 | jz 1f |
819 | movq %rsp,%rdi # &ptregs -> arg1 | 819 | movq %rsp,%rdi # &ptregs -> arg1 |
820 | xorl %esi,%esi # oldset -> arg2 | 820 | xorl %esi,%esi # oldset -> arg2 |
821 | call do_notify_resume | 821 | call do_notify_resume |
822 | 1: movl $_TIF_WORK_MASK,%edi | 822 | 1: movl $_TIF_WORK_MASK,%edi |
823 | int_restore_rest: | 823 | int_restore_rest: |
824 | RESTORE_REST | 824 | RESTORE_REST |
825 | DISABLE_INTERRUPTS(CLBR_NONE) | 825 | DISABLE_INTERRUPTS(CLBR_NONE) |
826 | TRACE_IRQS_OFF | 826 | TRACE_IRQS_OFF |
827 | jmp int_with_check | 827 | jmp int_with_check |
828 | CFI_ENDPROC | 828 | CFI_ENDPROC |
829 | END(system_call) | 829 | END(system_call) |
830 | 830 | ||
831 | /* | 831 | /* |
832 | * Certain special system calls need to save a complete stack frame. | 832 | * Certain special system calls need to save a complete stack frame. |
833 | */ | 833 | */ |
834 | .macro PTREGSCALL label,func,arg | 834 | .macro PTREGSCALL label,func,arg |
835 | ENTRY(\label) | 835 | ENTRY(\label) |
836 | PARTIAL_FRAME 1 8 /* offset 8: return address */ | 836 | PARTIAL_FRAME 1 8 /* offset 8: return address */ |
837 | subq $REST_SKIP, %rsp | 837 | subq $REST_SKIP, %rsp |
838 | CFI_ADJUST_CFA_OFFSET REST_SKIP | 838 | CFI_ADJUST_CFA_OFFSET REST_SKIP |
839 | call save_rest | 839 | call save_rest |
840 | DEFAULT_FRAME 0 8 /* offset 8: return address */ | 840 | DEFAULT_FRAME 0 8 /* offset 8: return address */ |
841 | leaq 8(%rsp), \arg /* pt_regs pointer */ | 841 | leaq 8(%rsp), \arg /* pt_regs pointer */ |
842 | call \func | 842 | call \func |
843 | jmp ptregscall_common | 843 | jmp ptregscall_common |
844 | CFI_ENDPROC | 844 | CFI_ENDPROC |
845 | END(\label) | 845 | END(\label) |
846 | .endm | 846 | .endm |
847 | 847 | ||
848 | .macro FORK_LIKE func | 848 | .macro FORK_LIKE func |
849 | ENTRY(stub_\func) | 849 | ENTRY(stub_\func) |
850 | CFI_STARTPROC | 850 | CFI_STARTPROC |
851 | popq %r11 /* save return address */ | 851 | popq %r11 /* save return address */ |
852 | PARTIAL_FRAME 0 | 852 | PARTIAL_FRAME 0 |
853 | SAVE_REST | 853 | SAVE_REST |
854 | pushq %r11 /* put it back on stack */ | 854 | pushq %r11 /* put it back on stack */ |
855 | FIXUP_TOP_OF_STACK %r11, 8 | 855 | FIXUP_TOP_OF_STACK %r11, 8 |
856 | DEFAULT_FRAME 0 8 /* offset 8: return address */ | 856 | DEFAULT_FRAME 0 8 /* offset 8: return address */ |
857 | call sys_\func | 857 | call sys_\func |
858 | RESTORE_TOP_OF_STACK %r11, 8 | 858 | RESTORE_TOP_OF_STACK %r11, 8 |
859 | ret $REST_SKIP /* pop extended registers */ | 859 | ret $REST_SKIP /* pop extended registers */ |
860 | CFI_ENDPROC | 860 | CFI_ENDPROC |
861 | END(stub_\func) | 861 | END(stub_\func) |
862 | .endm | 862 | .endm |
863 | 863 | ||
864 | FORK_LIKE clone | 864 | FORK_LIKE clone |
865 | FORK_LIKE fork | 865 | FORK_LIKE fork |
866 | FORK_LIKE vfork | 866 | FORK_LIKE vfork |
867 | PTREGSCALL stub_iopl, sys_iopl, %rsi | 867 | PTREGSCALL stub_iopl, sys_iopl, %rsi |
868 | 868 | ||
869 | ENTRY(ptregscall_common) | 869 | ENTRY(ptregscall_common) |
870 | DEFAULT_FRAME 1 8 /* offset 8: return address */ | 870 | DEFAULT_FRAME 1 8 /* offset 8: return address */ |
871 | RESTORE_TOP_OF_STACK %r11, 8 | 871 | RESTORE_TOP_OF_STACK %r11, 8 |
872 | movq_cfi_restore R15+8, r15 | 872 | movq_cfi_restore R15+8, r15 |
873 | movq_cfi_restore R14+8, r14 | 873 | movq_cfi_restore R14+8, r14 |
874 | movq_cfi_restore R13+8, r13 | 874 | movq_cfi_restore R13+8, r13 |
875 | movq_cfi_restore R12+8, r12 | 875 | movq_cfi_restore R12+8, r12 |
876 | movq_cfi_restore RBP+8, rbp | 876 | movq_cfi_restore RBP+8, rbp |
877 | movq_cfi_restore RBX+8, rbx | 877 | movq_cfi_restore RBX+8, rbx |
878 | ret $REST_SKIP /* pop extended registers */ | 878 | ret $REST_SKIP /* pop extended registers */ |
879 | CFI_ENDPROC | 879 | CFI_ENDPROC |
880 | END(ptregscall_common) | 880 | END(ptregscall_common) |
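Both the FORK_LIKE stubs and ptregscall_common above return with
"ret $REST_SKIP": ret with an immediate pops the return address first and
then adds the immediate to %rsp, discarding the six register slots pushed
by SAVE_REST in the same instruction. A roughly equivalent sketch (using
%r11 as a scratch register):

	popq	%r11			# fetch the return address
	addq	$REST_SKIP, %rsp	# drop the saved-register area
	jmp	*%r11			# resume in the caller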
881 | 881 | ||
882 | ENTRY(stub_execve) | 882 | ENTRY(stub_execve) |
883 | CFI_STARTPROC | 883 | CFI_STARTPROC |
884 | addq $8, %rsp | 884 | addq $8, %rsp |
885 | PARTIAL_FRAME 0 | 885 | PARTIAL_FRAME 0 |
886 | SAVE_REST | 886 | SAVE_REST |
887 | FIXUP_TOP_OF_STACK %r11 | 887 | FIXUP_TOP_OF_STACK %r11 |
888 | call sys_execve | 888 | call sys_execve |
889 | RESTORE_TOP_OF_STACK %r11 | 889 | RESTORE_TOP_OF_STACK %r11 |
890 | movq %rax,RAX(%rsp) | 890 | movq %rax,RAX(%rsp) |
891 | RESTORE_REST | 891 | RESTORE_REST |
892 | jmp int_ret_from_sys_call | 892 | jmp int_ret_from_sys_call |
893 | CFI_ENDPROC | 893 | CFI_ENDPROC |
894 | END(stub_execve) | 894 | END(stub_execve) |
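The "addq $8, %rsp" at the top of this stub (and of stub_rt_sigreturn
below) discards the return address pushed by the table dispatch: these
stubs never ret back to the fast path, they exit through
int_ret_from_sys_call, so the slot has to be dropped by hand. Sketch of
the pairing:

	call	*sys_call_table(,%rax,8)	# dispatcher pushes a return address
	...
	addq	$8, %rsp			# stub discards it and instead
	jmp	int_ret_from_sys_call		# leaves via the IRET path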
895 | 895 | ||
896 | /* | 896 | /* |
897 | * sigreturn is special because it needs to restore all registers on return. | 897 | * sigreturn is special because it needs to restore all registers on return. |
898 | * This cannot be done with SYSRET, so use the IRET return path instead. | 898 | * This cannot be done with SYSRET, so use the IRET return path instead. |
899 | */ | 899 | */ |
900 | ENTRY(stub_rt_sigreturn) | 900 | ENTRY(stub_rt_sigreturn) |
901 | CFI_STARTPROC | 901 | CFI_STARTPROC |
902 | addq $8, %rsp | 902 | addq $8, %rsp |
903 | PARTIAL_FRAME 0 | 903 | PARTIAL_FRAME 0 |
904 | SAVE_REST | 904 | SAVE_REST |
905 | movq %rsp,%rdi | 905 | movq %rsp,%rdi |
906 | FIXUP_TOP_OF_STACK %r11 | 906 | FIXUP_TOP_OF_STACK %r11 |
907 | call sys_rt_sigreturn | 907 | call sys_rt_sigreturn |
908 | movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer | 908 | movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer |
909 | RESTORE_REST | 909 | RESTORE_REST |
910 | jmp int_ret_from_sys_call | 910 | jmp int_ret_from_sys_call |
911 | CFI_ENDPROC | 911 | CFI_ENDPROC |
912 | END(stub_rt_sigreturn) | 912 | END(stub_rt_sigreturn) |
913 | 913 | ||
914 | #ifdef CONFIG_X86_X32_ABI | 914 | #ifdef CONFIG_X86_X32_ABI |
915 | PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx | ||
916 | |||
917 | ENTRY(stub_x32_rt_sigreturn) | 915 | ENTRY(stub_x32_rt_sigreturn) |
918 | CFI_STARTPROC | 916 | CFI_STARTPROC |
919 | addq $8, %rsp | 917 | addq $8, %rsp |
920 | PARTIAL_FRAME 0 | 918 | PARTIAL_FRAME 0 |
921 | SAVE_REST | 919 | SAVE_REST |
922 | movq %rsp,%rdi | 920 | movq %rsp,%rdi |
923 | FIXUP_TOP_OF_STACK %r11 | 921 | FIXUP_TOP_OF_STACK %r11 |
924 | call sys32_x32_rt_sigreturn | 922 | call sys32_x32_rt_sigreturn |
925 | movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer | 923 | movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer |
926 | RESTORE_REST | 924 | RESTORE_REST |
927 | jmp int_ret_from_sys_call | 925 | jmp int_ret_from_sys_call |
928 | CFI_ENDPROC | 926 | CFI_ENDPROC |
929 | END(stub_x32_rt_sigreturn) | 927 | END(stub_x32_rt_sigreturn) |
930 | 928 | ||
931 | ENTRY(stub_x32_execve) | 929 | ENTRY(stub_x32_execve) |
932 | CFI_STARTPROC | 930 | CFI_STARTPROC |
933 | addq $8, %rsp | 931 | addq $8, %rsp |
934 | PARTIAL_FRAME 0 | 932 | PARTIAL_FRAME 0 |
935 | SAVE_REST | 933 | SAVE_REST |
936 | FIXUP_TOP_OF_STACK %r11 | 934 | FIXUP_TOP_OF_STACK %r11 |
937 | call compat_sys_execve | 935 | call compat_sys_execve |
938 | RESTORE_TOP_OF_STACK %r11 | 936 | RESTORE_TOP_OF_STACK %r11 |
939 | movq %rax,RAX(%rsp) | 937 | movq %rax,RAX(%rsp) |
940 | RESTORE_REST | 938 | RESTORE_REST |
941 | jmp int_ret_from_sys_call | 939 | jmp int_ret_from_sys_call |
942 | CFI_ENDPROC | 940 | CFI_ENDPROC |
943 | END(stub_x32_execve) | 941 | END(stub_x32_execve) |
944 | 942 | ||
945 | #endif | 943 | #endif |
946 | 944 | ||
947 | /* | 945 | /* |
948 | * Build the entry stubs and pointer table with some assembler magic. | 946 | * Build the entry stubs and pointer table with some assembler magic. |
949 | * We pack 7 stubs into a single 32-byte chunk, which will fit in a | 947 | * We pack 7 stubs into a single 32-byte chunk, which will fit in a |
950 | * single cache line on all modern x86 implementations. | 948 | * single cache line on all modern x86 implementations. |
951 | */ | 949 | */ |
952 | .section .init.rodata,"a" | 950 | .section .init.rodata,"a" |
953 | ENTRY(interrupt) | 951 | ENTRY(interrupt) |
954 | .section .entry.text | 952 | .section .entry.text |
955 | .p2align 5 | 953 | .p2align 5 |
956 | .p2align CONFIG_X86_L1_CACHE_SHIFT | 954 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
957 | ENTRY(irq_entries_start) | 955 | ENTRY(irq_entries_start) |
958 | INTR_FRAME | 956 | INTR_FRAME |
959 | vector=FIRST_EXTERNAL_VECTOR | 957 | vector=FIRST_EXTERNAL_VECTOR |
960 | .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7 | 958 | .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7 |
961 | .balign 32 | 959 | .balign 32 |
962 | .rept 7 | 960 | .rept 7 |
963 | .if vector < NR_VECTORS | 961 | .if vector < NR_VECTORS |
964 | .if vector <> FIRST_EXTERNAL_VECTOR | 962 | .if vector <> FIRST_EXTERNAL_VECTOR |
965 | CFI_ADJUST_CFA_OFFSET -8 | 963 | CFI_ADJUST_CFA_OFFSET -8 |
966 | .endif | 964 | .endif |
967 | 1: pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ | 965 | 1: pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ |
968 | .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 | 966 | .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 |
969 | jmp 2f | 967 | jmp 2f |
970 | .endif | 968 | .endif |
971 | .previous | 969 | .previous |
972 | .quad 1b | 970 | .quad 1b |
973 | .section .entry.text | 971 | .section .entry.text |
974 | vector=vector+1 | 972 | vector=vector+1 |
975 | .endif | 973 | .endif |
976 | .endr | 974 | .endr |
977 | 2: jmp common_interrupt | 975 | 2: jmp common_interrupt |
978 | .endr | 976 | .endr |
979 | CFI_ENDPROC | 977 | CFI_ENDPROC |
980 | END(irq_entries_start) | 978 | END(irq_entries_start) |
981 | 979 | ||
982 | .previous | 980 | .previous |
983 | END(interrupt) | 981 | END(interrupt) |
984 | .previous | 982 | .previous |
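The arithmetic behind the "always in signed byte range" note: for vector in
[0, 255], ~vector = -(vector + 1) lies in [-256, -1], and adding 0x80
shifts that to [-128, 127], so each stub's push encodes as a two-byte
pushq imm8 and seven stubs plus the trailing jmp fit the 32-byte chunk.
common_interrupt (below) then undoes the bias:

	addq	$-0x80,(%rsp)	# back to ~vector in [-256,-1]; the C handler
				# recovers the vector by complementing it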
985 | 983 | ||
986 | /* | 984 | /* |
987 | * Interrupt entry/exit. | 985 | * Interrupt entry/exit. |
988 | * | 986 | * |
989 | * Interrupt entry points save only the callee-clobbered registers in the fast path. | 987 | * Interrupt entry points save only the callee-clobbered registers in the fast path. |
990 | * | 988 | * |
991 | * Entry runs with interrupts off. | 989 | * Entry runs with interrupts off. |
992 | */ | 990 | */ |
993 | 991 | ||
994 | /* 0(%rsp): ~(interrupt number) */ | 992 | /* 0(%rsp): ~(interrupt number) */ |
995 | .macro interrupt func | 993 | .macro interrupt func |
996 | /* reserve pt_regs for scratch regs and rbp */ | 994 | /* reserve pt_regs for scratch regs and rbp */ |
997 | subq $ORIG_RAX-RBP, %rsp | 995 | subq $ORIG_RAX-RBP, %rsp |
998 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP | 996 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP |
999 | SAVE_ARGS_IRQ | 997 | SAVE_ARGS_IRQ |
1000 | call \func | 998 | call \func |
1001 | .endm | 999 | .endm |
1002 | 1000 | ||
1003 | /* | 1001 | /* |
1004 | * Interrupt entry/exit should be protected against kprobes | 1002 | * Interrupt entry/exit should be protected against kprobes |
1005 | */ | 1003 | */ |
1006 | .pushsection .kprobes.text, "ax" | 1004 | .pushsection .kprobes.text, "ax" |
1007 | /* | 1005 | /* |
1008 | * The interrupt stubs push (~vector+0x80) onto the stack and | 1006 | * The interrupt stubs push (~vector+0x80) onto the stack and |
1009 | * then jump to common_interrupt. | 1007 | * then jump to common_interrupt. |
1010 | */ | 1008 | */ |
1011 | .p2align CONFIG_X86_L1_CACHE_SHIFT | 1009 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
1012 | common_interrupt: | 1010 | common_interrupt: |
1013 | XCPT_FRAME | 1011 | XCPT_FRAME |
1014 | ASM_CLAC | 1012 | ASM_CLAC |
1015 | addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ | 1013 | addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ |
1016 | interrupt do_IRQ | 1014 | interrupt do_IRQ |
1017 | /* 0(%rsp): old_rsp-ARGOFFSET */ | 1015 | /* 0(%rsp): old_rsp-ARGOFFSET */ |
1018 | ret_from_intr: | 1016 | ret_from_intr: |
1019 | DISABLE_INTERRUPTS(CLBR_NONE) | 1017 | DISABLE_INTERRUPTS(CLBR_NONE) |
1020 | TRACE_IRQS_OFF | 1018 | TRACE_IRQS_OFF |
1021 | decl PER_CPU_VAR(irq_count) | 1019 | decl PER_CPU_VAR(irq_count) |
1022 | 1020 | ||
1023 | /* Restore saved previous stack */ | 1021 | /* Restore saved previous stack */ |
1024 | popq %rsi | 1022 | popq %rsi |
1025 | CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */ | 1023 | CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */ |
1026 | leaq ARGOFFSET-RBP(%rsi), %rsp | 1024 | leaq ARGOFFSET-RBP(%rsi), %rsp |
1027 | CFI_DEF_CFA_REGISTER rsp | 1025 | CFI_DEF_CFA_REGISTER rsp |
1028 | CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET | 1026 | CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET |
1029 | 1027 | ||
1030 | exit_intr: | 1028 | exit_intr: |
1031 | GET_THREAD_INFO(%rcx) | 1029 | GET_THREAD_INFO(%rcx) |
1032 | testl $3,CS-ARGOFFSET(%rsp) | 1030 | testl $3,CS-ARGOFFSET(%rsp) |
1033 | je retint_kernel | 1031 | je retint_kernel |
1034 | 1032 | ||
1035 | /* Interrupt came from user space */ | 1033 | /* Interrupt came from user space */ |
1036 | /* | 1034 | /* |
1037 | * Has a correct top of stack, but a partial stack frame | 1035 | * Has a correct top of stack, but a partial stack frame |
1038 | * %rcx: thread info. Interrupts off. | 1036 | * %rcx: thread info. Interrupts off. |
1039 | */ | 1037 | */ |
1040 | retint_with_reschedule: | 1038 | retint_with_reschedule: |
1041 | movl $_TIF_WORK_MASK,%edi | 1039 | movl $_TIF_WORK_MASK,%edi |
1042 | retint_check: | 1040 | retint_check: |
1043 | LOCKDEP_SYS_EXIT_IRQ | 1041 | LOCKDEP_SYS_EXIT_IRQ |
1044 | movl TI_flags(%rcx),%edx | 1042 | movl TI_flags(%rcx),%edx |
1045 | andl %edi,%edx | 1043 | andl %edi,%edx |
1046 | CFI_REMEMBER_STATE | 1044 | CFI_REMEMBER_STATE |
1047 | jnz retint_careful | 1045 | jnz retint_careful |
1048 | 1046 | ||
1049 | retint_swapgs: /* return to user-space */ | 1047 | retint_swapgs: /* return to user-space */ |
1050 | /* | 1048 | /* |
1051 | * The iretq could re-enable interrupts: | 1049 | * The iretq could re-enable interrupts: |
1052 | */ | 1050 | */ |
1053 | DISABLE_INTERRUPTS(CLBR_ANY) | 1051 | DISABLE_INTERRUPTS(CLBR_ANY) |
1054 | TRACE_IRQS_IRETQ | 1052 | TRACE_IRQS_IRETQ |
1055 | SWAPGS | 1053 | SWAPGS |
1056 | jmp restore_args | 1054 | jmp restore_args |
1057 | 1055 | ||
1058 | retint_restore_args: /* return to kernel space */ | 1056 | retint_restore_args: /* return to kernel space */ |
1059 | DISABLE_INTERRUPTS(CLBR_ANY) | 1057 | DISABLE_INTERRUPTS(CLBR_ANY) |
1060 | /* | 1058 | /* |
1061 | * The iretq could re-enable interrupts: | 1059 | * The iretq could re-enable interrupts: |
1062 | */ | 1060 | */ |
1063 | TRACE_IRQS_IRETQ | 1061 | TRACE_IRQS_IRETQ |
1064 | restore_args: | 1062 | restore_args: |
1065 | RESTORE_ARGS 1,8,1 | 1063 | RESTORE_ARGS 1,8,1 |
1066 | 1064 | ||
1067 | irq_return: | 1065 | irq_return: |
1068 | INTERRUPT_RETURN | 1066 | INTERRUPT_RETURN |
1069 | _ASM_EXTABLE(irq_return, bad_iret) | 1067 | _ASM_EXTABLE(irq_return, bad_iret) |
1070 | 1068 | ||
1071 | #ifdef CONFIG_PARAVIRT | 1069 | #ifdef CONFIG_PARAVIRT |
1072 | ENTRY(native_iret) | 1070 | ENTRY(native_iret) |
1073 | iretq | 1071 | iretq |
1074 | _ASM_EXTABLE(native_iret, bad_iret) | 1072 | _ASM_EXTABLE(native_iret, bad_iret) |
1075 | #endif | 1073 | #endif |
1076 | 1074 | ||
1077 | .section .fixup,"ax" | 1075 | .section .fixup,"ax" |
1078 | bad_iret: | 1076 | bad_iret: |
1079 | /* | 1077 | /* |
1080 | * The iret traps when the %cs or %ss being restored is bogus. | 1078 | * The iret traps when the %cs or %ss being restored is bogus. |
1081 | * We've lost the original trap vector and error code. | 1079 | * We've lost the original trap vector and error code. |
1082 | * #GP is the most likely one to get for an invalid selector. | 1080 | * #GP is the most likely one to get for an invalid selector. |
1083 | * So pretend we completed the iret and took the #GPF in user mode. | 1081 | * So pretend we completed the iret and took the #GPF in user mode. |
1084 | * | 1082 | * |
1085 | * We are now running with the kernel GS after exception recovery. | 1083 | * We are now running with the kernel GS after exception recovery. |
1086 | * But error_entry expects us to have user GS to match the user %cs, | 1084 | * But error_entry expects us to have user GS to match the user %cs, |
1087 | * so swap back. | 1085 | * so swap back. |
1088 | */ | 1086 | */ |
1089 | pushq $0 | 1087 | pushq $0 |
1090 | 1088 | ||
1091 | SWAPGS | 1089 | SWAPGS |
1092 | jmp general_protection | 1090 | jmp general_protection |
1093 | 1091 | ||
1094 | .previous | 1092 | .previous |
1095 | 1093 | ||
1096 | /* edi: workmask, edx: work */ | 1094 | /* edi: workmask, edx: work */ |
1097 | retint_careful: | 1095 | retint_careful: |
1098 | CFI_RESTORE_STATE | 1096 | CFI_RESTORE_STATE |
1099 | bt $TIF_NEED_RESCHED,%edx | 1097 | bt $TIF_NEED_RESCHED,%edx |
1100 | jnc retint_signal | 1098 | jnc retint_signal |
1101 | TRACE_IRQS_ON | 1099 | TRACE_IRQS_ON |
1102 | ENABLE_INTERRUPTS(CLBR_NONE) | 1100 | ENABLE_INTERRUPTS(CLBR_NONE) |
1103 | pushq_cfi %rdi | 1101 | pushq_cfi %rdi |
1104 | SCHEDULE_USER | 1102 | SCHEDULE_USER |
1105 | popq_cfi %rdi | 1103 | popq_cfi %rdi |
1106 | GET_THREAD_INFO(%rcx) | 1104 | GET_THREAD_INFO(%rcx) |
1107 | DISABLE_INTERRUPTS(CLBR_NONE) | 1105 | DISABLE_INTERRUPTS(CLBR_NONE) |
1108 | TRACE_IRQS_OFF | 1106 | TRACE_IRQS_OFF |
1109 | jmp retint_check | 1107 | jmp retint_check |
1110 | 1108 | ||
1111 | retint_signal: | 1109 | retint_signal: |
1112 | testl $_TIF_DO_NOTIFY_MASK,%edx | 1110 | testl $_TIF_DO_NOTIFY_MASK,%edx |
1113 | jz retint_swapgs | 1111 | jz retint_swapgs |
1114 | TRACE_IRQS_ON | 1112 | TRACE_IRQS_ON |
1115 | ENABLE_INTERRUPTS(CLBR_NONE) | 1113 | ENABLE_INTERRUPTS(CLBR_NONE) |
1116 | SAVE_REST | 1114 | SAVE_REST |
1117 | movq $-1,ORIG_RAX(%rsp) | 1115 | movq $-1,ORIG_RAX(%rsp) |
1118 | xorl %esi,%esi # oldset | 1116 | xorl %esi,%esi # oldset |
1119 | movq %rsp,%rdi # &pt_regs | 1117 | movq %rsp,%rdi # &pt_regs |
1120 | call do_notify_resume | 1118 | call do_notify_resume |
1121 | RESTORE_REST | 1119 | RESTORE_REST |
1122 | DISABLE_INTERRUPTS(CLBR_NONE) | 1120 | DISABLE_INTERRUPTS(CLBR_NONE) |
1123 | TRACE_IRQS_OFF | 1121 | TRACE_IRQS_OFF |
1124 | GET_THREAD_INFO(%rcx) | 1122 | GET_THREAD_INFO(%rcx) |
1125 | jmp retint_with_reschedule | 1123 | jmp retint_with_reschedule |
1126 | 1124 | ||
1127 | #ifdef CONFIG_PREEMPT | 1125 | #ifdef CONFIG_PREEMPT |
1128 | /* Returning to kernel space. Check if we need preemption */ | 1126 | /* Returning to kernel space. Check if we need preemption */ |
1129 | /* rcx: threadinfo. interrupts off. */ | 1127 | /* rcx: threadinfo. interrupts off. */ |
1130 | ENTRY(retint_kernel) | 1128 | ENTRY(retint_kernel) |
1131 | cmpl $0,TI_preempt_count(%rcx) | 1129 | cmpl $0,TI_preempt_count(%rcx) |
1132 | jnz retint_restore_args | 1130 | jnz retint_restore_args |
1133 | bt $TIF_NEED_RESCHED,TI_flags(%rcx) | 1131 | bt $TIF_NEED_RESCHED,TI_flags(%rcx) |
1134 | jnc retint_restore_args | 1132 | jnc retint_restore_args |
1135 | bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ | 1133 | bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ |
1136 | jnc retint_restore_args | 1134 | jnc retint_restore_args |
1137 | call preempt_schedule_irq | 1135 | call preempt_schedule_irq |
1138 | jmp exit_intr | 1136 | jmp exit_intr |
1139 | #endif | 1137 | #endif |
1140 | 1138 | ||
1141 | CFI_ENDPROC | 1139 | CFI_ENDPROC |
1142 | END(common_interrupt) | 1140 | END(common_interrupt) |
1143 | /* | 1141 | /* |
1144 | * End of kprobes section | 1142 | * End of kprobes section |
1145 | */ | 1143 | */ |
1146 | .popsection | 1144 | .popsection |
1147 | 1145 | ||
1148 | /* | 1146 | /* |
1149 | * APIC interrupts. | 1147 | * APIC interrupts. |
1150 | */ | 1148 | */ |
1151 | .macro apicinterrupt num sym do_sym | 1149 | .macro apicinterrupt num sym do_sym |
1152 | ENTRY(\sym) | 1150 | ENTRY(\sym) |
1153 | INTR_FRAME | 1151 | INTR_FRAME |
1154 | ASM_CLAC | 1152 | ASM_CLAC |
1155 | pushq_cfi $~(\num) | 1153 | pushq_cfi $~(\num) |
1156 | .Lcommon_\sym: | 1154 | .Lcommon_\sym: |
1157 | interrupt \do_sym | 1155 | interrupt \do_sym |
1158 | jmp ret_from_intr | 1156 | jmp ret_from_intr |
1159 | CFI_ENDPROC | 1157 | CFI_ENDPROC |
1160 | END(\sym) | 1158 | END(\sym) |
1161 | .endm | 1159 | .endm |
1162 | 1160 | ||
1163 | #ifdef CONFIG_SMP | 1161 | #ifdef CONFIG_SMP |
1164 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \ | 1162 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \ |
1165 | irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt | 1163 | irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt |
1166 | apicinterrupt REBOOT_VECTOR \ | 1164 | apicinterrupt REBOOT_VECTOR \ |
1167 | reboot_interrupt smp_reboot_interrupt | 1165 | reboot_interrupt smp_reboot_interrupt |
1168 | #endif | 1166 | #endif |
1169 | 1167 | ||
1170 | #ifdef CONFIG_X86_UV | 1168 | #ifdef CONFIG_X86_UV |
1171 | apicinterrupt UV_BAU_MESSAGE \ | 1169 | apicinterrupt UV_BAU_MESSAGE \ |
1172 | uv_bau_message_intr1 uv_bau_message_interrupt | 1170 | uv_bau_message_intr1 uv_bau_message_interrupt |
1173 | #endif | 1171 | #endif |
1174 | apicinterrupt LOCAL_TIMER_VECTOR \ | 1172 | apicinterrupt LOCAL_TIMER_VECTOR \ |
1175 | apic_timer_interrupt smp_apic_timer_interrupt | 1173 | apic_timer_interrupt smp_apic_timer_interrupt |
1176 | apicinterrupt X86_PLATFORM_IPI_VECTOR \ | 1174 | apicinterrupt X86_PLATFORM_IPI_VECTOR \ |
1177 | x86_platform_ipi smp_x86_platform_ipi | 1175 | x86_platform_ipi smp_x86_platform_ipi |
1178 | 1176 | ||
1179 | apicinterrupt THRESHOLD_APIC_VECTOR \ | 1177 | apicinterrupt THRESHOLD_APIC_VECTOR \ |
1180 | threshold_interrupt smp_threshold_interrupt | 1178 | threshold_interrupt smp_threshold_interrupt |
1181 | apicinterrupt THERMAL_APIC_VECTOR \ | 1179 | apicinterrupt THERMAL_APIC_VECTOR \ |
1182 | thermal_interrupt smp_thermal_interrupt | 1180 | thermal_interrupt smp_thermal_interrupt |
1183 | 1181 | ||
1184 | #ifdef CONFIG_SMP | 1182 | #ifdef CONFIG_SMP |
1185 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ | 1183 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ |
1186 | call_function_single_interrupt smp_call_function_single_interrupt | 1184 | call_function_single_interrupt smp_call_function_single_interrupt |
1187 | apicinterrupt CALL_FUNCTION_VECTOR \ | 1185 | apicinterrupt CALL_FUNCTION_VECTOR \ |
1188 | call_function_interrupt smp_call_function_interrupt | 1186 | call_function_interrupt smp_call_function_interrupt |
1189 | apicinterrupt RESCHEDULE_VECTOR \ | 1187 | apicinterrupt RESCHEDULE_VECTOR \ |
1190 | reschedule_interrupt smp_reschedule_interrupt | 1188 | reschedule_interrupt smp_reschedule_interrupt |
1191 | #endif | 1189 | #endif |
1192 | 1190 | ||
1193 | apicinterrupt ERROR_APIC_VECTOR \ | 1191 | apicinterrupt ERROR_APIC_VECTOR \ |
1194 | error_interrupt smp_error_interrupt | 1192 | error_interrupt smp_error_interrupt |
1195 | apicinterrupt SPURIOUS_APIC_VECTOR \ | 1193 | apicinterrupt SPURIOUS_APIC_VECTOR \ |
1196 | spurious_interrupt smp_spurious_interrupt | 1194 | spurious_interrupt smp_spurious_interrupt |
1197 | 1195 | ||
1198 | #ifdef CONFIG_IRQ_WORK | 1196 | #ifdef CONFIG_IRQ_WORK |
1199 | apicinterrupt IRQ_WORK_VECTOR \ | 1197 | apicinterrupt IRQ_WORK_VECTOR \ |
1200 | irq_work_interrupt smp_irq_work_interrupt | 1198 | irq_work_interrupt smp_irq_work_interrupt |
1201 | #endif | 1199 | #endif |
1202 | 1200 | ||
1203 | /* | 1201 | /* |
1204 | * Exception entry points. | 1202 | * Exception entry points. |
1205 | */ | 1203 | */ |
1206 | .macro zeroentry sym do_sym | 1204 | .macro zeroentry sym do_sym |
1207 | ENTRY(\sym) | 1205 | ENTRY(\sym) |
1208 | INTR_FRAME | 1206 | INTR_FRAME |
1209 | ASM_CLAC | 1207 | ASM_CLAC |
1210 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1208 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1211 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ | 1209 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
1212 | subq $ORIG_RAX-R15, %rsp | 1210 | subq $ORIG_RAX-R15, %rsp |
1213 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 | 1211 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1214 | call error_entry | 1212 | call error_entry |
1215 | DEFAULT_FRAME 0 | 1213 | DEFAULT_FRAME 0 |
1216 | movq %rsp,%rdi /* pt_regs pointer */ | 1214 | movq %rsp,%rdi /* pt_regs pointer */ |
1217 | xorl %esi,%esi /* no error code */ | 1215 | xorl %esi,%esi /* no error code */ |
1218 | call \do_sym | 1216 | call \do_sym |
1219 | jmp error_exit /* %ebx: no swapgs flag */ | 1217 | jmp error_exit /* %ebx: no swapgs flag */ |
1220 | CFI_ENDPROC | 1218 | CFI_ENDPROC |
1221 | END(\sym) | 1219 | END(\sym) |
1222 | .endm | 1220 | .endm |
1223 | 1221 | ||
1224 | .macro paranoidzeroentry sym do_sym | 1222 | .macro paranoidzeroentry sym do_sym |
1225 | ENTRY(\sym) | 1223 | ENTRY(\sym) |
1226 | INTR_FRAME | 1224 | INTR_FRAME |
1227 | ASM_CLAC | 1225 | ASM_CLAC |
1228 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1226 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1229 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ | 1227 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
1230 | subq $ORIG_RAX-R15, %rsp | 1228 | subq $ORIG_RAX-R15, %rsp |
1231 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 | 1229 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1232 | call save_paranoid | 1230 | call save_paranoid |
1233 | TRACE_IRQS_OFF | 1231 | TRACE_IRQS_OFF |
1234 | movq %rsp,%rdi /* pt_regs pointer */ | 1232 | movq %rsp,%rdi /* pt_regs pointer */ |
1235 | xorl %esi,%esi /* no error code */ | 1233 | xorl %esi,%esi /* no error code */ |
1236 | call \do_sym | 1234 | call \do_sym |
1237 | jmp paranoid_exit /* %ebx: no swapgs flag */ | 1235 | jmp paranoid_exit /* %ebx: no swapgs flag */ |
1238 | CFI_ENDPROC | 1236 | CFI_ENDPROC |
1239 | END(\sym) | 1237 | END(\sym) |
1240 | .endm | 1238 | .endm |
1241 | 1239 | ||
1242 | #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) | 1240 | #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) |
1243 | .macro paranoidzeroentry_ist sym do_sym ist | 1241 | .macro paranoidzeroentry_ist sym do_sym ist |
1244 | ENTRY(\sym) | 1242 | ENTRY(\sym) |
1245 | INTR_FRAME | 1243 | INTR_FRAME |
1246 | ASM_CLAC | 1244 | ASM_CLAC |
1247 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1245 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1248 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ | 1246 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
1249 | subq $ORIG_RAX-R15, %rsp | 1247 | subq $ORIG_RAX-R15, %rsp |
1250 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 | 1248 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1251 | call save_paranoid | 1249 | call save_paranoid |
1252 | TRACE_IRQS_OFF_DEBUG | 1250 | TRACE_IRQS_OFF_DEBUG |
1253 | movq %rsp,%rdi /* pt_regs pointer */ | 1251 | movq %rsp,%rdi /* pt_regs pointer */ |
1254 | xorl %esi,%esi /* no error code */ | 1252 | xorl %esi,%esi /* no error code */ |
1255 | subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) | 1253 | subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) |
1256 | call \do_sym | 1254 | call \do_sym |
1257 | addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) | 1255 | addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) |
1258 | jmp paranoid_exit /* %ebx: no swapgs flag */ | 1256 | jmp paranoid_exit /* %ebx: no swapgs flag */ |
1259 | CFI_ENDPROC | 1257 | CFI_ENDPROC |
1260 | END(\sym) | 1258 | END(\sym) |
1261 | .endm | 1259 | .endm |
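The sub/add pair around "call \do_sym" above temporarily moves this CPU's
IST pointer in the TSS down by one exception stack, so that a nested
exception on the same IST vector (e.g. a debug trap taken inside the debug
handler) gets a fresh stack instead of clobbering the live frame:

	subq	$EXCEPTION_STKSZ, INIT_TSS_IST(\ist)	# nested entry lands lower
	call	\do_sym
	addq	$EXCEPTION_STKSZ, INIT_TSS_IST(\ist)	# restore for the next one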
1262 | 1260 | ||
1263 | .macro errorentry sym do_sym | 1261 | .macro errorentry sym do_sym |
1264 | ENTRY(\sym) | 1262 | ENTRY(\sym) |
1265 | XCPT_FRAME | 1263 | XCPT_FRAME |
1266 | ASM_CLAC | 1264 | ASM_CLAC |
1267 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1265 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1268 | subq $ORIG_RAX-R15, %rsp | 1266 | subq $ORIG_RAX-R15, %rsp |
1269 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 | 1267 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1270 | call error_entry | 1268 | call error_entry |
1271 | DEFAULT_FRAME 0 | 1269 | DEFAULT_FRAME 0 |
1272 | movq %rsp,%rdi /* pt_regs pointer */ | 1270 | movq %rsp,%rdi /* pt_regs pointer */ |
1273 | movq ORIG_RAX(%rsp),%rsi /* get error code */ | 1271 | movq ORIG_RAX(%rsp),%rsi /* get error code */ |
1274 | movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ | 1272 | movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ |
1275 | call \do_sym | 1273 | call \do_sym |
1276 | jmp error_exit /* %ebx: no swapgs flag */ | 1274 | jmp error_exit /* %ebx: no swapgs flag */ |
1277 | CFI_ENDPROC | 1275 | CFI_ENDPROC |
1278 | END(\sym) | 1276 | END(\sym) |
1279 | .endm | 1277 | .endm |
1280 | 1278 | ||
1281 | /* error code is on the stack already */ | 1279 | /* error code is on the stack already */ |
1282 | .macro paranoiderrorentry sym do_sym | 1280 | .macro paranoiderrorentry sym do_sym |
1283 | ENTRY(\sym) | 1281 | ENTRY(\sym) |
1284 | XCPT_FRAME | 1282 | XCPT_FRAME |
1285 | ASM_CLAC | 1283 | ASM_CLAC |
1286 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1284 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1287 | subq $ORIG_RAX-R15, %rsp | 1285 | subq $ORIG_RAX-R15, %rsp |
1288 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 | 1286 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1289 | call save_paranoid | 1287 | call save_paranoid |
1290 | DEFAULT_FRAME 0 | 1288 | DEFAULT_FRAME 0 |
1291 | TRACE_IRQS_OFF | 1289 | TRACE_IRQS_OFF |
1292 | movq %rsp,%rdi /* pt_regs pointer */ | 1290 | movq %rsp,%rdi /* pt_regs pointer */ |
1293 | movq ORIG_RAX(%rsp),%rsi /* get error code */ | 1291 | movq ORIG_RAX(%rsp),%rsi /* get error code */ |
1294 | movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ | 1292 | movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ |
1295 | call \do_sym | 1293 | call \do_sym |
1296 | jmp paranoid_exit /* %ebx: no swapgs flag */ | 1294 | jmp paranoid_exit /* %ebx: no swapgs flag */ |
1297 | CFI_ENDPROC | 1295 | CFI_ENDPROC |
1298 | END(\sym) | 1296 | END(\sym) |
1299 | .endm | 1297 | .endm |
1300 | 1298 | ||
1301 | zeroentry divide_error do_divide_error | 1299 | zeroentry divide_error do_divide_error |
1302 | zeroentry overflow do_overflow | 1300 | zeroentry overflow do_overflow |
1303 | zeroentry bounds do_bounds | 1301 | zeroentry bounds do_bounds |
1304 | zeroentry invalid_op do_invalid_op | 1302 | zeroentry invalid_op do_invalid_op |
1305 | zeroentry device_not_available do_device_not_available | 1303 | zeroentry device_not_available do_device_not_available |
1306 | paranoiderrorentry double_fault do_double_fault | 1304 | paranoiderrorentry double_fault do_double_fault |
1307 | zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun | 1305 | zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun |
1308 | errorentry invalid_TSS do_invalid_TSS | 1306 | errorentry invalid_TSS do_invalid_TSS |
1309 | errorentry segment_not_present do_segment_not_present | 1307 | errorentry segment_not_present do_segment_not_present |
1310 | zeroentry spurious_interrupt_bug do_spurious_interrupt_bug | 1308 | zeroentry spurious_interrupt_bug do_spurious_interrupt_bug |
1311 | zeroentry coprocessor_error do_coprocessor_error | 1309 | zeroentry coprocessor_error do_coprocessor_error |
1312 | errorentry alignment_check do_alignment_check | 1310 | errorentry alignment_check do_alignment_check |
1313 | zeroentry simd_coprocessor_error do_simd_coprocessor_error | 1311 | zeroentry simd_coprocessor_error do_simd_coprocessor_error |
1314 | 1312 | ||
1315 | 1313 | ||
1316 | /* Reload gs selector with exception handling */ | 1314 | /* Reload gs selector with exception handling */ |
1317 | /* edi: new selector */ | 1315 | /* edi: new selector */ |
1318 | ENTRY(native_load_gs_index) | 1316 | ENTRY(native_load_gs_index) |
1319 | CFI_STARTPROC | 1317 | CFI_STARTPROC |
1320 | pushfq_cfi | 1318 | pushfq_cfi |
1321 | DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) | 1319 | DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) |
1322 | SWAPGS | 1320 | SWAPGS |
1323 | gs_change: | 1321 | gs_change: |
1324 | movl %edi,%gs | 1322 | movl %edi,%gs |
1325 | 2: mfence /* workaround */ | 1323 | 2: mfence /* workaround */ |
1326 | SWAPGS | 1324 | SWAPGS |
1327 | popfq_cfi | 1325 | popfq_cfi |
1328 | ret | 1326 | ret |
1329 | CFI_ENDPROC | 1327 | CFI_ENDPROC |
1330 | END(native_load_gs_index) | 1328 | END(native_load_gs_index) |
1331 | 1329 | ||
1332 | _ASM_EXTABLE(gs_change,bad_gs) | 1330 | _ASM_EXTABLE(gs_change,bad_gs) |
1333 | .section .fixup,"ax" | 1331 | .section .fixup,"ax" |
1334 | /* running with kernelgs */ | 1332 | /* running with kernelgs */ |
1335 | bad_gs: | 1333 | bad_gs: |
1336 | SWAPGS /* switch back to user gs */ | 1334 | SWAPGS /* switch back to user gs */ |
1337 | xorl %eax,%eax | 1335 | xorl %eax,%eax |
1338 | movl %eax,%gs | 1336 | movl %eax,%gs |
1339 | jmp 2b | 1337 | jmp 2b |
1340 | .previous | 1338 | .previous |
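The _ASM_EXTABLE(gs_change,bad_gs) pairing is what makes the %gs load above
recoverable: if the mov at gs_change faults, the #GP handler finds gs_change
in the exception table and resumes at bad_gs, which loads the always-safe
null selector and jumps back to label 2.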
1341 | 1339 | ||
1342 | /* Call softirq on interrupt stack. Interrupts are off. */ | 1340 | /* Call softirq on interrupt stack. Interrupts are off. */ |
1343 | ENTRY(call_softirq) | 1341 | ENTRY(call_softirq) |
1344 | CFI_STARTPROC | 1342 | CFI_STARTPROC |
1345 | pushq_cfi %rbp | 1343 | pushq_cfi %rbp |
1346 | CFI_REL_OFFSET rbp,0 | 1344 | CFI_REL_OFFSET rbp,0 |
1347 | mov %rsp,%rbp | 1345 | mov %rsp,%rbp |
1348 | CFI_DEF_CFA_REGISTER rbp | 1346 | CFI_DEF_CFA_REGISTER rbp |
1349 | incl PER_CPU_VAR(irq_count) | 1347 | incl PER_CPU_VAR(irq_count) |
1350 | cmove PER_CPU_VAR(irq_stack_ptr),%rsp | 1348 | cmove PER_CPU_VAR(irq_stack_ptr),%rsp |
1351 | push %rbp # backlink for old unwinder | 1349 | push %rbp # backlink for old unwinder |
1352 | call __do_softirq | 1350 | call __do_softirq |
1353 | leaveq | 1351 | leaveq |
1354 | CFI_RESTORE rbp | 1352 | CFI_RESTORE rbp |
1355 | CFI_DEF_CFA_REGISTER rsp | 1353 | CFI_DEF_CFA_REGISTER rsp |
1356 | CFI_ADJUST_CFA_OFFSET -8 | 1354 | CFI_ADJUST_CFA_OFFSET -8 |
1357 | decl PER_CPU_VAR(irq_count) | 1355 | decl PER_CPU_VAR(irq_count) |
1358 | ret | 1356 | ret |
1359 | CFI_ENDPROC | 1357 | CFI_ENDPROC |
1360 | END(call_softirq) | 1358 | END(call_softirq) |
1361 | 1359 | ||
1362 | #ifdef CONFIG_XEN | 1360 | #ifdef CONFIG_XEN |
1363 | zeroentry xen_hypervisor_callback xen_do_hypervisor_callback | 1361 | zeroentry xen_hypervisor_callback xen_do_hypervisor_callback |
1364 | 1362 | ||
1365 | /* | 1363 | /* |
1366 | * A note on the "critical region" in our callback handler. | 1364 | * A note on the "critical region" in our callback handler. |
1367 | * We want to avoid stacking callback handlers due to events occurring | 1365 | * We want to avoid stacking callback handlers due to events occurring |
1368 | * during handling of the last event. To do this, we keep events disabled | 1366 | * during handling of the last event. To do this, we keep events disabled |
1369 | * until we've done all processing. HOWEVER, we must enable events before | 1367 | * until we've done all processing. HOWEVER, we must enable events before |
1370 | * popping the stack frame (can't be done atomically) and so it would still | 1368 | * popping the stack frame (can't be done atomically) and so it would still |
1371 | * be possible to get enough handler activations to overflow the stack. | 1369 | * be possible to get enough handler activations to overflow the stack. |
1372 | * Although unlikely, bugs of that kind are hard to track down, so we'd | 1370 | * Although unlikely, bugs of that kind are hard to track down, so we'd |
1373 | * like to avoid the possibility. | 1371 | * like to avoid the possibility. |
1374 | * So, on entry to the handler we detect whether we interrupted an | 1372 | * So, on entry to the handler we detect whether we interrupted an |
1375 | * existing activation in its critical region -- if so, we pop the current | 1373 | * existing activation in its critical region -- if so, we pop the current |
1376 | * activation and restart the handler using the previous one. | 1374 | * activation and restart the handler using the previous one. |
1377 | */ | 1375 | */ |
1378 | ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *) | 1376 | ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *) |
1379 | CFI_STARTPROC | 1377 | CFI_STARTPROC |
1380 | /* | 1378 | /* |
1381 | * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will | 1379 | * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will |
1382 | * see the correct pointer to the pt_regs | 1380 | * see the correct pointer to the pt_regs |
1383 | */ | 1381 | */ |
1384 | movq %rdi, %rsp # we don't return, adjust the stack frame | 1382 | movq %rdi, %rsp # we don't return, adjust the stack frame |
1385 | CFI_ENDPROC | 1383 | CFI_ENDPROC |
1386 | DEFAULT_FRAME | 1384 | DEFAULT_FRAME |
1387 | 11: incl PER_CPU_VAR(irq_count) | 1385 | 11: incl PER_CPU_VAR(irq_count) |
1388 | movq %rsp,%rbp | 1386 | movq %rsp,%rbp |
1389 | CFI_DEF_CFA_REGISTER rbp | 1387 | CFI_DEF_CFA_REGISTER rbp |
1390 | cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp | 1388 | cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp |
1391 | pushq %rbp # backlink for old unwinder | 1389 | pushq %rbp # backlink for old unwinder |
1392 | call xen_evtchn_do_upcall | 1390 | call xen_evtchn_do_upcall |
1393 | popq %rsp | 1391 | popq %rsp |
1394 | CFI_DEF_CFA_REGISTER rsp | 1392 | CFI_DEF_CFA_REGISTER rsp |
1395 | decl PER_CPU_VAR(irq_count) | 1393 | decl PER_CPU_VAR(irq_count) |
1396 | jmp error_exit | 1394 | jmp error_exit |
1397 | CFI_ENDPROC | 1395 | CFI_ENDPROC |
1398 | END(xen_do_hypervisor_callback) | 1396 | END(xen_do_hypervisor_callback) |
1399 | 1397 | ||
1400 | /* | 1398 | /* |
1401 | * Hypervisor uses this for application faults while it executes. | 1399 | * Hypervisor uses this for application faults while it executes. |
1402 | * We get here for two reasons: | 1400 | * We get here for two reasons: |
1403 | * 1. Fault while reloading DS, ES, FS or GS | 1401 | * 1. Fault while reloading DS, ES, FS or GS |
1404 | * 2. Fault while executing IRET | 1402 | * 2. Fault while executing IRET |
1405 | * Category 1 we do not need to fix up as Xen has already reloaded all segment | 1403 | * Category 1 we do not need to fix up as Xen has already reloaded all segment |
1406 | * registers that could be reloaded and zeroed the others. | 1404 | * registers that could be reloaded and zeroed the others. |
1407 | * Category 2 we fix up by killing the current process. We cannot use the | 1405 | * Category 2 we fix up by killing the current process. We cannot use the |
1408 | * normal Linux return path in this case because if we use the IRET hypercall | 1406 | * normal Linux return path in this case because if we use the IRET hypercall |
1409 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. | 1407 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. |
1410 | * We distinguish between categories by comparing each saved segment register | 1408 | * We distinguish between categories by comparing each saved segment register |
1411 | * with its current contents: any discrepancy means we are in category 1. | 1409 | * with its current contents: any discrepancy means we are in category 1. |
1412 | */ | 1410 | */ |
1413 | ENTRY(xen_failsafe_callback) | 1411 | ENTRY(xen_failsafe_callback) |
1414 | INTR_FRAME 1 (6*8) | 1412 | INTR_FRAME 1 (6*8) |
1415 | /*CFI_REL_OFFSET gs,GS*/ | 1413 | /*CFI_REL_OFFSET gs,GS*/ |
1416 | /*CFI_REL_OFFSET fs,FS*/ | 1414 | /*CFI_REL_OFFSET fs,FS*/ |
1417 | /*CFI_REL_OFFSET es,ES*/ | 1415 | /*CFI_REL_OFFSET es,ES*/ |
1418 | /*CFI_REL_OFFSET ds,DS*/ | 1416 | /*CFI_REL_OFFSET ds,DS*/ |
1419 | CFI_REL_OFFSET r11,8 | 1417 | CFI_REL_OFFSET r11,8 |
1420 | CFI_REL_OFFSET rcx,0 | 1418 | CFI_REL_OFFSET rcx,0 |
1421 | movw %ds,%cx | 1419 | movw %ds,%cx |
1422 | cmpw %cx,0x10(%rsp) | 1420 | cmpw %cx,0x10(%rsp) |
1423 | CFI_REMEMBER_STATE | 1421 | CFI_REMEMBER_STATE |
1424 | jne 1f | 1422 | jne 1f |
1425 | movw %es,%cx | 1423 | movw %es,%cx |
1426 | cmpw %cx,0x18(%rsp) | 1424 | cmpw %cx,0x18(%rsp) |
1427 | jne 1f | 1425 | jne 1f |
1428 | movw %fs,%cx | 1426 | movw %fs,%cx |
1429 | cmpw %cx,0x20(%rsp) | 1427 | cmpw %cx,0x20(%rsp) |
1430 | jne 1f | 1428 | jne 1f |
1431 | movw %gs,%cx | 1429 | movw %gs,%cx |
1432 | cmpw %cx,0x28(%rsp) | 1430 | cmpw %cx,0x28(%rsp) |
1433 | jne 1f | 1431 | jne 1f |
1434 | /* All segments match their saved values => Category 2 (Bad IRET). */ | 1432 | /* All segments match their saved values => Category 2 (Bad IRET). */ |
1435 | movq (%rsp),%rcx | 1433 | movq (%rsp),%rcx |
1436 | CFI_RESTORE rcx | 1434 | CFI_RESTORE rcx |
1437 | movq 8(%rsp),%r11 | 1435 | movq 8(%rsp),%r11 |
1438 | CFI_RESTORE r11 | 1436 | CFI_RESTORE r11 |
1439 | addq $0x30,%rsp | 1437 | addq $0x30,%rsp |
1440 | CFI_ADJUST_CFA_OFFSET -0x30 | 1438 | CFI_ADJUST_CFA_OFFSET -0x30 |
1441 | pushq_cfi $0 /* RIP */ | 1439 | pushq_cfi $0 /* RIP */ |
1442 | pushq_cfi %r11 | 1440 | pushq_cfi %r11 |
1443 | pushq_cfi %rcx | 1441 | pushq_cfi %rcx |
1444 | jmp general_protection | 1442 | jmp general_protection |
1445 | CFI_RESTORE_STATE | 1443 | CFI_RESTORE_STATE |
1446 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ | 1444 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ |
1447 | movq (%rsp),%rcx | 1445 | movq (%rsp),%rcx |
1448 | CFI_RESTORE rcx | 1446 | CFI_RESTORE rcx |
1449 | movq 8(%rsp),%r11 | 1447 | movq 8(%rsp),%r11 |
1450 | CFI_RESTORE r11 | 1448 | CFI_RESTORE r11 |
1451 | addq $0x30,%rsp | 1449 | addq $0x30,%rsp |
1452 | CFI_ADJUST_CFA_OFFSET -0x30 | 1450 | CFI_ADJUST_CFA_OFFSET -0x30 |
1453 | pushq_cfi $-1 /* orig_ax = -1 => not a system call */ | 1451 | pushq_cfi $-1 /* orig_ax = -1 => not a system call */ |
1454 | SAVE_ALL | 1452 | SAVE_ALL |
1455 | jmp error_exit | 1453 | jmp error_exit |
1456 | CFI_ENDPROC | 1454 | CFI_ENDPROC |
1457 | END(xen_failsafe_callback) | 1455 | END(xen_failsafe_callback) |
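Spelled out in C, the classification that the segment compares implement looks like the sketch below. The frame layout mirrors the 0x10..0x28 slots checked above, and read_ds() etc. stand in for the movw %ds,%cx sequences (all names illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct failsafe_frame {
            uint64_t rcx, r11;           /* at 0x00 and 0x08 */
            uint64_t ds, es, fs, gs;     /* 8-byte slots at 0x10..0x28 */
    };

    /* Stand-ins for reading the live segment registers. */
    extern uint16_t read_ds(void), read_es(void), read_fs(void), read_gs(void);

    /* Category 1 (bad segment): some saved selector no longer matches the
     * live register, because Xen zeroed what it could not reload -- just
     * retry the IRET. Category 2 (bad IRET): everything matches, so the
     * fault was in IRET itself and the task is killed via the #GP path. */
    static bool is_category_1(const struct failsafe_frame *f)
    {
            return (uint16_t)f->ds != read_ds() || (uint16_t)f->es != read_es() ||
                   (uint16_t)f->fs != read_fs() || (uint16_t)f->gs != read_gs();
    }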
1458 | 1456 | ||
1459 | apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ | 1457 | apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ |
1460 | xen_hvm_callback_vector xen_evtchn_do_upcall | 1458 | xen_hvm_callback_vector xen_evtchn_do_upcall |
1461 | 1459 | ||
1462 | #endif /* CONFIG_XEN */ | 1460 | #endif /* CONFIG_XEN */ |
1463 | 1461 | ||
1464 | /* | 1462 | /* |
1465 | * Some functions should be protected against kprobes | 1463 | * Some functions should be protected against kprobes |
1466 | */ | 1464 | */ |
1467 | .pushsection .kprobes.text, "ax" | 1465 | .pushsection .kprobes.text, "ax" |
1468 | 1466 | ||
1469 | paranoidzeroentry_ist debug do_debug DEBUG_STACK | 1467 | paranoidzeroentry_ist debug do_debug DEBUG_STACK |
1470 | paranoidzeroentry_ist int3 do_int3 DEBUG_STACK | 1468 | paranoidzeroentry_ist int3 do_int3 DEBUG_STACK |
1471 | paranoiderrorentry stack_segment do_stack_segment | 1469 | paranoiderrorentry stack_segment do_stack_segment |
1472 | #ifdef CONFIG_XEN | 1470 | #ifdef CONFIG_XEN |
1473 | zeroentry xen_debug do_debug | 1471 | zeroentry xen_debug do_debug |
1474 | zeroentry xen_int3 do_int3 | 1472 | zeroentry xen_int3 do_int3 |
1475 | errorentry xen_stack_segment do_stack_segment | 1473 | errorentry xen_stack_segment do_stack_segment |
1476 | #endif | 1474 | #endif |
1477 | errorentry general_protection do_general_protection | 1475 | errorentry general_protection do_general_protection |
1478 | errorentry page_fault do_page_fault | 1476 | errorentry page_fault do_page_fault |
1479 | #ifdef CONFIG_KVM_GUEST | 1477 | #ifdef CONFIG_KVM_GUEST |
1480 | errorentry async_page_fault do_async_page_fault | 1478 | errorentry async_page_fault do_async_page_fault |
1481 | #endif | 1479 | #endif |
1482 | #ifdef CONFIG_X86_MCE | 1480 | #ifdef CONFIG_X86_MCE |
1483 | paranoidzeroentry machine_check *machine_check_vector(%rip) | 1481 | paranoidzeroentry machine_check *machine_check_vector(%rip) |
1484 | #endif | 1482 | #endif |
1485 | 1483 | ||
1486 | /* | 1484 | /* |
1487 | * "Paranoid" exit path from exception stack. | 1485 | * "Paranoid" exit path from exception stack. |
1488 | * Paranoid because this is used by NMIs and cannot take | 1486 | * Paranoid because this is used by NMIs and cannot take |
1489 | * any kernel state for granted. | 1487 | * any kernel state for granted. |
1490 | * We don't do kernel preemption checks here, because only | 1488 | * We don't do kernel preemption checks here, because only |
1491 | * NMI should be common, and it does not enable IRQs and | 1489 | * NMI should be common, and it does not enable IRQs and |
1492 | * cannot get reschedule ticks. | 1490 | * cannot get reschedule ticks. |
1493 | * | 1491 | * |
1494 | * "trace" is 0 for the NMI handler only, because irq-tracing | 1492 | * "trace" is 0 for the NMI handler only, because irq-tracing |
1495 | * is fundamentally NMI-unsafe. (we cannot change the soft and | 1493 | * is fundamentally NMI-unsafe. (we cannot change the soft and |
1496 | * hard flags at once, atomically) | 1494 | * hard flags at once, atomically) |
1497 | */ | 1495 | */ |
1498 | 1496 | ||
1499 | /* ebx: no swapgs flag */ | 1497 | /* ebx: no swapgs flag */ |
1500 | ENTRY(paranoid_exit) | 1498 | ENTRY(paranoid_exit) |
1501 | DEFAULT_FRAME | 1499 | DEFAULT_FRAME |
1502 | DISABLE_INTERRUPTS(CLBR_NONE) | 1500 | DISABLE_INTERRUPTS(CLBR_NONE) |
1503 | TRACE_IRQS_OFF_DEBUG | 1501 | TRACE_IRQS_OFF_DEBUG |
1504 | testl %ebx,%ebx /* swapgs needed? */ | 1502 | testl %ebx,%ebx /* swapgs needed? */ |
1505 | jnz paranoid_restore | 1503 | jnz paranoid_restore |
1506 | testl $3,CS(%rsp) | 1504 | testl $3,CS(%rsp) |
1507 | jnz paranoid_userspace | 1505 | jnz paranoid_userspace |
1508 | paranoid_swapgs: | 1506 | paranoid_swapgs: |
1509 | TRACE_IRQS_IRETQ 0 | 1507 | TRACE_IRQS_IRETQ 0 |
1510 | SWAPGS_UNSAFE_STACK | 1508 | SWAPGS_UNSAFE_STACK |
1511 | RESTORE_ALL 8 | 1509 | RESTORE_ALL 8 |
1512 | jmp irq_return | 1510 | jmp irq_return |
1513 | paranoid_restore: | 1511 | paranoid_restore: |
1514 | TRACE_IRQS_IRETQ_DEBUG 0 | 1512 | TRACE_IRQS_IRETQ_DEBUG 0 |
1515 | RESTORE_ALL 8 | 1513 | RESTORE_ALL 8 |
1516 | jmp irq_return | 1514 | jmp irq_return |
1517 | paranoid_userspace: | 1515 | paranoid_userspace: |
1518 | GET_THREAD_INFO(%rcx) | 1516 | GET_THREAD_INFO(%rcx) |
1519 | movl TI_flags(%rcx),%ebx | 1517 | movl TI_flags(%rcx),%ebx |
1520 | andl $_TIF_WORK_MASK,%ebx | 1518 | andl $_TIF_WORK_MASK,%ebx |
1521 | jz paranoid_swapgs | 1519 | jz paranoid_swapgs |
1522 | movq %rsp,%rdi /* &pt_regs */ | 1520 | movq %rsp,%rdi /* &pt_regs */ |
1523 | call sync_regs | 1521 | call sync_regs |
1524 | movq %rax,%rsp /* switch stack for scheduling */ | 1522 | movq %rax,%rsp /* switch stack for scheduling */ |
1525 | testl $_TIF_NEED_RESCHED,%ebx | 1523 | testl $_TIF_NEED_RESCHED,%ebx |
1526 | jnz paranoid_schedule | 1524 | jnz paranoid_schedule |
1527 | movl %ebx,%edx /* arg3: thread flags */ | 1525 | movl %ebx,%edx /* arg3: thread flags */ |
1528 | TRACE_IRQS_ON | 1526 | TRACE_IRQS_ON |
1529 | ENABLE_INTERRUPTS(CLBR_NONE) | 1527 | ENABLE_INTERRUPTS(CLBR_NONE) |
1530 | xorl %esi,%esi /* arg2: oldset */ | 1528 | xorl %esi,%esi /* arg2: oldset */ |
1531 | movq %rsp,%rdi /* arg1: &pt_regs */ | 1529 | movq %rsp,%rdi /* arg1: &pt_regs */ |
1532 | call do_notify_resume | 1530 | call do_notify_resume |
1533 | DISABLE_INTERRUPTS(CLBR_NONE) | 1531 | DISABLE_INTERRUPTS(CLBR_NONE) |
1534 | TRACE_IRQS_OFF | 1532 | TRACE_IRQS_OFF |
1535 | jmp paranoid_userspace | 1533 | jmp paranoid_userspace |
1536 | paranoid_schedule: | 1534 | paranoid_schedule: |
1537 | TRACE_IRQS_ON | 1535 | TRACE_IRQS_ON |
1538 | ENABLE_INTERRUPTS(CLBR_ANY) | 1536 | ENABLE_INTERRUPTS(CLBR_ANY) |
1539 | SCHEDULE_USER | 1537 | SCHEDULE_USER |
1540 | DISABLE_INTERRUPTS(CLBR_ANY) | 1538 | DISABLE_INTERRUPTS(CLBR_ANY) |
1541 | TRACE_IRQS_OFF | 1539 | TRACE_IRQS_OFF |
1542 | jmp paranoid_userspace | 1540 | jmp paranoid_userspace |
1543 | CFI_ENDPROC | 1541 | CFI_ENDPROC |
1544 | END(paranoid_exit) | 1542 | END(paranoid_exit) |
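As a reading aid, the branch structure of paranoid_exit condenses to the C-style pseudocode below. This is a sketch only: ebx_no_swapgs models the %ebx flag, and the helper names (from_userspace, restore_all_and_iret, the *_with_irqs_enabled pair) and the two TIF values are invented for illustration:

    #include <stdbool.h>

    struct pt_regs;
    extern void local_irq_disable(void), swapgs(void);
    extern void restore_all_and_iret(struct pt_regs *);
    extern bool from_userspace(struct pt_regs *);
    extern unsigned long thread_flags(void);
    extern void schedule_with_irqs_enabled(void);
    extern void do_notify_resume_with_irqs_enabled(struct pt_regs *);
    #define TIF_WORK_MASK    0xffffUL  /* placeholder for _TIF_WORK_MASK */
    #define TIF_NEED_RESCHED 0x0008UL  /* placeholder bit */

    static void paranoid_exit_sketch(bool ebx_no_swapgs, struct pt_regs *regs)
    {
            local_irq_disable();
            if (!ebx_no_swapgs && from_userspace(regs)) {
                    /* Returning to user space: drain all TIF work first. */
                    while (thread_flags() & TIF_WORK_MASK) {
                            if (thread_flags() & TIF_NEED_RESCHED)
                                    schedule_with_irqs_enabled();
                            else
                                    do_notify_resume_with_irqs_enabled(regs);
                            local_irq_disable();
                    }
            }
            if (!ebx_no_swapgs)
                    swapgs();          /* paranoid_swapgs */
            restore_all_and_iret(regs);
    }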
1545 | 1543 | ||
1546 | /* | 1544 | /* |
1547 | * Exception entry point. This expects an error code/orig_rax on the stack. | 1545 | * Exception entry point. This expects an error code/orig_rax on the stack. |
1548 | * returns in "no swapgs flag" in %ebx. | 1546 | * returns in "no swapgs flag" in %ebx. |
1549 | */ | 1547 | */ |
1550 | ENTRY(error_entry) | 1548 | ENTRY(error_entry) |
1551 | XCPT_FRAME | 1549 | XCPT_FRAME |
1552 | CFI_ADJUST_CFA_OFFSET 15*8 | 1550 | CFI_ADJUST_CFA_OFFSET 15*8 |
1553 | /* oldrax contains error code */ | 1551 | /* oldrax contains error code */ |
1554 | cld | 1552 | cld |
1555 | movq_cfi rdi, RDI+8 | 1553 | movq_cfi rdi, RDI+8 |
1556 | movq_cfi rsi, RSI+8 | 1554 | movq_cfi rsi, RSI+8 |
1557 | movq_cfi rdx, RDX+8 | 1555 | movq_cfi rdx, RDX+8 |
1558 | movq_cfi rcx, RCX+8 | 1556 | movq_cfi rcx, RCX+8 |
1559 | movq_cfi rax, RAX+8 | 1557 | movq_cfi rax, RAX+8 |
1560 | movq_cfi r8, R8+8 | 1558 | movq_cfi r8, R8+8 |
1561 | movq_cfi r9, R9+8 | 1559 | movq_cfi r9, R9+8 |
1562 | movq_cfi r10, R10+8 | 1560 | movq_cfi r10, R10+8 |
1563 | movq_cfi r11, R11+8 | 1561 | movq_cfi r11, R11+8 |
1564 | movq_cfi rbx, RBX+8 | 1562 | movq_cfi rbx, RBX+8 |
1565 | movq_cfi rbp, RBP+8 | 1563 | movq_cfi rbp, RBP+8 |
1566 | movq_cfi r12, R12+8 | 1564 | movq_cfi r12, R12+8 |
1567 | movq_cfi r13, R13+8 | 1565 | movq_cfi r13, R13+8 |
1568 | movq_cfi r14, R14+8 | 1566 | movq_cfi r14, R14+8 |
1569 | movq_cfi r15, R15+8 | 1567 | movq_cfi r15, R15+8 |
1570 | xorl %ebx,%ebx | 1568 | xorl %ebx,%ebx |
1571 | testl $3,CS+8(%rsp) | 1569 | testl $3,CS+8(%rsp) |
1572 | je error_kernelspace | 1570 | je error_kernelspace |
1573 | error_swapgs: | 1571 | error_swapgs: |
1574 | SWAPGS | 1572 | SWAPGS |
1575 | error_sti: | 1573 | error_sti: |
1576 | TRACE_IRQS_OFF | 1574 | TRACE_IRQS_OFF |
1577 | ret | 1575 | ret |
1578 | 1576 | ||
1579 | /* | 1577 | /* |
1580 | * There are two places in the kernel that can potentially fault with | 1578 | * There are two places in the kernel that can potentially fault with |
1581 | * usergs. Handle them here. The exception handlers after iret run with | 1579 | * usergs. Handle them here. The exception handlers after iret run with |
1582 | * kernel gs again, so don't set the user space flag. B stepping K8s | 1580 | * kernel gs again, so don't set the user space flag. B stepping K8s |
1583 | * sometimes report a truncated RIP for IRET exceptions returning to | 1581 | * sometimes report a truncated RIP for IRET exceptions returning to |
1584 | * compat mode. Check for these here too. | 1582 | * compat mode. Check for these here too. |
1585 | */ | 1583 | */ |
1586 | error_kernelspace: | 1584 | error_kernelspace: |
1587 | incl %ebx | 1585 | incl %ebx |
1588 | leaq irq_return(%rip),%rcx | 1586 | leaq irq_return(%rip),%rcx |
1589 | cmpq %rcx,RIP+8(%rsp) | 1587 | cmpq %rcx,RIP+8(%rsp) |
1590 | je error_swapgs | 1588 | je error_swapgs |
1591 | movl %ecx,%eax /* zero extend */ | 1589 | movl %ecx,%eax /* zero extend */ |
1592 | cmpq %rax,RIP+8(%rsp) | 1590 | cmpq %rax,RIP+8(%rsp) |
1593 | je bstep_iret | 1591 | je bstep_iret |
1594 | cmpq $gs_change,RIP+8(%rsp) | 1592 | cmpq $gs_change,RIP+8(%rsp) |
1595 | je error_swapgs | 1593 | je error_swapgs |
1596 | jmp error_sti | 1594 | jmp error_sti |
1597 | 1595 | ||
1598 | bstep_iret: | 1596 | bstep_iret: |
1599 | /* Fix truncated RIP */ | 1597 | /* Fix truncated RIP */ |
1600 | movq %rcx,RIP+8(%rsp) | 1598 | movq %rcx,RIP+8(%rsp) |
1601 | jmp error_swapgs | 1599 | jmp error_swapgs |
1602 | CFI_ENDPROC | 1600 | CFI_ENDPROC |
1603 | END(error_entry) | 1601 | END(error_entry) |
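The three RIP comparisons in error_kernelspace translate to the hedged C below; irq_return and gs_change stand for the asm labels, and the helper name is illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    extern char irq_return[], gs_change[];   /* stand-ins for the labels */

    /* Sketch: decide whether the in-kernel fault happened with user GS
     * still loaded, i.e. whether a SWAPGS is needed before proceeding. */
    static bool fault_needs_swapgs(uint64_t *rip)
    {
            if (*rip == (uint64_t)irq_return)
                    return true;             /* faulted on IRET itself */
            /* B-stepping K8 erratum: IRET faults returning to compat mode
             * can report a RIP truncated to 32 bits; repair and retry. */
            if (*rip == (uint32_t)(uint64_t)irq_return) {
                    *rip = (uint64_t)irq_return;   /* bstep_iret fixup */
                    return true;
            }
            return *rip == (uint64_t)gs_change;    /* faulted in gs reload */
    }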
1604 | 1602 | ||
1605 | 1603 | ||
1606 | /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ | 1604 | /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ |
1607 | ENTRY(error_exit) | 1605 | ENTRY(error_exit) |
1608 | DEFAULT_FRAME | 1606 | DEFAULT_FRAME |
1609 | movl %ebx,%eax | 1607 | movl %ebx,%eax |
1610 | RESTORE_REST | 1608 | RESTORE_REST |
1611 | DISABLE_INTERRUPTS(CLBR_NONE) | 1609 | DISABLE_INTERRUPTS(CLBR_NONE) |
1612 | TRACE_IRQS_OFF | 1610 | TRACE_IRQS_OFF |
1613 | GET_THREAD_INFO(%rcx) | 1611 | GET_THREAD_INFO(%rcx) |
1614 | testl %eax,%eax | 1612 | testl %eax,%eax |
1615 | jne retint_kernel | 1613 | jne retint_kernel |
1616 | LOCKDEP_SYS_EXIT_IRQ | 1614 | LOCKDEP_SYS_EXIT_IRQ |
1617 | movl TI_flags(%rcx),%edx | 1615 | movl TI_flags(%rcx),%edx |
1618 | movl $_TIF_WORK_MASK,%edi | 1616 | movl $_TIF_WORK_MASK,%edi |
1619 | andl %edi,%edx | 1617 | andl %edi,%edx |
1620 | jnz retint_careful | 1618 | jnz retint_careful |
1621 | jmp retint_swapgs | 1619 | jmp retint_swapgs |
1622 | CFI_ENDPROC | 1620 | CFI_ENDPROC |
1623 | END(error_exit) | 1621 | END(error_exit) |
1624 | 1622 | ||
1625 | /* | 1623 | /* |
1626 | * Test if a given stack is an NMI stack or not. | 1624 | * Test if a given stack is an NMI stack or not. |
1627 | */ | 1625 | */ |
1628 | .macro test_in_nmi reg stack nmi_ret normal_ret | 1626 | .macro test_in_nmi reg stack nmi_ret normal_ret |
1629 | cmpq %\reg, \stack | 1627 | cmpq %\reg, \stack |
1630 | ja \normal_ret | 1628 | ja \normal_ret |
1631 | subq $EXCEPTION_STKSZ, %\reg | 1629 | subq $EXCEPTION_STKSZ, %\reg |
1632 | cmpq %\reg, \stack | 1630 | cmpq %\reg, \stack |
1633 | jb \normal_ret | 1631 | jb \normal_ret |
1634 | jmp \nmi_ret | 1632 | jmp \nmi_ret |
1635 | .endm | 1633 | .endm |
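The macro is a plain inclusive range test; in C it reads as below, with EXCEPTION_STKSZ given an illustrative value:

    #include <stdbool.h>
    #include <stdint.h>

    #define EXCEPTION_STKSZ 4096UL   /* illustrative; real size is per-config */

    /* True iff 'stack' lies within the exception stack whose top is
     * 'top' -- the same pair of cmpq/ja and cmpq/jb tests as above. */
    static bool in_nmi_stack(uint64_t stack, uint64_t top)
    {
            return stack <= top && stack >= top - EXCEPTION_STKSZ;
    }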
1636 | 1634 | ||
1637 | /* runs on exception stack */ | 1635 | /* runs on exception stack */ |
1638 | ENTRY(nmi) | 1636 | ENTRY(nmi) |
1639 | INTR_FRAME | 1637 | INTR_FRAME |
1640 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1638 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1641 | /* | 1639 | /* |
1642 | * We allow breakpoints in NMIs. If a breakpoint occurs, then | 1640 | * We allow breakpoints in NMIs. If a breakpoint occurs, then |
1643 | * the iretq it performs will take us out of NMI context. | 1641 | * the iretq it performs will take us out of NMI context. |
1644 | * This means that we can have nested NMIs where the next | 1642 | * This means that we can have nested NMIs where the next |
1645 | * NMI is using the top of the stack of the previous NMI. We | 1643 | * NMI is using the top of the stack of the previous NMI. We |
1646 | * can't let it execute because the nested NMI will corrupt the | 1644 | * can't let it execute because the nested NMI will corrupt the |
1647 | * stack of the previous NMI. NMI handlers are not re-entrant | 1645 | * stack of the previous NMI. NMI handlers are not re-entrant |
1648 | * anyway. | 1646 | * anyway. |
1649 | * | 1647 | * |
1650 | * To handle this case we do the following: | 1648 | * To handle this case we do the following: |
1651 | * Check a special location on the stack that contains | 1649 | * Check a special location on the stack that contains |
1652 | * a variable that is set when NMIs are executing. | 1650 | * a variable that is set when NMIs are executing. |
1653 | * The interrupted task's stack is also checked to see if it | 1651 | * The interrupted task's stack is also checked to see if it |
1654 | * is an NMI stack. | 1652 | * is an NMI stack. |
1655 | * If the variable is not set and the stack is not the NMI | 1653 | * If the variable is not set and the stack is not the NMI |
1656 | * stack then: | 1654 | * stack then: |
1657 | * o Set the special variable on the stack | 1655 | * o Set the special variable on the stack |
1658 | * o Copy the interrupt frame into a "saved" location on the stack | 1656 | * o Copy the interrupt frame into a "saved" location on the stack |
1659 | * o Copy the interrupt frame into a "copy" location on the stack | 1657 | * o Copy the interrupt frame into a "copy" location on the stack |
1660 | * o Continue processing the NMI | 1658 | * o Continue processing the NMI |
1661 | * If the variable is set or the previous stack is the NMI stack: | 1659 | * If the variable is set or the previous stack is the NMI stack: |
1662 | * o Modify the "copy" location to jump to repeat_nmi | 1660 | * o Modify the "copy" location to jump to repeat_nmi |
1663 | * o return back to the first NMI | 1661 | * o return back to the first NMI |
1664 | * | 1662 | * |
1665 | * Now on exit of the first NMI, we first clear the stack variable. | 1663 | * Now on exit of the first NMI, we first clear the stack variable. |
1666 | * The NMI stack will tell any nested NMIs at that point that it is | 1664 | * The NMI stack will tell any nested NMIs at that point that it is |
1667 | * nested. Then we pop the stack normally with iret, and if there was | 1665 | * nested. Then we pop the stack normally with iret, and if there was |
1668 | * a nested NMI that updated the copy interrupt stack frame, a | 1666 | * a nested NMI that updated the copy interrupt stack frame, a |
1669 | * jump will be made to the repeat_nmi code that will handle the second | 1667 | * jump will be made to the repeat_nmi code that will handle the second |
1670 | * NMI. | 1668 | * NMI. |
1671 | */ | 1669 | */ |
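Before the code, the entry classification just described, condensed into a hedged C sketch (all names illustrative; in_nmi_stack() is the range test sketched after test_in_nmi above, redeclared here so the snippet stands alone):

    #include <stdbool.h>
    #include <stdint.h>

    extern bool in_nmi_stack(uint64_t stack, uint64_t top);

    /* Sketch of the nesting check: 'nmi_executing' models the on-stack
     * variable, and 0x10 stands in for __KERNEL_CS. */
    static bool nmi_is_nested(uint16_t cs, uint64_t rsp,
                              uint64_t nmi_stack_top, long nmi_executing)
    {
            if (cs != 0x10)
                    return false;   /* interrupted user space: never nested */
            if (nmi_executing == 1)
                    return true;    /* the first NMI is still in progress */
            /* Double check against the stack range to close the race where
             * the first NMI has already cleared the variable. */
            return in_nmi_stack(rsp, nmi_stack_top);
    }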
1672 | 1670 | ||
1673 | /* Use %rdx as our temp variable throughout */ | 1671 | /* Use %rdx as our temp variable throughout */ |
1674 | pushq_cfi %rdx | 1672 | pushq_cfi %rdx |
1675 | CFI_REL_OFFSET rdx, 0 | 1673 | CFI_REL_OFFSET rdx, 0 |
1676 | 1674 | ||
1677 | /* | 1675 | /* |
1678 | * If %cs was not the kernel segment, then the NMI triggered in user | 1676 | * If %cs was not the kernel segment, then the NMI triggered in user |
1679 | * space, which means it is definitely not nested. | 1677 | * space, which means it is definitely not nested. |
1680 | */ | 1678 | */ |
1681 | cmpl $__KERNEL_CS, 16(%rsp) | 1679 | cmpl $__KERNEL_CS, 16(%rsp) |
1682 | jne first_nmi | 1680 | jne first_nmi |
1683 | 1681 | ||
1684 | /* | 1682 | /* |
1685 | * Check the special variable on the stack to see if NMIs are | 1683 | * Check the special variable on the stack to see if NMIs are |
1686 | * executing. | 1684 | * executing. |
1687 | */ | 1685 | */ |
1688 | cmpl $1, -8(%rsp) | 1686 | cmpl $1, -8(%rsp) |
1689 | je nested_nmi | 1687 | je nested_nmi |
1690 | 1688 | ||
1691 | /* | 1689 | /* |
1692 | * Now test if the previous stack was an NMI stack. | 1690 | * Now test if the previous stack was an NMI stack. |
1693 | * We need the double check. We check the NMI stack to satisfy the | 1691 | * We need the double check. We check the NMI stack to satisfy the |
1694 | * race when the first NMI clears the variable before returning. | 1692 | * race when the first NMI clears the variable before returning. |
1695 | * We check the variable because the first NMI could be in a | 1693 | * We check the variable because the first NMI could be in a |
1696 | * breakpoint routine using a breakpoint stack. | 1694 | * breakpoint routine using a breakpoint stack. |
1697 | */ | 1695 | */ |
1698 | lea 6*8(%rsp), %rdx | 1696 | lea 6*8(%rsp), %rdx |
1699 | test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi | 1697 | test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi |
1700 | CFI_REMEMBER_STATE | 1698 | CFI_REMEMBER_STATE |
1701 | 1699 | ||
1702 | nested_nmi: | 1700 | nested_nmi: |
1703 | /* | 1701 | /* |
1704 | * Do nothing if we interrupted the fixup in repeat_nmi. | 1702 | * Do nothing if we interrupted the fixup in repeat_nmi. |
1705 | * It's about to repeat the NMI handler, so we are fine | 1703 | * It's about to repeat the NMI handler, so we are fine |
1706 | * with ignoring this one. | 1704 | * with ignoring this one. |
1707 | */ | 1705 | */ |
1708 | movq $repeat_nmi, %rdx | 1706 | movq $repeat_nmi, %rdx |
1709 | cmpq 8(%rsp), %rdx | 1707 | cmpq 8(%rsp), %rdx |
1710 | ja 1f | 1708 | ja 1f |
1711 | movq $end_repeat_nmi, %rdx | 1709 | movq $end_repeat_nmi, %rdx |
1712 | cmpq 8(%rsp), %rdx | 1710 | cmpq 8(%rsp), %rdx |
1713 | ja nested_nmi_out | 1711 | ja nested_nmi_out |
1714 | 1712 | ||
1715 | 1: | 1713 | 1: |
1716 | /* Set up the interrupted NMI's stack to jump to repeat_nmi */ | 1714 | /* Set up the interrupted NMI's stack to jump to repeat_nmi */ |
1717 | leaq -6*8(%rsp), %rdx | 1715 | leaq -6*8(%rsp), %rdx |
1718 | movq %rdx, %rsp | 1716 | movq %rdx, %rsp |
1719 | CFI_ADJUST_CFA_OFFSET 6*8 | 1717 | CFI_ADJUST_CFA_OFFSET 6*8 |
1720 | pushq_cfi $__KERNEL_DS | 1718 | pushq_cfi $__KERNEL_DS |
1721 | pushq_cfi %rdx | 1719 | pushq_cfi %rdx |
1722 | pushfq_cfi | 1720 | pushfq_cfi |
1723 | pushq_cfi $__KERNEL_CS | 1721 | pushq_cfi $__KERNEL_CS |
1724 | pushq_cfi $repeat_nmi | 1722 | pushq_cfi $repeat_nmi |
1725 | 1723 | ||
1726 | /* Put stack back */ | 1724 | /* Put stack back */ |
1727 | addq $(11*8), %rsp | 1725 | addq $(11*8), %rsp |
1728 | CFI_ADJUST_CFA_OFFSET -11*8 | 1726 | CFI_ADJUST_CFA_OFFSET -11*8 |
1729 | 1727 | ||
1730 | nested_nmi_out: | 1728 | nested_nmi_out: |
1731 | popq_cfi %rdx | 1729 | popq_cfi %rdx |
1732 | CFI_RESTORE rdx | 1730 | CFI_RESTORE rdx |
1733 | 1731 | ||
1734 | /* No need to check faults here */ | 1732 | /* No need to check faults here */ |
1735 | INTERRUPT_RETURN | 1733 | INTERRUPT_RETURN |
1736 | 1734 | ||
1737 | CFI_RESTORE_STATE | 1735 | CFI_RESTORE_STATE |
1738 | first_nmi: | 1736 | first_nmi: |
1739 | /* | 1737 | /* |
1740 | * Because nested NMIs will use the pushed location that we | 1738 | * Because nested NMIs will use the pushed location that we |
1741 | * stored in rdx, we must keep that space available. | 1739 | * stored in rdx, we must keep that space available. |
1742 | * Here's what our stack frame will look like: | 1740 | * Here's what our stack frame will look like: |
1743 | * +-------------------------+ | 1741 | * +-------------------------+ |
1744 | * | original SS | | 1742 | * | original SS | |
1745 | * | original Return RSP | | 1743 | * | original Return RSP | |
1746 | * | original RFLAGS | | 1744 | * | original RFLAGS | |
1747 | * | original CS | | 1745 | * | original CS | |
1748 | * | original RIP | | 1746 | * | original RIP | |
1749 | * +-------------------------+ | 1747 | * +-------------------------+ |
1750 | * | temp storage for rdx | | 1748 | * | temp storage for rdx | |
1751 | * +-------------------------+ | 1749 | * +-------------------------+ |
1752 | * | NMI executing variable | | 1750 | * | NMI executing variable | |
1753 | * +-------------------------+ | 1751 | * +-------------------------+ |
1754 | * | Saved SS | | 1752 | * | Saved SS | |
1755 | * | Saved Return RSP | | 1753 | * | Saved Return RSP | |
1756 | * | Saved RFLAGS | | 1754 | * | Saved RFLAGS | |
1757 | * | Saved CS | | 1755 | * | Saved CS | |
1758 | * | Saved RIP | | 1756 | * | Saved RIP | |
1759 | * +-------------------------+ | 1757 | * +-------------------------+ |
1760 | * | copied SS | | 1758 | * | copied SS | |
1761 | * | copied Return RSP | | 1759 | * | copied Return RSP | |
1762 | * | copied RFLAGS | | 1760 | * | copied RFLAGS | |
1763 | * | copied CS | | 1761 | * | copied CS | |
1764 | * | copied RIP | | 1762 | * | copied RIP | |
1765 | * +-------------------------+ | 1763 | * +-------------------------+ |
1766 | * | pt_regs | | 1764 | * | pt_regs | |
1767 | * +-------------------------+ | 1765 | * +-------------------------+ |
1768 | * | 1766 | * |
1769 | * The saved stack frame is used to fix up the copied stack frame | 1767 | * The saved stack frame is used to fix up the copied stack frame |
1770 | * that a nested NMI may change to make the interrupted NMI iret jump | 1768 | * that a nested NMI may change to make the interrupted NMI iret jump |
1771 | * to repeat_nmi. The original stack frame and the temp storage | 1769 | * to repeat_nmi. The original stack frame and the temp storage |
1772 | * are also used by nested NMIs and cannot be trusted on exit. | 1770 | * are also used by nested NMIs and cannot be trusted on exit. |
1773 | */ | 1771 | */ |
1774 | /* Do not pop rdx, nested NMIs will corrupt that part of the stack */ | 1772 | /* Do not pop rdx, nested NMIs will corrupt that part of the stack */ |
1775 | movq (%rsp), %rdx | 1773 | movq (%rsp), %rdx |
1776 | CFI_RESTORE rdx | 1774 | CFI_RESTORE rdx |
1777 | 1775 | ||
1778 | /* Set the NMI executing variable on the stack. */ | 1776 | /* Set the NMI executing variable on the stack. */ |
1779 | pushq_cfi $1 | 1777 | pushq_cfi $1 |
1780 | 1778 | ||
1781 | /* Copy the stack frame to the Saved frame */ | 1779 | /* Copy the stack frame to the Saved frame */ |
1782 | .rept 5 | 1780 | .rept 5 |
1783 | pushq_cfi 6*8(%rsp) | 1781 | pushq_cfi 6*8(%rsp) |
1784 | .endr | 1782 | .endr |
1785 | CFI_DEF_CFA_OFFSET SS+8-RIP | 1783 | CFI_DEF_CFA_OFFSET SS+8-RIP |
1786 | 1784 | ||
1787 | /* Everything up to here is safe from nested NMIs */ | 1785 | /* Everything up to here is safe from nested NMIs */ |
1788 | 1786 | ||
1789 | /* | 1787 | /* |
1790 | * If there was a nested NMI, the first NMI's iret will return | 1788 | * If there was a nested NMI, the first NMI's iret will return |
1791 | * here. But NMIs are still enabled and we can take another | 1789 | * here. But NMIs are still enabled and we can take another |
1792 | * nested NMI. The nested NMI checks the interrupted RIP to see | 1790 | * nested NMI. The nested NMI checks the interrupted RIP to see |
1793 | * if it is between repeat_nmi and end_repeat_nmi, and if so | 1791 | * if it is between repeat_nmi and end_repeat_nmi, and if so |
1794 | * it will just return, as we are about to repeat an NMI anyway. | 1792 | * it will just return, as we are about to repeat an NMI anyway. |
1795 | * This makes it safe to copy to the stack frame that a nested | 1793 | * This makes it safe to copy to the stack frame that a nested |
1796 | * NMI will update. | 1794 | * NMI will update. |
1797 | */ | 1795 | */ |
1798 | repeat_nmi: | 1796 | repeat_nmi: |
1799 | /* | 1797 | /* |
1800 | * Update the stack variable to say we are still in NMI (the update | 1798 | * Update the stack variable to say we are still in NMI (the update |
1801 | * is benign for the non-repeat case, where 1 was pushed just above | 1799 | * is benign for the non-repeat case, where 1 was pushed just above |
1802 | * to this very stack slot). | 1800 | * to this very stack slot). |
1803 | */ | 1801 | */ |
1804 | movq $1, 5*8(%rsp) | 1802 | movq $1, 5*8(%rsp) |
1805 | 1803 | ||
1806 | /* Make another copy, this one may be modified by nested NMIs */ | 1804 | /* Make another copy, this one may be modified by nested NMIs */ |
1807 | .rept 5 | 1805 | .rept 5 |
1808 | pushq_cfi 4*8(%rsp) | 1806 | pushq_cfi 4*8(%rsp) |
1809 | .endr | 1807 | .endr |
1810 | CFI_DEF_CFA_OFFSET SS+8-RIP | 1808 | CFI_DEF_CFA_OFFSET SS+8-RIP |
1811 | end_repeat_nmi: | 1809 | end_repeat_nmi: |
1812 | 1810 | ||
1813 | /* | 1811 | /* |
1814 | * Everything below this point can be preempted by a nested | 1812 | * Everything below this point can be preempted by a nested |
1815 | * NMI if the first NMI took an exception and reset our iret stack | 1813 | * NMI if the first NMI took an exception and reset our iret stack |
1816 | * so that we repeat another NMI. | 1814 | * so that we repeat another NMI. |
1817 | */ | 1815 | */ |
1818 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ | 1816 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
1819 | subq $ORIG_RAX-R15, %rsp | 1817 | subq $ORIG_RAX-R15, %rsp |
1820 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 | 1818 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
1821 | /* | 1819 | /* |
1822 | * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit | 1820 | * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit |
1823 | * as we should not be calling schedule in NMI context, | 1821 | * as we should not be calling schedule in NMI context, |
1824 | * even with normal interrupts enabled. An NMI should not be | 1822 | * even with normal interrupts enabled. An NMI should not be |
1825 | * setting NEED_RESCHED or anything that normal interrupts and | 1823 | * setting NEED_RESCHED or anything that normal interrupts and |
1826 | * exceptions might do. | 1824 | * exceptions might do. |
1827 | */ | 1825 | */ |
1828 | call save_paranoid | 1826 | call save_paranoid |
1829 | DEFAULT_FRAME 0 | 1827 | DEFAULT_FRAME 0 |
1830 | 1828 | ||
1831 | /* | 1829 | /* |
1832 | * Save off the CR2 register. If we take a page fault in the NMI then | 1830 | * Save off the CR2 register. If we take a page fault in the NMI then |
1833 | * it could corrupt the CR2 value. If the NMI preempts a page fault | 1831 | * it could corrupt the CR2 value. If the NMI preempts a page fault |
1834 | * handler before it was able to read the CR2 register, and then the | 1832 | * handler before it was able to read the CR2 register, and then the |
1835 | * NMI itself takes a page fault, the page fault that was preempted | 1833 | * NMI itself takes a page fault, the page fault that was preempted |
1836 | * will read the information from the NMI page fault and not the | 1834 | * will read the information from the NMI page fault and not the |
1837 | * original fault. Save it off and restore it if it changes. | 1835 | * original fault. Save it off and restore it if it changes. |
1838 | * Use the r12 callee-saved register. | 1836 | * Use the r12 callee-saved register. |
1839 | */ | 1837 | */ |
1840 | movq %cr2, %r12 | 1838 | movq %cr2, %r12 |
1841 | 1839 | ||
1842 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ | 1840 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ |
1843 | movq %rsp,%rdi | 1841 | movq %rsp,%rdi |
1844 | movq $-1,%rsi | 1842 | movq $-1,%rsi |
1845 | call do_nmi | 1843 | call do_nmi |
1846 | 1844 | ||
1847 | /* Did the NMI take a page fault? Restore cr2 if it did */ | 1845 | /* Did the NMI take a page fault? Restore cr2 if it did */ |
1848 | movq %cr2, %rcx | 1846 | movq %cr2, %rcx |
1849 | cmpq %rcx, %r12 | 1847 | cmpq %rcx, %r12 |
1850 | je 1f | 1848 | je 1f |
1851 | movq %r12, %cr2 | 1849 | movq %r12, %cr2 |
1852 | 1: | 1850 | 1: |
1853 | 1851 | ||
1854 | testl %ebx,%ebx /* swapgs needed? */ | 1852 | testl %ebx,%ebx /* swapgs needed? */ |
1855 | jnz nmi_restore | 1853 | jnz nmi_restore |
1856 | nmi_swapgs: | 1854 | nmi_swapgs: |
1857 | SWAPGS_UNSAFE_STACK | 1855 | SWAPGS_UNSAFE_STACK |
1858 | nmi_restore: | 1856 | nmi_restore: |
1859 | RESTORE_ALL 8 | 1857 | RESTORE_ALL 8 |
1860 | /* Clear the NMI executing stack variable */ | 1858 | /* Clear the NMI executing stack variable */ |
1861 | movq $0, 10*8(%rsp) | 1859 | movq $0, 10*8(%rsp) |
1862 | jmp irq_return | 1860 | jmp irq_return |
1863 | CFI_ENDPROC | 1861 | CFI_ENDPROC |
1864 | END(nmi) | 1862 | END(nmi) |
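The CR2 save/restore in the middle of the handler deserves a plain-C restatement (a sketch; read_cr2/write_cr2 are the conventional accessor names, used here illustratively):

    #include <stdint.h>

    extern uint64_t read_cr2(void);
    extern void write_cr2(uint64_t);
    extern void run_nmi_handler(void);       /* stands in for 'call do_nmi' */

    /* A page fault taken *inside* the NMI would clobber the CR2 value an
     * interrupted page-fault handler has not read yet, so stash CR2 in a
     * callee-saved slot across the body and restore it if it changed. */
    static void nmi_preserve_cr2(void)
    {
            uint64_t saved = read_cr2();     /* movq %cr2, %r12 */

            run_nmi_handler();

            if (read_cr2() != saved)         /* cmpq %rcx, %r12 */
                    write_cr2(saved);        /* movq %r12, %cr2 */
    }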
1865 | 1863 | ||
1866 | ENTRY(ignore_sysret) | 1864 | ENTRY(ignore_sysret) |
1867 | CFI_STARTPROC | 1865 | CFI_STARTPROC |
1868 | mov $-ENOSYS,%eax | 1866 | mov $-ENOSYS,%eax |
1869 | sysret | 1867 | sysret |
1870 | CFI_ENDPROC | 1868 | CFI_ENDPROC |
1871 | END(ignore_sysret) | 1869 | END(ignore_sysret) |
1872 | 1870 | ||
1873 | /* | 1871 | /* |
1874 | * End of kprobes section | 1872 | * End of kprobes section |
1875 | */ | 1873 | */ |
1876 | .popsection | 1874 | .popsection |
1877 | 1875 |
arch/x86/kernel/signal.c
1 | /* | 1 | /* |
2 | * Copyright (C) 1991, 1992 Linus Torvalds | 2 | * Copyright (C) 1991, 1992 Linus Torvalds |
3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs | 3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs |
4 | * | 4 | * |
5 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson | 5 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson |
6 | * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes | 6 | * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes |
7 | * 2000-2002 x86-64 support by Andi Kleen | 7 | * 2000-2002 x86-64 support by Andi Kleen |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
11 | 11 | ||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/wait.h> | 17 | #include <linux/wait.h> |
18 | #include <linux/tracehook.h> | 18 | #include <linux/tracehook.h> |
19 | #include <linux/unistd.h> | 19 | #include <linux/unistd.h> |
20 | #include <linux/stddef.h> | 20 | #include <linux/stddef.h> |
21 | #include <linux/personality.h> | 21 | #include <linux/personality.h> |
22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
23 | #include <linux/user-return-notifier.h> | 23 | #include <linux/user-return-notifier.h> |
24 | #include <linux/uprobes.h> | 24 | #include <linux/uprobes.h> |
25 | 25 | ||
26 | #include <asm/processor.h> | 26 | #include <asm/processor.h> |
27 | #include <asm/ucontext.h> | 27 | #include <asm/ucontext.h> |
28 | #include <asm/i387.h> | 28 | #include <asm/i387.h> |
29 | #include <asm/fpu-internal.h> | 29 | #include <asm/fpu-internal.h> |
30 | #include <asm/vdso.h> | 30 | #include <asm/vdso.h> |
31 | #include <asm/mce.h> | 31 | #include <asm/mce.h> |
32 | #include <asm/sighandling.h> | 32 | #include <asm/sighandling.h> |
33 | 33 | ||
34 | #ifdef CONFIG_X86_64 | 34 | #ifdef CONFIG_X86_64 |
35 | #include <asm/proto.h> | 35 | #include <asm/proto.h> |
36 | #include <asm/ia32_unistd.h> | 36 | #include <asm/ia32_unistd.h> |
37 | #include <asm/sys_ia32.h> | 37 | #include <asm/sys_ia32.h> |
38 | #endif /* CONFIG_X86_64 */ | 38 | #endif /* CONFIG_X86_64 */ |
39 | 39 | ||
40 | #include <asm/syscall.h> | 40 | #include <asm/syscall.h> |
41 | #include <asm/syscalls.h> | 41 | #include <asm/syscalls.h> |
42 | 42 | ||
43 | #include <asm/sigframe.h> | 43 | #include <asm/sigframe.h> |
44 | 44 | ||
45 | #ifdef CONFIG_X86_32 | 45 | #ifdef CONFIG_X86_32 |
46 | # define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) | 46 | # define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) |
47 | #else | 47 | #else |
48 | # define FIX_EFLAGS __FIX_EFLAGS | 48 | # define FIX_EFLAGS __FIX_EFLAGS |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #define COPY(x) do { \ | 51 | #define COPY(x) do { \ |
52 | get_user_ex(regs->x, &sc->x); \ | 52 | get_user_ex(regs->x, &sc->x); \ |
53 | } while (0) | 53 | } while (0) |
54 | 54 | ||
55 | #define GET_SEG(seg) ({ \ | 55 | #define GET_SEG(seg) ({ \ |
56 | unsigned short tmp; \ | 56 | unsigned short tmp; \ |
57 | get_user_ex(tmp, &sc->seg); \ | 57 | get_user_ex(tmp, &sc->seg); \ |
58 | tmp; \ | 58 | tmp; \ |
59 | }) | 59 | }) |
60 | 60 | ||
61 | #define COPY_SEG(seg) do { \ | 61 | #define COPY_SEG(seg) do { \ |
62 | regs->seg = GET_SEG(seg); \ | 62 | regs->seg = GET_SEG(seg); \ |
63 | } while (0) | 63 | } while (0) |
64 | 64 | ||
65 | #define COPY_SEG_CPL3(seg) do { \ | 65 | #define COPY_SEG_CPL3(seg) do { \ |
66 | regs->seg = GET_SEG(seg) | 3; \ | 66 | regs->seg = GET_SEG(seg) | 3; \ |
67 | } while (0) | 67 | } while (0) |
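COPY_SEG_CPL3 exists so a forged sigcontext can never restore a selector with a kernel RPL: the low two bits of the selector are forced to 3. Expanded for cs, it is roughly:

    /* Roughly what COPY_SEG_CPL3(cs) expands to inside get_user_try: */
    {
            unsigned short tmp;
            get_user_ex(tmp, &sc->cs);
            regs->cs = tmp | 3;   /* pin the requested privilege level to ring 3 */
    }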
68 | 68 | ||
69 | int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | 69 | int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, |
70 | unsigned long *pax) | 70 | unsigned long *pax) |
71 | { | 71 | { |
72 | void __user *buf; | 72 | void __user *buf; |
73 | unsigned int tmpflags; | 73 | unsigned int tmpflags; |
74 | unsigned int err = 0; | 74 | unsigned int err = 0; |
75 | 75 | ||
76 | /* Always make any pending restarted system calls return -EINTR */ | 76 | /* Always make any pending restarted system calls return -EINTR */ |
77 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 77 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
78 | 78 | ||
79 | get_user_try { | 79 | get_user_try { |
80 | 80 | ||
81 | #ifdef CONFIG_X86_32 | 81 | #ifdef CONFIG_X86_32 |
82 | set_user_gs(regs, GET_SEG(gs)); | 82 | set_user_gs(regs, GET_SEG(gs)); |
83 | COPY_SEG(fs); | 83 | COPY_SEG(fs); |
84 | COPY_SEG(es); | 84 | COPY_SEG(es); |
85 | COPY_SEG(ds); | 85 | COPY_SEG(ds); |
86 | #endif /* CONFIG_X86_32 */ | 86 | #endif /* CONFIG_X86_32 */ |
87 | 87 | ||
88 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); | 88 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); |
89 | COPY(dx); COPY(cx); COPY(ip); | 89 | COPY(dx); COPY(cx); COPY(ip); |
90 | 90 | ||
91 | #ifdef CONFIG_X86_64 | 91 | #ifdef CONFIG_X86_64 |
92 | COPY(r8); | 92 | COPY(r8); |
93 | COPY(r9); | 93 | COPY(r9); |
94 | COPY(r10); | 94 | COPY(r10); |
95 | COPY(r11); | 95 | COPY(r11); |
96 | COPY(r12); | 96 | COPY(r12); |
97 | COPY(r13); | 97 | COPY(r13); |
98 | COPY(r14); | 98 | COPY(r14); |
99 | COPY(r15); | 99 | COPY(r15); |
100 | #endif /* CONFIG_X86_64 */ | 100 | #endif /* CONFIG_X86_64 */ |
101 | 101 | ||
102 | #ifdef CONFIG_X86_32 | 102 | #ifdef CONFIG_X86_32 |
103 | COPY_SEG_CPL3(cs); | 103 | COPY_SEG_CPL3(cs); |
104 | COPY_SEG_CPL3(ss); | 104 | COPY_SEG_CPL3(ss); |
105 | #else /* !CONFIG_X86_32 */ | 105 | #else /* !CONFIG_X86_32 */ |
106 | /* Kernel saves and restores only the CS segment register on signals, | 106 | /* Kernel saves and restores only the CS segment register on signals, |
107 | * which is the bare minimum needed to allow mixed 32/64-bit code. | 107 | * which is the bare minimum needed to allow mixed 32/64-bit code. |
108 | * App's signal handler can save/restore other segments if needed. */ | 108 | * App's signal handler can save/restore other segments if needed. */ |
109 | COPY_SEG_CPL3(cs); | 109 | COPY_SEG_CPL3(cs); |
110 | #endif /* CONFIG_X86_32 */ | 110 | #endif /* CONFIG_X86_32 */ |
111 | 111 | ||
112 | get_user_ex(tmpflags, &sc->flags); | 112 | get_user_ex(tmpflags, &sc->flags); |
113 | regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); | 113 | regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); |
114 | regs->orig_ax = -1; /* disable syscall checks */ | 114 | regs->orig_ax = -1; /* disable syscall checks */ |
115 | 115 | ||
116 | get_user_ex(buf, &sc->fpstate); | 116 | get_user_ex(buf, &sc->fpstate); |
117 | 117 | ||
118 | get_user_ex(*pax, &sc->ax); | 118 | get_user_ex(*pax, &sc->ax); |
119 | } get_user_catch(err); | 119 | } get_user_catch(err); |
120 | 120 | ||
121 | err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32)); | 121 | err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32)); |
122 | 122 | ||
123 | return err; | 123 | return err; |
124 | } | 124 | } |
125 | 125 | ||
126 | int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, | 126 | int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, |
127 | struct pt_regs *regs, unsigned long mask) | 127 | struct pt_regs *regs, unsigned long mask) |
128 | { | 128 | { |
129 | int err = 0; | 129 | int err = 0; |
130 | 130 | ||
131 | put_user_try { | 131 | put_user_try { |
132 | 132 | ||
133 | #ifdef CONFIG_X86_32 | 133 | #ifdef CONFIG_X86_32 |
134 | put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs); | 134 | put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs); |
135 | put_user_ex(regs->fs, (unsigned int __user *)&sc->fs); | 135 | put_user_ex(regs->fs, (unsigned int __user *)&sc->fs); |
136 | put_user_ex(regs->es, (unsigned int __user *)&sc->es); | 136 | put_user_ex(regs->es, (unsigned int __user *)&sc->es); |
137 | put_user_ex(regs->ds, (unsigned int __user *)&sc->ds); | 137 | put_user_ex(regs->ds, (unsigned int __user *)&sc->ds); |
138 | #endif /* CONFIG_X86_32 */ | 138 | #endif /* CONFIG_X86_32 */ |
139 | 139 | ||
140 | put_user_ex(regs->di, &sc->di); | 140 | put_user_ex(regs->di, &sc->di); |
141 | put_user_ex(regs->si, &sc->si); | 141 | put_user_ex(regs->si, &sc->si); |
142 | put_user_ex(regs->bp, &sc->bp); | 142 | put_user_ex(regs->bp, &sc->bp); |
143 | put_user_ex(regs->sp, &sc->sp); | 143 | put_user_ex(regs->sp, &sc->sp); |
144 | put_user_ex(regs->bx, &sc->bx); | 144 | put_user_ex(regs->bx, &sc->bx); |
145 | put_user_ex(regs->dx, &sc->dx); | 145 | put_user_ex(regs->dx, &sc->dx); |
146 | put_user_ex(regs->cx, &sc->cx); | 146 | put_user_ex(regs->cx, &sc->cx); |
147 | put_user_ex(regs->ax, &sc->ax); | 147 | put_user_ex(regs->ax, &sc->ax); |
148 | #ifdef CONFIG_X86_64 | 148 | #ifdef CONFIG_X86_64 |
149 | put_user_ex(regs->r8, &sc->r8); | 149 | put_user_ex(regs->r8, &sc->r8); |
150 | put_user_ex(regs->r9, &sc->r9); | 150 | put_user_ex(regs->r9, &sc->r9); |
151 | put_user_ex(regs->r10, &sc->r10); | 151 | put_user_ex(regs->r10, &sc->r10); |
152 | put_user_ex(regs->r11, &sc->r11); | 152 | put_user_ex(regs->r11, &sc->r11); |
153 | put_user_ex(regs->r12, &sc->r12); | 153 | put_user_ex(regs->r12, &sc->r12); |
154 | put_user_ex(regs->r13, &sc->r13); | 154 | put_user_ex(regs->r13, &sc->r13); |
155 | put_user_ex(regs->r14, &sc->r14); | 155 | put_user_ex(regs->r14, &sc->r14); |
156 | put_user_ex(regs->r15, &sc->r15); | 156 | put_user_ex(regs->r15, &sc->r15); |
157 | #endif /* CONFIG_X86_64 */ | 157 | #endif /* CONFIG_X86_64 */ |
158 | 158 | ||
159 | put_user_ex(current->thread.trap_nr, &sc->trapno); | 159 | put_user_ex(current->thread.trap_nr, &sc->trapno); |
160 | put_user_ex(current->thread.error_code, &sc->err); | 160 | put_user_ex(current->thread.error_code, &sc->err); |
161 | put_user_ex(regs->ip, &sc->ip); | 161 | put_user_ex(regs->ip, &sc->ip); |
162 | #ifdef CONFIG_X86_32 | 162 | #ifdef CONFIG_X86_32 |
163 | put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); | 163 | put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); |
164 | put_user_ex(regs->flags, &sc->flags); | 164 | put_user_ex(regs->flags, &sc->flags); |
165 | put_user_ex(regs->sp, &sc->sp_at_signal); | 165 | put_user_ex(regs->sp, &sc->sp_at_signal); |
166 | put_user_ex(regs->ss, (unsigned int __user *)&sc->ss); | 166 | put_user_ex(regs->ss, (unsigned int __user *)&sc->ss); |
167 | #else /* !CONFIG_X86_32 */ | 167 | #else /* !CONFIG_X86_32 */ |
168 | put_user_ex(regs->flags, &sc->flags); | 168 | put_user_ex(regs->flags, &sc->flags); |
169 | put_user_ex(regs->cs, &sc->cs); | 169 | put_user_ex(regs->cs, &sc->cs); |
170 | put_user_ex(0, &sc->gs); | 170 | put_user_ex(0, &sc->gs); |
171 | put_user_ex(0, &sc->fs); | 171 | put_user_ex(0, &sc->fs); |
172 | #endif /* CONFIG_X86_32 */ | 172 | #endif /* CONFIG_X86_32 */ |
173 | 173 | ||
174 | put_user_ex(fpstate, &sc->fpstate); | 174 | put_user_ex(fpstate, &sc->fpstate); |
175 | 175 | ||
176 | /* non-iBCS2 extensions.. */ | 176 | /* non-iBCS2 extensions.. */ |
177 | put_user_ex(mask, &sc->oldmask); | 177 | put_user_ex(mask, &sc->oldmask); |
178 | put_user_ex(current->thread.cr2, &sc->cr2); | 178 | put_user_ex(current->thread.cr2, &sc->cr2); |
179 | } put_user_catch(err); | 179 | } put_user_catch(err); |
180 | 180 | ||
181 | return err; | 181 | return err; |
182 | } | 182 | } |
183 | 183 | ||
184 | /* | 184 | /* |
185 | * Set up a signal frame. | 185 | * Set up a signal frame. |
186 | */ | 186 | */ |
187 | 187 | ||
188 | /* | 188 | /* |
189 | * Determine which stack to use.. | 189 | * Determine which stack to use.. |
190 | */ | 190 | */ |
191 | static unsigned long align_sigframe(unsigned long sp) | 191 | static unsigned long align_sigframe(unsigned long sp) |
192 | { | 192 | { |
193 | #ifdef CONFIG_X86_32 | 193 | #ifdef CONFIG_X86_32 |
194 | /* | 194 | /* |
195 | * Align the stack pointer according to the i386 ABI, | 195 | * Align the stack pointer according to the i386 ABI, |
196 | * i.e. so that on function entry ((sp + 4) & 15) == 0. | 196 | * i.e. so that on function entry ((sp + 4) & 15) == 0. |
197 | */ | 197 | */ |
198 | sp = ((sp + 4) & -16ul) - 4; | 198 | sp = ((sp + 4) & -16ul) - 4; |
199 | #else /* !CONFIG_X86_32 */ | 199 | #else /* !CONFIG_X86_32 */ |
200 | sp = round_down(sp, 16) - 8; | 200 | sp = round_down(sp, 16) - 8; |
201 | #endif | 201 | #endif |
202 | return sp; | 202 | return sp; |
203 | } | 203 | } |
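A quick worked check of the i386 branch: ((sp + 4) & -16) - 4 is the largest value not above sp that satisfies the ABI's entry condition ((sp + 4) & 15) == 0. For instance:

    #include <assert.h>

    int main(void)
    {
            unsigned long sp = 0xbffff123UL;
            unsigned long aligned = ((sp + 4) & -16UL) - 4;

            assert(aligned == 0xbffff11cUL);       /* 0xbffff11c + 4 == 0xbffff120 */
            assert(((aligned + 4) & 15) == 0);     /* i386 ABI entry condition */
            assert(aligned <= sp);
            return 0;
    }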
204 | 204 | ||
205 | static inline void __user * | 205 | static inline void __user * |
206 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, | 206 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, |
207 | void __user **fpstate) | 207 | void __user **fpstate) |
208 | { | 208 | { |
209 | /* Default to using normal stack */ | 209 | /* Default to using normal stack */ |
210 | unsigned long math_size = 0; | 210 | unsigned long math_size = 0; |
211 | unsigned long sp = regs->sp; | 211 | unsigned long sp = regs->sp; |
212 | unsigned long buf_fx = 0; | 212 | unsigned long buf_fx = 0; |
213 | int onsigstack = on_sig_stack(sp); | 213 | int onsigstack = on_sig_stack(sp); |
214 | 214 | ||
215 | /* redzone */ | 215 | /* redzone */ |
216 | if (config_enabled(CONFIG_X86_64)) | 216 | if (config_enabled(CONFIG_X86_64)) |
217 | sp -= 128; | 217 | sp -= 128; |
218 | 218 | ||
219 | if (!onsigstack) { | 219 | if (!onsigstack) { |
220 | /* This is the X/Open sanctioned signal stack switching. */ | 220 | /* This is the X/Open sanctioned signal stack switching. */ |
221 | if (ka->sa.sa_flags & SA_ONSTACK) { | 221 | if (ka->sa.sa_flags & SA_ONSTACK) { |
222 | if (current->sas_ss_size) | 222 | if (current->sas_ss_size) |
223 | sp = current->sas_ss_sp + current->sas_ss_size; | 223 | sp = current->sas_ss_sp + current->sas_ss_size; |
224 | } else if (config_enabled(CONFIG_X86_32) && | 224 | } else if (config_enabled(CONFIG_X86_32) && |
225 | (regs->ss & 0xffff) != __USER_DS && | 225 | (regs->ss & 0xffff) != __USER_DS && |
226 | !(ka->sa.sa_flags & SA_RESTORER) && | 226 | !(ka->sa.sa_flags & SA_RESTORER) && |
227 | ka->sa.sa_restorer) { | 227 | ka->sa.sa_restorer) { |
228 | /* This is the legacy signal stack switching. */ | 228 | /* This is the legacy signal stack switching. */ |
229 | sp = (unsigned long) ka->sa.sa_restorer; | 229 | sp = (unsigned long) ka->sa.sa_restorer; |
230 | } | 230 | } |
231 | } | 231 | } |
232 | 232 | ||
233 | if (used_math()) { | 233 | if (used_math()) { |
234 | sp = alloc_mathframe(sp, config_enabled(CONFIG_X86_32), | 234 | sp = alloc_mathframe(sp, config_enabled(CONFIG_X86_32), |
235 | &buf_fx, &math_size); | 235 | &buf_fx, &math_size); |
236 | *fpstate = (void __user *)sp; | 236 | *fpstate = (void __user *)sp; |
237 | } | 237 | } |
238 | 238 | ||
239 | sp = align_sigframe(sp - frame_size); | 239 | sp = align_sigframe(sp - frame_size); |
240 | 240 | ||
241 | /* | 241 | /* |
242 | * If we are on the alternate signal stack and would overflow it, don't. | 242 | * If we are on the alternate signal stack and would overflow it, don't. |
243 | * Return an always-bogus address instead so we will die with SIGSEGV. | 243 | * Return an always-bogus address instead so we will die with SIGSEGV. |
244 | */ | 244 | */ |
245 | if (onsigstack && !likely(on_sig_stack(sp))) | 245 | if (onsigstack && !likely(on_sig_stack(sp))) |
246 | return (void __user *)-1L; | 246 | return (void __user *)-1L; |
247 | 247 | ||
248 | /* save i387 and extended state */ | 248 | /* save i387 and extended state */ |
249 | if (used_math() && | 249 | if (used_math() && |
250 | save_xstate_sig(*fpstate, (void __user *)buf_fx, math_size) < 0) | 250 | save_xstate_sig(*fpstate, (void __user *)buf_fx, math_size) < 0) |
251 | return (void __user *)-1L; | 251 | return (void __user *)-1L; |
252 | 252 | ||
253 | return (void __user *)sp; | 253 | return (void __user *)sp; |
254 | } | 254 | } |
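Note the load-bearing guard near the end of get_sigframe: if frame construction began on the alternate signal stack and the computed sp has walked off it, the function returns an always-faulting address so the overflow becomes a clean SIGSEGV instead of silent corruption. The predicate, isolated (on_sig_stack() used illustratively):

    #include <stdbool.h>

    extern bool on_sig_stack(unsigned long sp);

    /* Sketch: reject a frame that would escape the alternate stack. */
    static void *checked_frame(unsigned long sp_before, unsigned long sp_after)
    {
            if (on_sig_stack(sp_before) && !on_sig_stack(sp_after))
                    return (void *)-1L;   /* guaranteed-bogus address */
            return (void *)sp_after;
    }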
255 | 255 | ||
256 | #ifdef CONFIG_X86_32 | 256 | #ifdef CONFIG_X86_32 |
257 | static const struct { | 257 | static const struct { |
258 | u16 poplmovl; | 258 | u16 poplmovl; |
259 | u32 val; | 259 | u32 val; |
260 | u16 int80; | 260 | u16 int80; |
261 | } __attribute__((packed)) retcode = { | 261 | } __attribute__((packed)) retcode = { |
262 | 0xb858, /* popl %eax; movl $..., %eax */ | 262 | 0xb858, /* popl %eax; movl $..., %eax */ |
263 | __NR_sigreturn, | 263 | __NR_sigreturn, |
264 | 0x80cd, /* int $0x80 */ | 264 | 0x80cd, /* int $0x80 */ |
265 | }; | 265 | }; |
266 | 266 | ||
267 | static const struct { | 267 | static const struct { |
268 | u8 movl; | 268 | u8 movl; |
269 | u32 val; | 269 | u32 val; |
270 | u16 int80; | 270 | u16 int80; |
271 | u8 pad; | 271 | u8 pad; |
272 | } __attribute__((packed)) rt_retcode = { | 272 | } __attribute__((packed)) rt_retcode = { |
273 | 0xb8, /* movl $..., %eax */ | 273 | 0xb8, /* movl $..., %eax */ |
274 | __NR_rt_sigreturn, | 274 | __NR_rt_sigreturn, |
275 | 0x80cd, /* int $0x80 */ | 275 | 0x80cd, /* int $0x80 */ |
276 | 0 | 276 | 0 |
277 | }; | 277 | }; |
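Laid out little-endian, the two packed structs reproduce the historical trampolines byte for byte. Assuming the i386 numbers __NR_sigreturn == 119 (0x77) and __NR_rt_sigreturn == 173 (0xad), the images are:

    /*
     * retcode:     58              popl %eax
     *              b8 77 00 00 00  movl $119, %eax
     *              cd 80           int  $0x80
     *
     * rt_retcode:  b8 ad 00 00 00  movl $173, %eax
     *              cd 80           int  $0x80
     *              00              (pad)
     */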
278 | 278 | ||
279 | static int | 279 | static int |
280 | __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | 280 | __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, |
281 | struct pt_regs *regs) | 281 | struct pt_regs *regs) |
282 | { | 282 | { |
283 | struct sigframe __user *frame; | 283 | struct sigframe __user *frame; |
284 | void __user *restorer; | 284 | void __user *restorer; |
285 | int err = 0; | 285 | int err = 0; |
286 | void __user *fpstate = NULL; | 286 | void __user *fpstate = NULL; |
287 | 287 | ||
288 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); | 288 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
289 | 289 | ||
290 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 290 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
291 | return -EFAULT; | 291 | return -EFAULT; |
292 | 292 | ||
293 | if (__put_user(sig, &frame->sig)) | 293 | if (__put_user(sig, &frame->sig)) |
294 | return -EFAULT; | 294 | return -EFAULT; |
295 | 295 | ||
296 | if (setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0])) | 296 | if (setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0])) |
297 | return -EFAULT; | 297 | return -EFAULT; |
298 | 298 | ||
299 | if (_NSIG_WORDS > 1) { | 299 | if (_NSIG_WORDS > 1) { |
300 | if (__copy_to_user(&frame->extramask, &set->sig[1], | 300 | if (__copy_to_user(&frame->extramask, &set->sig[1], |
301 | sizeof(frame->extramask))) | 301 | sizeof(frame->extramask))) |
302 | return -EFAULT; | 302 | return -EFAULT; |
303 | } | 303 | } |
304 | 304 | ||
305 | if (current->mm->context.vdso) | 305 | if (current->mm->context.vdso) |
306 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); | 306 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); |
307 | else | 307 | else |
308 | restorer = &frame->retcode; | 308 | restorer = &frame->retcode; |
309 | if (ka->sa.sa_flags & SA_RESTORER) | 309 | if (ka->sa.sa_flags & SA_RESTORER) |
310 | restorer = ka->sa.sa_restorer; | 310 | restorer = ka->sa.sa_restorer; |
311 | 311 | ||
312 | /* Set up to return from userspace. */ | 312 | /* Set up to return from userspace. */ |
313 | err |= __put_user(restorer, &frame->pretcode); | 313 | err |= __put_user(restorer, &frame->pretcode); |
314 | 314 | ||
315 | /* | 315 | /* |
316 | * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80 | 316 | * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80 |
317 | * | 317 | * |
318 | * WE DO NOT USE IT ANY MORE! It's only left here for historical | 318 | * WE DO NOT USE IT ANY MORE! It's only left here for historical |
319 | * reasons and because gdb uses it as a signature to notice | 319 | * reasons and because gdb uses it as a signature to notice |
320 | * signal handler stack frames. | 320 | * signal handler stack frames. |
321 | */ | 321 | */ |
322 | err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); | 322 | err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); |
323 | 323 | ||
324 | if (err) | 324 | if (err) |
325 | return -EFAULT; | 325 | return -EFAULT; |
326 | 326 | ||
327 | /* Set up registers for signal handler */ | 327 | /* Set up registers for signal handler */ |
328 | regs->sp = (unsigned long)frame; | 328 | regs->sp = (unsigned long)frame; |
329 | regs->ip = (unsigned long)ka->sa.sa_handler; | 329 | regs->ip = (unsigned long)ka->sa.sa_handler; |
330 | regs->ax = (unsigned long)sig; | 330 | regs->ax = (unsigned long)sig; |
331 | regs->dx = 0; | 331 | regs->dx = 0; |
332 | regs->cx = 0; | 332 | regs->cx = 0; |
333 | 333 | ||
334 | regs->ds = __USER_DS; | 334 | regs->ds = __USER_DS; |
335 | regs->es = __USER_DS; | 335 | regs->es = __USER_DS; |
336 | regs->ss = __USER_DS; | 336 | regs->ss = __USER_DS; |
337 | regs->cs = __USER_CS; | 337 | regs->cs = __USER_CS; |
338 | 338 | ||
339 | return 0; | 339 | return 0; |
340 | } | 340 | } |
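For orientation, the register setup at the tail of __setup_frame is what makes the classic handler(int sig) invocation work; sketched from the user-space side (illustrative, not normative):

    /*
     *   %esp -> struct sigframe  (pretcode, sig, sigcontext, ...)
     *   %eip  = ka->sa.sa_handler, so execution resumes in handler(sig)
     *   %eax  = sig, %edx = %ecx = 0
     *   %ds/%es/%ss = __USER_DS, %cs = __USER_CS
     *
     * When handler() returns, 'ret' pops frame->pretcode, which points at
     * the vdso sigreturn stub (or sa_restorer), re-entering the kernel to
     * undo all of the above.
     */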
341 | 341 | ||
342 | static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 342 | static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
343 | sigset_t *set, struct pt_regs *regs) | 343 | sigset_t *set, struct pt_regs *regs) |
344 | { | 344 | { |
345 | struct rt_sigframe __user *frame; | 345 | struct rt_sigframe __user *frame; |
346 | void __user *restorer; | 346 | void __user *restorer; |
347 | int err = 0; | 347 | int err = 0; |
348 | void __user *fpstate = NULL; | 348 | void __user *fpstate = NULL; |
349 | 349 | ||
350 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); | 350 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
351 | 351 | ||
352 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 352 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
353 | return -EFAULT; | 353 | return -EFAULT; |
354 | 354 | ||
355 | put_user_try { | 355 | put_user_try { |
356 | put_user_ex(sig, &frame->sig); | 356 | put_user_ex(sig, &frame->sig); |
357 | put_user_ex(&frame->info, &frame->pinfo); | 357 | put_user_ex(&frame->info, &frame->pinfo); |
358 | put_user_ex(&frame->uc, &frame->puc); | 358 | put_user_ex(&frame->uc, &frame->puc); |
359 | 359 | ||
360 | /* Create the ucontext. */ | 360 | /* Create the ucontext. */ |
361 | if (cpu_has_xsave) | 361 | if (cpu_has_xsave) |
362 | put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); | 362 | put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); |
363 | else | 363 | else |
364 | put_user_ex(0, &frame->uc.uc_flags); | 364 | put_user_ex(0, &frame->uc.uc_flags); |
365 | put_user_ex(0, &frame->uc.uc_link); | 365 | put_user_ex(0, &frame->uc.uc_link); |
366 | put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 366 | put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
367 | put_user_ex(sas_ss_flags(regs->sp), | 367 | put_user_ex(sas_ss_flags(regs->sp), |
368 | &frame->uc.uc_stack.ss_flags); | 368 | &frame->uc.uc_stack.ss_flags); |
369 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 369 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
370 | 370 | ||
371 | /* Set up to return from userspace. */ | 371 | /* Set up to return from userspace. */ |
372 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); | 372 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); |
373 | if (ka->sa.sa_flags & SA_RESTORER) | 373 | if (ka->sa.sa_flags & SA_RESTORER) |
374 | restorer = ka->sa.sa_restorer; | 374 | restorer = ka->sa.sa_restorer; |
375 | put_user_ex(restorer, &frame->pretcode); | 375 | put_user_ex(restorer, &frame->pretcode); |
376 | 376 | ||
377 | /* | 377 | /* |
378 | * This is movl $__NR_rt_sigreturn, %ax ; int $0x80 | 378 | * This is movl $__NR_rt_sigreturn, %ax ; int $0x80 |
379 | * | 379 | * |
380 | * WE DO NOT USE IT ANY MORE! It's only left here for historical | 380 | * WE DO NOT USE IT ANY MORE! It's only left here for historical |
381 | * reasons and because gdb uses it as a signature to notice | 381 | * reasons and because gdb uses it as a signature to notice |
382 | * signal handler stack frames. | 382 | * signal handler stack frames. |
383 | */ | 383 | */ |
384 | put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); | 384 | put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); |
385 | } put_user_catch(err); | 385 | } put_user_catch(err); |
386 | 386 | ||
387 | err |= copy_siginfo_to_user(&frame->info, info); | 387 | err |= copy_siginfo_to_user(&frame->info, info); |
388 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, | 388 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, |
389 | regs, set->sig[0]); | 389 | regs, set->sig[0]); |
390 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 390 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
391 | 391 | ||
392 | if (err) | 392 | if (err) |
393 | return -EFAULT; | 393 | return -EFAULT; |
394 | 394 | ||
395 | /* Set up registers for signal handler */ | 395 | /* Set up registers for signal handler */ |
396 | regs->sp = (unsigned long)frame; | 396 | regs->sp = (unsigned long)frame; |
397 | regs->ip = (unsigned long)ka->sa.sa_handler; | 397 | regs->ip = (unsigned long)ka->sa.sa_handler; |
398 | regs->ax = (unsigned long)sig; | 398 | regs->ax = (unsigned long)sig; |
399 | regs->dx = (unsigned long)&frame->info; | 399 | regs->dx = (unsigned long)&frame->info; |
400 | regs->cx = (unsigned long)&frame->uc; | 400 | regs->cx = (unsigned long)&frame->uc; |
401 | 401 | ||
402 | regs->ds = __USER_DS; | 402 | regs->ds = __USER_DS; |
403 | regs->es = __USER_DS; | 403 | regs->es = __USER_DS; |
404 | regs->ss = __USER_DS; | 404 | regs->ss = __USER_DS; |
405 | regs->cs = __USER_CS; | 405 | regs->cs = __USER_CS; |
406 | 406 | ||
407 | return 0; | 407 | return 0; |
408 | } | 408 | } |
409 | #else /* !CONFIG_X86_32 */ | 409 | #else /* !CONFIG_X86_32 */ |
410 | static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 410 | static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
411 | sigset_t *set, struct pt_regs *regs) | 411 | sigset_t *set, struct pt_regs *regs) |
412 | { | 412 | { |
413 | struct rt_sigframe __user *frame; | 413 | struct rt_sigframe __user *frame; |
414 | void __user *fp = NULL; | 414 | void __user *fp = NULL; |
415 | int err = 0; | 415 | int err = 0; |
416 | struct task_struct *me = current; | 416 | struct task_struct *me = current; |
417 | 417 | ||
418 | frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp); | 418 | frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp); |
419 | 419 | ||
420 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 420 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
421 | return -EFAULT; | 421 | return -EFAULT; |
422 | 422 | ||
423 | if (ka->sa.sa_flags & SA_SIGINFO) { | 423 | if (ka->sa.sa_flags & SA_SIGINFO) { |
424 | if (copy_siginfo_to_user(&frame->info, info)) | 424 | if (copy_siginfo_to_user(&frame->info, info)) |
425 | return -EFAULT; | 425 | return -EFAULT; |
426 | } | 426 | } |
427 | 427 | ||
428 | put_user_try { | 428 | put_user_try { |
429 | /* Create the ucontext. */ | 429 | /* Create the ucontext. */ |
430 | if (cpu_has_xsave) | 430 | if (cpu_has_xsave) |
431 | put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); | 431 | put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); |
432 | else | 432 | else |
433 | put_user_ex(0, &frame->uc.uc_flags); | 433 | put_user_ex(0, &frame->uc.uc_flags); |
434 | put_user_ex(0, &frame->uc.uc_link); | 434 | put_user_ex(0, &frame->uc.uc_link); |
435 | put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 435 | put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
436 | put_user_ex(sas_ss_flags(regs->sp), | 436 | put_user_ex(sas_ss_flags(regs->sp), |
437 | &frame->uc.uc_stack.ss_flags); | 437 | &frame->uc.uc_stack.ss_flags); |
438 | put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size); | 438 | put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size); |
439 | 439 | ||
440 | /* Set up to return from userspace. If provided, use a stub | 440 | /* Set up to return from userspace. If provided, use a stub |
441 | already in userspace. */ | 441 | already in userspace. */ |
442 | /* x86-64 should always use SA_RESTORER. */ | 442 | /* x86-64 should always use SA_RESTORER. */ |
443 | if (ka->sa.sa_flags & SA_RESTORER) { | 443 | if (ka->sa.sa_flags & SA_RESTORER) { |
444 | put_user_ex(ka->sa.sa_restorer, &frame->pretcode); | 444 | put_user_ex(ka->sa.sa_restorer, &frame->pretcode); |
445 | } else { | 445 | } else { |
446 | /* could use a vstub here */ | 446 | /* could use a vstub here */ |
447 | err |= -EFAULT; | 447 | err |= -EFAULT; |
448 | } | 448 | } |
449 | } put_user_catch(err); | 449 | } put_user_catch(err); |
450 | 450 | ||
451 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); | 451 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); |
452 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 452 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
453 | 453 | ||
454 | if (err) | 454 | if (err) |
455 | return -EFAULT; | 455 | return -EFAULT; |
456 | 456 | ||
457 | /* Set up registers for signal handler */ | 457 | /* Set up registers for signal handler */ |
458 | regs->di = sig; | 458 | regs->di = sig; |
459 | /* In case the signal handler was declared without prototypes */ | 459 | /* In case the signal handler was declared without prototypes */ |
460 | regs->ax = 0; | 460 | regs->ax = 0; |
461 | 461 | ||
462 | /* This also works for non-SA_SIGINFO handlers because they expect the | 462 | /* This also works for non-SA_SIGINFO handlers because they expect the |
463 | next argument after the signal number on the stack. */ | 463 | next argument after the signal number on the stack. */ |
464 | regs->si = (unsigned long)&frame->info; | 464 | regs->si = (unsigned long)&frame->info; |
465 | regs->dx = (unsigned long)&frame->uc; | 465 | regs->dx = (unsigned long)&frame->uc; |
466 | regs->ip = (unsigned long) ka->sa.sa_handler; | 466 | regs->ip = (unsigned long) ka->sa.sa_handler; |
467 | 467 | ||
468 | regs->sp = (unsigned long)frame; | 468 | regs->sp = (unsigned long)frame; |
469 | 469 | ||
470 | /* Set up the CS register to run signal handlers in 64-bit mode, | 470 | /* Set up the CS register to run signal handlers in 64-bit mode, |
471 | even if the handler happens to be interrupting 32-bit code. */ | 471 | even if the handler happens to be interrupting 32-bit code. */ |
472 | regs->cs = __USER_CS; | 472 | regs->cs = __USER_CS; |
473 | 473 | ||
474 | return 0; | 474 | return 0; |
475 | } | 475 | } |
476 | #endif /* CONFIG_X86_32 */ | 476 | #endif /* CONFIG_X86_32 */ |
477 | 477 | ||
478 | static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, | 478 | static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, |
479 | siginfo_t *info, compat_sigset_t *set, | 479 | siginfo_t *info, compat_sigset_t *set, |
480 | struct pt_regs *regs) | 480 | struct pt_regs *regs) |
481 | { | 481 | { |
482 | #ifdef CONFIG_X86_X32_ABI | 482 | #ifdef CONFIG_X86_X32_ABI |
483 | struct rt_sigframe_x32 __user *frame; | 483 | struct rt_sigframe_x32 __user *frame; |
484 | void __user *restorer; | 484 | void __user *restorer; |
485 | int err = 0; | 485 | int err = 0; |
486 | void __user *fpstate = NULL; | 486 | void __user *fpstate = NULL; |
487 | 487 | ||
488 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); | 488 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
489 | 489 | ||
490 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 490 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
491 | return -EFAULT; | 491 | return -EFAULT; |
492 | 492 | ||
493 | if (ka->sa.sa_flags & SA_SIGINFO) { | 493 | if (ka->sa.sa_flags & SA_SIGINFO) { |
494 | if (copy_siginfo_to_user32(&frame->info, info)) | 494 | if (copy_siginfo_to_user32(&frame->info, info)) |
495 | return -EFAULT; | 495 | return -EFAULT; |
496 | } | 496 | } |
497 | 497 | ||
498 | put_user_try { | 498 | put_user_try { |
499 | /* Create the ucontext. */ | 499 | /* Create the ucontext. */ |
500 | if (cpu_has_xsave) | 500 | if (cpu_has_xsave) |
501 | put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); | 501 | put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); |
502 | else | 502 | else |
503 | put_user_ex(0, &frame->uc.uc_flags); | 503 | put_user_ex(0, &frame->uc.uc_flags); |
504 | put_user_ex(0, &frame->uc.uc_link); | 504 | put_user_ex(0, &frame->uc.uc_link); |
505 | put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 505 | put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
506 | put_user_ex(sas_ss_flags(regs->sp), | 506 | put_user_ex(sas_ss_flags(regs->sp), |
507 | &frame->uc.uc_stack.ss_flags); | 507 | &frame->uc.uc_stack.ss_flags); |
508 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 508 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
509 | put_user_ex(0, &frame->uc.uc__pad0); | 509 | put_user_ex(0, &frame->uc.uc__pad0); |
510 | 510 | ||
511 | if (ka->sa.sa_flags & SA_RESTORER) { | 511 | if (ka->sa.sa_flags & SA_RESTORER) { |
512 | restorer = ka->sa.sa_restorer; | 512 | restorer = ka->sa.sa_restorer; |
513 | } else { | 513 | } else { |
514 | /* could use a vstub here */ | 514 | /* could use a vstub here */ |
515 | restorer = NULL; | 515 | restorer = NULL; |
516 | err |= -EFAULT; | 516 | err |= -EFAULT; |
517 | } | 517 | } |
518 | put_user_ex(restorer, &frame->pretcode); | 518 | put_user_ex(restorer, &frame->pretcode); |
519 | } put_user_catch(err); | 519 | } put_user_catch(err); |
520 | 520 | ||
521 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, | 521 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, |
522 | regs, set->sig[0]); | 522 | regs, set->sig[0]); |
523 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 523 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
524 | 524 | ||
525 | if (err) | 525 | if (err) |
526 | return -EFAULT; | 526 | return -EFAULT; |
527 | 527 | ||
528 | /* Set up registers for signal handler */ | 528 | /* Set up registers for signal handler */ |
529 | regs->sp = (unsigned long) frame; | 529 | regs->sp = (unsigned long) frame; |
530 | regs->ip = (unsigned long) ka->sa.sa_handler; | 530 | regs->ip = (unsigned long) ka->sa.sa_handler; |
531 | 531 | ||
532 | /* We use the x32 calling convention here... */ | 532 | /* We use the x32 calling convention here... */ |
533 | regs->di = sig; | 533 | regs->di = sig; |
534 | regs->si = (unsigned long) &frame->info; | 534 | regs->si = (unsigned long) &frame->info; |
535 | regs->dx = (unsigned long) &frame->uc; | 535 | regs->dx = (unsigned long) &frame->uc; |
536 | 536 | ||
537 | loadsegment(ds, __USER_DS); | 537 | loadsegment(ds, __USER_DS); |
538 | loadsegment(es, __USER_DS); | 538 | loadsegment(es, __USER_DS); |
539 | 539 | ||
540 | regs->cs = __USER_CS; | 540 | regs->cs = __USER_CS; |
541 | regs->ss = __USER_DS; | 541 | regs->ss = __USER_DS; |
542 | #endif /* CONFIG_X86_X32_ABI */ | 542 | #endif /* CONFIG_X86_X32_ABI */ |
543 | 543 | ||
544 | return 0; | 544 | return 0; |
545 | } | 545 | } |
546 | 546 | ||
547 | #ifdef CONFIG_X86_32 | 547 | #ifdef CONFIG_X86_32 |
548 | /* | 548 | /* |
549 | * Atomically swap in the new signal mask, and wait for a signal. | 549 | * Atomically swap in the new signal mask, and wait for a signal. |
550 | */ | 550 | */ |
551 | asmlinkage int | 551 | asmlinkage int |
552 | sys_sigsuspend(int history0, int history1, old_sigset_t mask) | 552 | sys_sigsuspend(int history0, int history1, old_sigset_t mask) |
553 | { | 553 | { |
554 | sigset_t blocked; | 554 | sigset_t blocked; |
555 | siginitset(&blocked, mask); | 555 | siginitset(&blocked, mask); |
556 | return sigsuspend(&blocked); | 556 | return sigsuspend(&blocked); |
557 | } | 557 | } |
558 | 558 | ||
559 | asmlinkage int | 559 | asmlinkage int |
560 | sys_sigaction(int sig, const struct old_sigaction __user *act, | 560 | sys_sigaction(int sig, const struct old_sigaction __user *act, |
561 | struct old_sigaction __user *oact) | 561 | struct old_sigaction __user *oact) |
562 | { | 562 | { |
563 | struct k_sigaction new_ka, old_ka; | 563 | struct k_sigaction new_ka, old_ka; |
564 | int ret = 0; | 564 | int ret = 0; |
565 | 565 | ||
566 | if (act) { | 566 | if (act) { |
567 | old_sigset_t mask; | 567 | old_sigset_t mask; |
568 | 568 | ||
569 | if (!access_ok(VERIFY_READ, act, sizeof(*act))) | 569 | if (!access_ok(VERIFY_READ, act, sizeof(*act))) |
570 | return -EFAULT; | 570 | return -EFAULT; |
571 | 571 | ||
572 | get_user_try { | 572 | get_user_try { |
573 | get_user_ex(new_ka.sa.sa_handler, &act->sa_handler); | 573 | get_user_ex(new_ka.sa.sa_handler, &act->sa_handler); |
574 | get_user_ex(new_ka.sa.sa_flags, &act->sa_flags); | 574 | get_user_ex(new_ka.sa.sa_flags, &act->sa_flags); |
575 | get_user_ex(mask, &act->sa_mask); | 575 | get_user_ex(mask, &act->sa_mask); |
576 | get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer); | 576 | get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer); |
577 | } get_user_catch(ret); | 577 | } get_user_catch(ret); |
578 | 578 | ||
579 | if (ret) | 579 | if (ret) |
580 | return -EFAULT; | 580 | return -EFAULT; |
581 | siginitset(&new_ka.sa.sa_mask, mask); | 581 | siginitset(&new_ka.sa.sa_mask, mask); |
582 | } | 582 | } |
583 | 583 | ||
584 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | 584 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); |
585 | 585 | ||
586 | if (!ret && oact) { | 586 | if (!ret && oact) { |
587 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) | 587 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) |
588 | return -EFAULT; | 588 | return -EFAULT; |
589 | 589 | ||
590 | put_user_try { | 590 | put_user_try { |
591 | put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler); | 591 | put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler); |
592 | put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags); | 592 | put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags); |
593 | put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | 593 | put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); |
594 | put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer); | 594 | put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer); |
595 | } put_user_catch(ret); | 595 | } put_user_catch(ret); |
596 | 596 | ||
597 | if (ret) | 597 | if (ret) |
598 | return -EFAULT; | 598 | return -EFAULT; |
599 | } | 599 | } |
600 | 600 | ||
601 | return ret; | 601 | return ret; |
602 | } | 602 | } |
603 | #endif /* CONFIG_X86_32 */ | 603 | #endif /* CONFIG_X86_32 */ |
604 | 604 | ||
605 | /* | 605 | /* |
606 | * Do a signal return; undo the signal stack. | 606 | * Do a signal return; undo the signal stack. |
607 | */ | 607 | */ |
608 | #ifdef CONFIG_X86_32 | 608 | #ifdef CONFIG_X86_32 |
609 | unsigned long sys_sigreturn(struct pt_regs *regs) | 609 | unsigned long sys_sigreturn(struct pt_regs *regs) |
610 | { | 610 | { |
611 | struct sigframe __user *frame; | 611 | struct sigframe __user *frame; |
612 | unsigned long ax; | 612 | unsigned long ax; |
613 | sigset_t set; | 613 | sigset_t set; |
614 | 614 | ||
615 | frame = (struct sigframe __user *)(regs->sp - 8); | 615 | frame = (struct sigframe __user *)(regs->sp - 8); |
616 | 616 | ||
617 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 617 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
618 | goto badframe; | 618 | goto badframe; |
619 | if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 | 619 | if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 |
620 | && __copy_from_user(&set.sig[1], &frame->extramask, | 620 | && __copy_from_user(&set.sig[1], &frame->extramask, |
621 | sizeof(frame->extramask)))) | 621 | sizeof(frame->extramask)))) |
622 | goto badframe; | 622 | goto badframe; |
623 | 623 | ||
624 | set_current_blocked(&set); | 624 | set_current_blocked(&set); |
625 | 625 | ||
626 | if (restore_sigcontext(regs, &frame->sc, &ax)) | 626 | if (restore_sigcontext(regs, &frame->sc, &ax)) |
627 | goto badframe; | 627 | goto badframe; |
628 | return ax; | 628 | return ax; |
629 | 629 | ||
630 | badframe: | 630 | badframe: |
631 | signal_fault(regs, frame, "sigreturn"); | 631 | signal_fault(regs, frame, "sigreturn"); |
632 | 632 | ||
633 | return 0; | 633 | return 0; |
634 | } | 634 | } |
635 | #endif /* CONFIG_X86_32 */ | 635 | #endif /* CONFIG_X86_32 */ |
636 | 636 | ||
637 | long sys_rt_sigreturn(struct pt_regs *regs) | 637 | long sys_rt_sigreturn(struct pt_regs *regs) |
638 | { | 638 | { |
639 | struct rt_sigframe __user *frame; | 639 | struct rt_sigframe __user *frame; |
640 | unsigned long ax; | 640 | unsigned long ax; |
641 | sigset_t set; | 641 | sigset_t set; |
642 | 642 | ||
643 | frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); | 643 | frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); |
644 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 644 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
645 | goto badframe; | 645 | goto badframe; |
646 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | 646 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) |
647 | goto badframe; | 647 | goto badframe; |
648 | 648 | ||
649 | set_current_blocked(&set); | 649 | set_current_blocked(&set); |
650 | 650 | ||
651 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) | 651 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) |
652 | goto badframe; | 652 | goto badframe; |
653 | 653 | ||
654 | if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) | 654 | if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) |
655 | goto badframe; | 655 | goto badframe; |
656 | 656 | ||
657 | return ax; | 657 | return ax; |
658 | 658 | ||
659 | badframe: | 659 | badframe: |
660 | signal_fault(regs, frame, "rt_sigreturn"); | 660 | signal_fault(regs, frame, "rt_sigreturn"); |
661 | return 0; | 661 | return 0; |
662 | } | 662 | } |
663 | 663 | ||
664 | /* | 664 | /* |
665 | * OK, we're invoking a handler: | 665 | * OK, we're invoking a handler: |
666 | */ | 666 | */ |
667 | static int signr_convert(int sig) | 667 | static int signr_convert(int sig) |
668 | { | 668 | { |
669 | #ifdef CONFIG_X86_32 | 669 | #ifdef CONFIG_X86_32 |
670 | struct thread_info *info = current_thread_info(); | 670 | struct thread_info *info = current_thread_info(); |
671 | 671 | ||
672 | if (info->exec_domain && info->exec_domain->signal_invmap && sig < 32) | 672 | if (info->exec_domain && info->exec_domain->signal_invmap && sig < 32) |
673 | return info->exec_domain->signal_invmap[sig]; | 673 | return info->exec_domain->signal_invmap[sig]; |
674 | #endif /* CONFIG_X86_32 */ | 674 | #endif /* CONFIG_X86_32 */ |
675 | return sig; | 675 | return sig; |
676 | } | 676 | } |
677 | 677 | ||
678 | static int | 678 | static int |
679 | setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 679 | setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
680 | struct pt_regs *regs) | 680 | struct pt_regs *regs) |
681 | { | 681 | { |
682 | int usig = signr_convert(sig); | 682 | int usig = signr_convert(sig); |
683 | sigset_t *set = sigmask_to_save(); | 683 | sigset_t *set = sigmask_to_save(); |
684 | compat_sigset_t *cset = (compat_sigset_t *) set; | 684 | compat_sigset_t *cset = (compat_sigset_t *) set; |
685 | 685 | ||
686 | /* Set up the stack frame */ | 686 | /* Set up the stack frame */ |
687 | if (is_ia32_frame()) { | 687 | if (is_ia32_frame()) { |
688 | if (ka->sa.sa_flags & SA_SIGINFO) | 688 | if (ka->sa.sa_flags & SA_SIGINFO) |
689 | return ia32_setup_rt_frame(usig, ka, info, cset, regs); | 689 | return ia32_setup_rt_frame(usig, ka, info, cset, regs); |
690 | else | 690 | else |
691 | return ia32_setup_frame(usig, ka, cset, regs); | 691 | return ia32_setup_frame(usig, ka, cset, regs); |
692 | } else if (is_x32_frame()) { | 692 | } else if (is_x32_frame()) { |
693 | return x32_setup_rt_frame(usig, ka, info, cset, regs); | 693 | return x32_setup_rt_frame(usig, ka, info, cset, regs); |
694 | } else { | 694 | } else { |
695 | return __setup_rt_frame(sig, ka, info, set, regs); | 695 | return __setup_rt_frame(sig, ka, info, set, regs); |
696 | } | 696 | } |
697 | } | 697 | } |
698 | 698 | ||
699 | static void | 699 | static void |
700 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 700 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, |
701 | struct pt_regs *regs) | 701 | struct pt_regs *regs) |
702 | { | 702 | { |
703 | /* Are we from a system call? */ | 703 | /* Are we from a system call? */ |
704 | if (syscall_get_nr(current, regs) >= 0) { | 704 | if (syscall_get_nr(current, regs) >= 0) { |
705 | /* If so, check system call restarting... */ | 705 | /* If so, check system call restarting... */ |
706 | switch (syscall_get_error(current, regs)) { | 706 | switch (syscall_get_error(current, regs)) { |
707 | case -ERESTART_RESTARTBLOCK: | 707 | case -ERESTART_RESTARTBLOCK: |
708 | case -ERESTARTNOHAND: | 708 | case -ERESTARTNOHAND: |
709 | regs->ax = -EINTR; | 709 | regs->ax = -EINTR; |
710 | break; | 710 | break; |
711 | 711 | ||
712 | case -ERESTARTSYS: | 712 | case -ERESTARTSYS: |
713 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 713 | if (!(ka->sa.sa_flags & SA_RESTART)) { |
714 | regs->ax = -EINTR; | 714 | regs->ax = -EINTR; |
715 | break; | 715 | break; |
716 | } | 716 | } |
717 | /* fallthrough */ | 717 | /* fallthrough */ |
718 | case -ERESTARTNOINTR: | 718 | case -ERESTARTNOINTR: |
719 | regs->ax = regs->orig_ax; | 719 | regs->ax = regs->orig_ax; |
720 | regs->ip -= 2; | 720 | regs->ip -= 2; |
721 | break; | 721 | break; |
722 | } | 722 | } |
723 | } | 723 | } |
724 | 724 | ||
725 | /* | 725 | /* |
726 | * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF | 726 | * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF |
727 | * flag so that register information in the sigcontext is correct. | 727 | * flag so that register information in the sigcontext is correct. |
728 | */ | 728 | */ |
729 | if (unlikely(regs->flags & X86_EFLAGS_TF) && | 729 | if (unlikely(regs->flags & X86_EFLAGS_TF) && |
730 | likely(test_and_clear_thread_flag(TIF_FORCED_TF))) | 730 | likely(test_and_clear_thread_flag(TIF_FORCED_TF))) |
731 | regs->flags &= ~X86_EFLAGS_TF; | 731 | regs->flags &= ~X86_EFLAGS_TF; |
732 | 732 | ||
733 | if (setup_rt_frame(sig, ka, info, regs) < 0) { | 733 | if (setup_rt_frame(sig, ka, info, regs) < 0) { |
734 | force_sigsegv(sig, current); | 734 | force_sigsegv(sig, current); |
735 | return; | 735 | return; |
736 | } | 736 | } |
737 | 737 | ||
738 | /* | 738 | /* |
739 | * Clear the direction flag as per the ABI for function entry. | 739 | * Clear the direction flag as per the ABI for function entry. |
740 | */ | 740 | */ |
741 | regs->flags &= ~X86_EFLAGS_DF; | 741 | regs->flags &= ~X86_EFLAGS_DF; |
742 | 742 | ||
743 | /* | 743 | /* |
744 | * Clear TF when entering the signal handler, but | 744 | * Clear TF when entering the signal handler, but |
745 | * notify any tracer that was single-stepping it. | 745 | * notify any tracer that was single-stepping it. |
746 | * The tracer may want to single-step inside the | 746 | * The tracer may want to single-step inside the |
747 | * handler too. | 747 | * handler too. |
748 | */ | 748 | */ |
749 | regs->flags &= ~X86_EFLAGS_TF; | 749 | regs->flags &= ~X86_EFLAGS_TF; |
750 | 750 | ||
751 | signal_delivered(sig, info, ka, regs, | 751 | signal_delivered(sig, info, ka, regs, |
752 | test_thread_flag(TIF_SINGLESTEP)); | 752 | test_thread_flag(TIF_SINGLESTEP)); |
753 | } | 753 | } |
754 | 754 | ||
755 | #ifdef CONFIG_X86_32 | 755 | #ifdef CONFIG_X86_32 |
756 | #define NR_restart_syscall __NR_restart_syscall | 756 | #define NR_restart_syscall __NR_restart_syscall |
757 | #else /* !CONFIG_X86_32 */ | 757 | #else /* !CONFIG_X86_32 */ |
758 | #define NR_restart_syscall \ | 758 | #define NR_restart_syscall \ |
759 | test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall | 759 | test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall |
760 | #endif /* CONFIG_X86_32 */ | 760 | #endif /* CONFIG_X86_32 */ |
761 | 761 | ||
762 | /* | 762 | /* |
763 | * Note that 'init' is a special process: it doesn't get signals it doesn't | 763 | * Note that 'init' is a special process: it doesn't get signals it doesn't |
764 | * want to handle. Thus you cannot kill init with a SIGKILL, even by | 764 | * want to handle. Thus you cannot kill init with a SIGKILL, even by |
765 | * mistake. | 765 | * mistake. |
766 | */ | 766 | */ |
767 | static void do_signal(struct pt_regs *regs) | 767 | static void do_signal(struct pt_regs *regs) |
768 | { | 768 | { |
769 | struct k_sigaction ka; | 769 | struct k_sigaction ka; |
770 | siginfo_t info; | 770 | siginfo_t info; |
771 | int signr; | 771 | int signr; |
772 | 772 | ||
773 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 773 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
774 | if (signr > 0) { | 774 | if (signr > 0) { |
775 | /* Whee! Actually deliver the signal. */ | 775 | /* Whee! Actually deliver the signal. */ |
776 | handle_signal(signr, &info, &ka, regs); | 776 | handle_signal(signr, &info, &ka, regs); |
777 | return; | 777 | return; |
778 | } | 778 | } |
779 | 779 | ||
780 | /* Did we come from a system call? */ | 780 | /* Did we come from a system call? */ |
781 | if (syscall_get_nr(current, regs) >= 0) { | 781 | if (syscall_get_nr(current, regs) >= 0) { |
782 | /* Restart the system call - no handlers present */ | 782 | /* Restart the system call - no handlers present */ |
783 | switch (syscall_get_error(current, regs)) { | 783 | switch (syscall_get_error(current, regs)) { |
784 | case -ERESTARTNOHAND: | 784 | case -ERESTARTNOHAND: |
785 | case -ERESTARTSYS: | 785 | case -ERESTARTSYS: |
786 | case -ERESTARTNOINTR: | 786 | case -ERESTARTNOINTR: |
787 | regs->ax = regs->orig_ax; | 787 | regs->ax = regs->orig_ax; |
788 | regs->ip -= 2; | 788 | regs->ip -= 2; |
789 | break; | 789 | break; |
790 | 790 | ||
791 | case -ERESTART_RESTARTBLOCK: | 791 | case -ERESTART_RESTARTBLOCK: |
792 | regs->ax = NR_restart_syscall; | 792 | regs->ax = NR_restart_syscall; |
793 | regs->ip -= 2; | 793 | regs->ip -= 2; |
794 | break; | 794 | break; |
795 | } | 795 | } |
796 | } | 796 | } |
797 | 797 | ||
798 | /* | 798 | /* |
799 | * If there's no signal to deliver, we just put the saved sigmask | 799 | * If there's no signal to deliver, we just put the saved sigmask |
800 | * back. | 800 | * back. |
801 | */ | 801 | */ |
802 | restore_saved_sigmask(); | 802 | restore_saved_sigmask(); |
803 | } | 803 | } |
804 | 804 | ||
805 | /* | 805 | /* |
806 | * notification of userspace execution resumption | 806 | * notification of userspace execution resumption |
807 | * - triggered by the TIF_WORK_MASK flags | 807 | * - triggered by the TIF_WORK_MASK flags |
808 | */ | 808 | */ |
809 | void | 809 | void |
810 | do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | 810 | do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) |
811 | { | 811 | { |
812 | rcu_user_exit(); | 812 | rcu_user_exit(); |
813 | 813 | ||
814 | #ifdef CONFIG_X86_MCE | 814 | #ifdef CONFIG_X86_MCE |
815 | /* notify userspace of pending MCEs */ | 815 | /* notify userspace of pending MCEs */ |
816 | if (thread_info_flags & _TIF_MCE_NOTIFY) | 816 | if (thread_info_flags & _TIF_MCE_NOTIFY) |
817 | mce_notify_process(); | 817 | mce_notify_process(); |
818 | #endif /* CONFIG_X86_MCE */ | 818 | #endif /* CONFIG_X86_MCE */ |
819 | 819 | ||
820 | if (thread_info_flags & _TIF_UPROBE) | 820 | if (thread_info_flags & _TIF_UPROBE) |
821 | uprobe_notify_resume(regs); | 821 | uprobe_notify_resume(regs); |
822 | 822 | ||
823 | /* deal with pending signal delivery */ | 823 | /* deal with pending signal delivery */ |
824 | if (thread_info_flags & _TIF_SIGPENDING) | 824 | if (thread_info_flags & _TIF_SIGPENDING) |
825 | do_signal(regs); | 825 | do_signal(regs); |
826 | 826 | ||
827 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 827 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
828 | clear_thread_flag(TIF_NOTIFY_RESUME); | 828 | clear_thread_flag(TIF_NOTIFY_RESUME); |
829 | tracehook_notify_resume(regs); | 829 | tracehook_notify_resume(regs); |
830 | } | 830 | } |
831 | if (thread_info_flags & _TIF_USER_RETURN_NOTIFY) | 831 | if (thread_info_flags & _TIF_USER_RETURN_NOTIFY) |
832 | fire_user_return_notifiers(); | 832 | fire_user_return_notifiers(); |
833 | 833 | ||
834 | rcu_user_enter(); | 834 | rcu_user_enter(); |
835 | } | 835 | } |
836 | 836 | ||
837 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) | 837 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) |
838 | { | 838 | { |
839 | struct task_struct *me = current; | 839 | struct task_struct *me = current; |
840 | 840 | ||
841 | if (show_unhandled_signals && printk_ratelimit()) { | 841 | if (show_unhandled_signals && printk_ratelimit()) { |
842 | printk("%s" | 842 | printk("%s" |
843 | "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", | 843 | "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", |
844 | task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, | 844 | task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, |
845 | me->comm, me->pid, where, frame, | 845 | me->comm, me->pid, where, frame, |
846 | regs->ip, regs->sp, regs->orig_ax); | 846 | regs->ip, regs->sp, regs->orig_ax); |
847 | print_vma_addr(" in ", regs->ip); | 847 | print_vma_addr(" in ", regs->ip); |
848 | pr_cont("\n"); | 848 | pr_cont("\n"); |
849 | } | 849 | } |
850 | 850 | ||
851 | force_sig(SIGSEGV, me); | 851 | force_sig(SIGSEGV, me); |
852 | } | 852 | } |
853 | 853 | ||
854 | #ifdef CONFIG_X86_X32_ABI | 854 | #ifdef CONFIG_X86_X32_ABI |
855 | asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs) | 855 | asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs) |
856 | { | 856 | { |
857 | struct rt_sigframe_x32 __user *frame; | 857 | struct rt_sigframe_x32 __user *frame; |
858 | sigset_t set; | 858 | sigset_t set; |
859 | unsigned long ax; | 859 | unsigned long ax; |
860 | struct pt_regs tregs; | ||
861 | 860 | ||
862 | frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); | 861 | frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); |
863 | 862 | ||
864 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 863 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
865 | goto badframe; | 864 | goto badframe; |
866 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | 865 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) |
867 | goto badframe; | 866 | goto badframe; |
868 | 867 | ||
869 | set_current_blocked(&set); | 868 | set_current_blocked(&set); |
870 | 869 | ||
871 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) | 870 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) |
872 | goto badframe; | 871 | goto badframe; |
873 | 872 | ||
874 | tregs = *regs; | 873 | if (compat_restore_altstack(&frame->uc.uc_stack)) |
875 | if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT) | ||
876 | goto badframe; | 874 | goto badframe; |
877 | 875 | ||
878 | return ax; | 876 | return ax; |
879 | 877 | ||
880 | badframe: | 878 | badframe: |
881 | signal_fault(regs, frame, "x32 rt_sigreturn"); | 879 | signal_fault(regs, frame, "x32 rt_sigreturn"); |
882 | return 0; | 880 | return 0; |
883 | } | 881 | } |
884 | #endif | 882 | #endif |
885 | 883 |
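
The hunk above is the substantive change in this file: instead of copying pt_regs into a temporary and calling sys32_sigaltstack() by hand, the x32 sigreturn path now calls compat_restore_altstack() and treats only -EFAULT as fatal. For readers unfamiliar with the helper, a minimal sketch of the shape such a generic compat wrapper can take is below. It assumes the usual compat_stack_t layout (compat_uptr_t ss_sp, int ss_flags, compat_size_t ss_size) and builds on the do_sigaltstack() primitive already visible in sys_rt_sigreturn() above; the function names marked "sketch" are illustrative, not the literal patch body.

	/*
	 * Sketch only: convert compat_stack_t <-> stack_t and defer to
	 * do_sigaltstack().  compat_ptr()/ptr_to_compat() and the
	 * get_fs()/set_fs(KERNEL_DS) dance are real primitives of this era;
	 * the exact name and body in the patch may differ.
	 */
	static long compat_sigaltstack_sketch(const compat_stack_t __user *uss_ptr,
					      compat_stack_t __user *uoss_ptr,
					      unsigned long sp)
	{
		stack_t uss, uoss;
		mm_segment_t seg;
		long ret;

		if (uss_ptr) {
			compat_stack_t uss32;

			if (copy_from_user(&uss32, uss_ptr, sizeof(uss32)))
				return -EFAULT;
			uss.ss_sp    = compat_ptr(uss32.ss_sp);	/* widen the user pointer */
			uss.ss_flags = uss32.ss_flags;
			uss.ss_size  = uss32.ss_size;
		}
		/*
		 * do_sigaltstack() takes __user pointers; point it at the
		 * kernel-space copies under KERNEL_DS.
		 */
		seg = get_fs();
		set_fs(KERNEL_DS);
		ret = do_sigaltstack(uss_ptr ? (stack_t __force __user *)&uss : NULL,
				     (stack_t __force __user *)&uoss, sp);
		set_fs(seg);
		if (ret >= 0 && uoss_ptr) {
			if (put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
			    put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
			    put_user(uoss.ss_size, &uoss_ptr->ss_size))
				ret = -EFAULT;
		}
		return ret;
	}

	/*
	 * Sigreturn-side wrapper matching the call site above: everything but
	 * -EFAULT is squashed, mirroring the old "== -EFAULT" check, so a
	 * bogus-but-readable uc_stack does not turn a successful sigreturn
	 * into a SIGSEGV.
	 */
	static int compat_restore_altstack_sketch(const compat_stack_t __user *uss,
						  unsigned long sp)
	{
		int err = compat_sigaltstack_sketch(uss, NULL, sp);
		return err == -EFAULT ? err : 0;
	}

This is also why the syscall table below can point the i386 sigaltstack slot at a plain C compat entry point rather than an assembly stub: nothing in the wrapper needs the full pt_regs any more, only the user stack pointer.
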
arch/x86/syscalls/syscall_32.tbl
1 | # | 1 | # |
2 | # 32-bit system call numbers and entry vectors | 2 | # 32-bit system call numbers and entry vectors |
3 | # | 3 | # |
4 | # The format is: | 4 | # The format is: |
5 | # <number> <abi> <name> <entry point> <compat entry point> | 5 | # <number> <abi> <name> <entry point> <compat entry point> |
6 | # | 6 | # |
7 | # The abi is always "i386" for this file. | 7 | # The abi is always "i386" for this file. |
8 | # | 8 | # |
9 | 0 i386 restart_syscall sys_restart_syscall | 9 | 0 i386 restart_syscall sys_restart_syscall |
10 | 1 i386 exit sys_exit | 10 | 1 i386 exit sys_exit |
11 | 2 i386 fork sys_fork stub32_fork | 11 | 2 i386 fork sys_fork stub32_fork |
12 | 3 i386 read sys_read | 12 | 3 i386 read sys_read |
13 | 4 i386 write sys_write | 13 | 4 i386 write sys_write |
14 | 5 i386 open sys_open compat_sys_open | 14 | 5 i386 open sys_open compat_sys_open |
15 | 6 i386 close sys_close | 15 | 6 i386 close sys_close |
16 | 7 i386 waitpid sys_waitpid sys32_waitpid | 16 | 7 i386 waitpid sys_waitpid sys32_waitpid |
17 | 8 i386 creat sys_creat | 17 | 8 i386 creat sys_creat |
18 | 9 i386 link sys_link | 18 | 9 i386 link sys_link |
19 | 10 i386 unlink sys_unlink | 19 | 10 i386 unlink sys_unlink |
20 | 11 i386 execve sys_execve stub32_execve | 20 | 11 i386 execve sys_execve stub32_execve |
21 | 12 i386 chdir sys_chdir | 21 | 12 i386 chdir sys_chdir |
22 | 13 i386 time sys_time compat_sys_time | 22 | 13 i386 time sys_time compat_sys_time |
23 | 14 i386 mknod sys_mknod | 23 | 14 i386 mknod sys_mknod |
24 | 15 i386 chmod sys_chmod | 24 | 15 i386 chmod sys_chmod |
25 | 16 i386 lchown sys_lchown16 | 25 | 16 i386 lchown sys_lchown16 |
26 | 17 i386 break | 26 | 17 i386 break |
27 | 18 i386 oldstat sys_stat | 27 | 18 i386 oldstat sys_stat |
28 | 19 i386 lseek sys_lseek sys32_lseek | 28 | 19 i386 lseek sys_lseek sys32_lseek |
29 | 20 i386 getpid sys_getpid | 29 | 20 i386 getpid sys_getpid |
30 | 21 i386 mount sys_mount compat_sys_mount | 30 | 21 i386 mount sys_mount compat_sys_mount |
31 | 22 i386 umount sys_oldumount | 31 | 22 i386 umount sys_oldumount |
32 | 23 i386 setuid sys_setuid16 | 32 | 23 i386 setuid sys_setuid16 |
33 | 24 i386 getuid sys_getuid16 | 33 | 24 i386 getuid sys_getuid16 |
34 | 25 i386 stime sys_stime compat_sys_stime | 34 | 25 i386 stime sys_stime compat_sys_stime |
35 | 26 i386 ptrace sys_ptrace compat_sys_ptrace | 35 | 26 i386 ptrace sys_ptrace compat_sys_ptrace |
36 | 27 i386 alarm sys_alarm | 36 | 27 i386 alarm sys_alarm |
37 | 28 i386 oldfstat sys_fstat | 37 | 28 i386 oldfstat sys_fstat |
38 | 29 i386 pause sys_pause | 38 | 29 i386 pause sys_pause |
39 | 30 i386 utime sys_utime compat_sys_utime | 39 | 30 i386 utime sys_utime compat_sys_utime |
40 | 31 i386 stty | 40 | 31 i386 stty |
41 | 32 i386 gtty | 41 | 32 i386 gtty |
42 | 33 i386 access sys_access | 42 | 33 i386 access sys_access |
43 | 34 i386 nice sys_nice | 43 | 34 i386 nice sys_nice |
44 | 35 i386 ftime | 44 | 35 i386 ftime |
45 | 36 i386 sync sys_sync | 45 | 36 i386 sync sys_sync |
46 | 37 i386 kill sys_kill sys32_kill | 46 | 37 i386 kill sys_kill sys32_kill |
47 | 38 i386 rename sys_rename | 47 | 38 i386 rename sys_rename |
48 | 39 i386 mkdir sys_mkdir | 48 | 39 i386 mkdir sys_mkdir |
49 | 40 i386 rmdir sys_rmdir | 49 | 40 i386 rmdir sys_rmdir |
50 | 41 i386 dup sys_dup | 50 | 41 i386 dup sys_dup |
51 | 42 i386 pipe sys_pipe | 51 | 42 i386 pipe sys_pipe |
52 | 43 i386 times sys_times compat_sys_times | 52 | 43 i386 times sys_times compat_sys_times |
53 | 44 i386 prof | 53 | 44 i386 prof |
54 | 45 i386 brk sys_brk | 54 | 45 i386 brk sys_brk |
55 | 46 i386 setgid sys_setgid16 | 55 | 46 i386 setgid sys_setgid16 |
56 | 47 i386 getgid sys_getgid16 | 56 | 47 i386 getgid sys_getgid16 |
57 | 48 i386 signal sys_signal | 57 | 48 i386 signal sys_signal |
58 | 49 i386 geteuid sys_geteuid16 | 58 | 49 i386 geteuid sys_geteuid16 |
59 | 50 i386 getegid sys_getegid16 | 59 | 50 i386 getegid sys_getegid16 |
60 | 51 i386 acct sys_acct | 60 | 51 i386 acct sys_acct |
61 | 52 i386 umount2 sys_umount | 61 | 52 i386 umount2 sys_umount |
62 | 53 i386 lock | 62 | 53 i386 lock |
63 | 54 i386 ioctl sys_ioctl compat_sys_ioctl | 63 | 54 i386 ioctl sys_ioctl compat_sys_ioctl |
64 | 55 i386 fcntl sys_fcntl compat_sys_fcntl64 | 64 | 55 i386 fcntl sys_fcntl compat_sys_fcntl64 |
65 | 56 i386 mpx | 65 | 56 i386 mpx |
66 | 57 i386 setpgid sys_setpgid | 66 | 57 i386 setpgid sys_setpgid |
67 | 58 i386 ulimit | 67 | 58 i386 ulimit |
68 | 59 i386 oldolduname sys_olduname | 68 | 59 i386 oldolduname sys_olduname |
69 | 60 i386 umask sys_umask | 69 | 60 i386 umask sys_umask |
70 | 61 i386 chroot sys_chroot | 70 | 61 i386 chroot sys_chroot |
71 | 62 i386 ustat sys_ustat compat_sys_ustat | 71 | 62 i386 ustat sys_ustat compat_sys_ustat |
72 | 63 i386 dup2 sys_dup2 | 72 | 63 i386 dup2 sys_dup2 |
73 | 64 i386 getppid sys_getppid | 73 | 64 i386 getppid sys_getppid |
74 | 65 i386 getpgrp sys_getpgrp | 74 | 65 i386 getpgrp sys_getpgrp |
75 | 66 i386 setsid sys_setsid | 75 | 66 i386 setsid sys_setsid |
76 | 67 i386 sigaction sys_sigaction sys32_sigaction | 76 | 67 i386 sigaction sys_sigaction sys32_sigaction |
77 | 68 i386 sgetmask sys_sgetmask | 77 | 68 i386 sgetmask sys_sgetmask |
78 | 69 i386 ssetmask sys_ssetmask | 78 | 69 i386 ssetmask sys_ssetmask |
79 | 70 i386 setreuid sys_setreuid16 | 79 | 70 i386 setreuid sys_setreuid16 |
80 | 71 i386 setregid sys_setregid16 | 80 | 71 i386 setregid sys_setregid16 |
81 | 72 i386 sigsuspend sys_sigsuspend sys32_sigsuspend | 81 | 72 i386 sigsuspend sys_sigsuspend sys32_sigsuspend |
82 | 73 i386 sigpending sys_sigpending compat_sys_sigpending | 82 | 73 i386 sigpending sys_sigpending compat_sys_sigpending |
83 | 74 i386 sethostname sys_sethostname | 83 | 74 i386 sethostname sys_sethostname |
84 | 75 i386 setrlimit sys_setrlimit compat_sys_setrlimit | 84 | 75 i386 setrlimit sys_setrlimit compat_sys_setrlimit |
85 | 76 i386 getrlimit sys_old_getrlimit compat_sys_old_getrlimit | 85 | 76 i386 getrlimit sys_old_getrlimit compat_sys_old_getrlimit |
86 | 77 i386 getrusage sys_getrusage compat_sys_getrusage | 86 | 77 i386 getrusage sys_getrusage compat_sys_getrusage |
87 | 78 i386 gettimeofday sys_gettimeofday compat_sys_gettimeofday | 87 | 78 i386 gettimeofday sys_gettimeofday compat_sys_gettimeofday |
88 | 79 i386 settimeofday sys_settimeofday compat_sys_settimeofday | 88 | 79 i386 settimeofday sys_settimeofday compat_sys_settimeofday |
89 | 80 i386 getgroups sys_getgroups16 | 89 | 80 i386 getgroups sys_getgroups16 |
90 | 81 i386 setgroups sys_setgroups16 | 90 | 81 i386 setgroups sys_setgroups16 |
91 | 82 i386 select sys_old_select compat_sys_old_select | 91 | 82 i386 select sys_old_select compat_sys_old_select |
92 | 83 i386 symlink sys_symlink | 92 | 83 i386 symlink sys_symlink |
93 | 84 i386 oldlstat sys_lstat | 93 | 84 i386 oldlstat sys_lstat |
94 | 85 i386 readlink sys_readlink | 94 | 85 i386 readlink sys_readlink |
95 | 86 i386 uselib sys_uselib | 95 | 86 i386 uselib sys_uselib |
96 | 87 i386 swapon sys_swapon | 96 | 87 i386 swapon sys_swapon |
97 | 88 i386 reboot sys_reboot | 97 | 88 i386 reboot sys_reboot |
98 | 89 i386 readdir sys_old_readdir compat_sys_old_readdir | 98 | 89 i386 readdir sys_old_readdir compat_sys_old_readdir |
99 | 90 i386 mmap sys_old_mmap sys32_mmap | 99 | 90 i386 mmap sys_old_mmap sys32_mmap |
100 | 91 i386 munmap sys_munmap | 100 | 91 i386 munmap sys_munmap |
101 | 92 i386 truncate sys_truncate | 101 | 92 i386 truncate sys_truncate |
102 | 93 i386 ftruncate sys_ftruncate | 102 | 93 i386 ftruncate sys_ftruncate |
103 | 94 i386 fchmod sys_fchmod | 103 | 94 i386 fchmod sys_fchmod |
104 | 95 i386 fchown sys_fchown16 | 104 | 95 i386 fchown sys_fchown16 |
105 | 96 i386 getpriority sys_getpriority | 105 | 96 i386 getpriority sys_getpriority |
106 | 97 i386 setpriority sys_setpriority | 106 | 97 i386 setpriority sys_setpriority |
107 | 98 i386 profil | 107 | 98 i386 profil |
108 | 99 i386 statfs sys_statfs compat_sys_statfs | 108 | 99 i386 statfs sys_statfs compat_sys_statfs |
109 | 100 i386 fstatfs sys_fstatfs compat_sys_fstatfs | 109 | 100 i386 fstatfs sys_fstatfs compat_sys_fstatfs |
110 | 101 i386 ioperm sys_ioperm | 110 | 101 i386 ioperm sys_ioperm |
111 | 102 i386 socketcall sys_socketcall compat_sys_socketcall | 111 | 102 i386 socketcall sys_socketcall compat_sys_socketcall |
112 | 103 i386 syslog sys_syslog | 112 | 103 i386 syslog sys_syslog |
113 | 104 i386 setitimer sys_setitimer compat_sys_setitimer | 113 | 104 i386 setitimer sys_setitimer compat_sys_setitimer |
114 | 105 i386 getitimer sys_getitimer compat_sys_getitimer | 114 | 105 i386 getitimer sys_getitimer compat_sys_getitimer |
115 | 106 i386 stat sys_newstat compat_sys_newstat | 115 | 106 i386 stat sys_newstat compat_sys_newstat |
116 | 107 i386 lstat sys_newlstat compat_sys_newlstat | 116 | 107 i386 lstat sys_newlstat compat_sys_newlstat |
117 | 108 i386 fstat sys_newfstat compat_sys_newfstat | 117 | 108 i386 fstat sys_newfstat compat_sys_newfstat |
118 | 109 i386 olduname sys_uname | 118 | 109 i386 olduname sys_uname |
119 | 110 i386 iopl ptregs_iopl stub32_iopl | 119 | 110 i386 iopl ptregs_iopl stub32_iopl |
120 | 111 i386 vhangup sys_vhangup | 120 | 111 i386 vhangup sys_vhangup |
121 | 112 i386 idle | 121 | 112 i386 idle |
122 | 113 i386 vm86old ptregs_vm86old sys32_vm86_warning | 122 | 113 i386 vm86old ptregs_vm86old sys32_vm86_warning |
123 | 114 i386 wait4 sys_wait4 compat_sys_wait4 | 123 | 114 i386 wait4 sys_wait4 compat_sys_wait4 |
124 | 115 i386 swapoff sys_swapoff | 124 | 115 i386 swapoff sys_swapoff |
125 | 116 i386 sysinfo sys_sysinfo compat_sys_sysinfo | 125 | 116 i386 sysinfo sys_sysinfo compat_sys_sysinfo |
126 | 117 i386 ipc sys_ipc sys32_ipc | 126 | 117 i386 ipc sys_ipc sys32_ipc |
127 | 118 i386 fsync sys_fsync | 127 | 118 i386 fsync sys_fsync |
128 | 119 i386 sigreturn ptregs_sigreturn stub32_sigreturn | 128 | 119 i386 sigreturn ptregs_sigreturn stub32_sigreturn |
129 | 120 i386 clone sys_clone stub32_clone | 129 | 120 i386 clone sys_clone stub32_clone |
130 | 121 i386 setdomainname sys_setdomainname | 130 | 121 i386 setdomainname sys_setdomainname |
131 | 122 i386 uname sys_newuname | 131 | 122 i386 uname sys_newuname |
132 | 123 i386 modify_ldt sys_modify_ldt | 132 | 123 i386 modify_ldt sys_modify_ldt |
133 | 124 i386 adjtimex sys_adjtimex compat_sys_adjtimex | 133 | 124 i386 adjtimex sys_adjtimex compat_sys_adjtimex |
134 | 125 i386 mprotect sys_mprotect sys32_mprotect | 134 | 125 i386 mprotect sys_mprotect sys32_mprotect |
135 | 126 i386 sigprocmask sys_sigprocmask compat_sys_sigprocmask | 135 | 126 i386 sigprocmask sys_sigprocmask compat_sys_sigprocmask |
136 | 127 i386 create_module | 136 | 127 i386 create_module |
137 | 128 i386 init_module sys_init_module | 137 | 128 i386 init_module sys_init_module |
138 | 129 i386 delete_module sys_delete_module | 138 | 129 i386 delete_module sys_delete_module |
139 | 130 i386 get_kernel_syms | 139 | 130 i386 get_kernel_syms |
140 | 131 i386 quotactl sys_quotactl sys32_quotactl | 140 | 131 i386 quotactl sys_quotactl sys32_quotactl |
141 | 132 i386 getpgid sys_getpgid | 141 | 132 i386 getpgid sys_getpgid |
142 | 133 i386 fchdir sys_fchdir | 142 | 133 i386 fchdir sys_fchdir |
143 | 134 i386 bdflush sys_bdflush | 143 | 134 i386 bdflush sys_bdflush |
144 | 135 i386 sysfs sys_sysfs | 144 | 135 i386 sysfs sys_sysfs |
145 | 136 i386 personality sys_personality | 145 | 136 i386 personality sys_personality |
146 | 137 i386 afs_syscall | 146 | 137 i386 afs_syscall |
147 | 138 i386 setfsuid sys_setfsuid16 | 147 | 138 i386 setfsuid sys_setfsuid16 |
148 | 139 i386 setfsgid sys_setfsgid16 | 148 | 139 i386 setfsgid sys_setfsgid16 |
149 | 140 i386 _llseek sys_llseek | 149 | 140 i386 _llseek sys_llseek |
150 | 141 i386 getdents sys_getdents compat_sys_getdents | 150 | 141 i386 getdents sys_getdents compat_sys_getdents |
151 | 142 i386 _newselect sys_select compat_sys_select | 151 | 142 i386 _newselect sys_select compat_sys_select |
152 | 143 i386 flock sys_flock | 152 | 143 i386 flock sys_flock |
153 | 144 i386 msync sys_msync | 153 | 144 i386 msync sys_msync |
154 | 145 i386 readv sys_readv compat_sys_readv | 154 | 145 i386 readv sys_readv compat_sys_readv |
155 | 146 i386 writev sys_writev compat_sys_writev | 155 | 146 i386 writev sys_writev compat_sys_writev |
156 | 147 i386 getsid sys_getsid | 156 | 147 i386 getsid sys_getsid |
157 | 148 i386 fdatasync sys_fdatasync | 157 | 148 i386 fdatasync sys_fdatasync |
158 | 149 i386 _sysctl sys_sysctl compat_sys_sysctl | 158 | 149 i386 _sysctl sys_sysctl compat_sys_sysctl |
159 | 150 i386 mlock sys_mlock | 159 | 150 i386 mlock sys_mlock |
160 | 151 i386 munlock sys_munlock | 160 | 151 i386 munlock sys_munlock |
161 | 152 i386 mlockall sys_mlockall | 161 | 152 i386 mlockall sys_mlockall |
162 | 153 i386 munlockall sys_munlockall | 162 | 153 i386 munlockall sys_munlockall |
163 | 154 i386 sched_setparam sys_sched_setparam | 163 | 154 i386 sched_setparam sys_sched_setparam |
164 | 155 i386 sched_getparam sys_sched_getparam | 164 | 155 i386 sched_getparam sys_sched_getparam |
165 | 156 i386 sched_setscheduler sys_sched_setscheduler | 165 | 156 i386 sched_setscheduler sys_sched_setscheduler |
166 | 157 i386 sched_getscheduler sys_sched_getscheduler | 166 | 157 i386 sched_getscheduler sys_sched_getscheduler |
167 | 158 i386 sched_yield sys_sched_yield | 167 | 158 i386 sched_yield sys_sched_yield |
168 | 159 i386 sched_get_priority_max sys_sched_get_priority_max | 168 | 159 i386 sched_get_priority_max sys_sched_get_priority_max |
169 | 160 i386 sched_get_priority_min sys_sched_get_priority_min | 169 | 160 i386 sched_get_priority_min sys_sched_get_priority_min |
170 | 161 i386 sched_rr_get_interval sys_sched_rr_get_interval sys32_sched_rr_get_interval | 170 | 161 i386 sched_rr_get_interval sys_sched_rr_get_interval sys32_sched_rr_get_interval |
171 | 162 i386 nanosleep sys_nanosleep compat_sys_nanosleep | 171 | 162 i386 nanosleep sys_nanosleep compat_sys_nanosleep |
172 | 163 i386 mremap sys_mremap | 172 | 163 i386 mremap sys_mremap |
173 | 164 i386 setresuid sys_setresuid16 | 173 | 164 i386 setresuid sys_setresuid16 |
174 | 165 i386 getresuid sys_getresuid16 | 174 | 165 i386 getresuid sys_getresuid16 |
175 | 166 i386 vm86 ptregs_vm86 sys32_vm86_warning | 175 | 166 i386 vm86 ptregs_vm86 sys32_vm86_warning |
176 | 167 i386 query_module | 176 | 167 i386 query_module |
177 | 168 i386 poll sys_poll | 177 | 168 i386 poll sys_poll |
178 | 169 i386 nfsservctl | 178 | 169 i386 nfsservctl |
179 | 170 i386 setresgid sys_setresgid16 | 179 | 170 i386 setresgid sys_setresgid16 |
180 | 171 i386 getresgid sys_getresgid16 | 180 | 171 i386 getresgid sys_getresgid16 |
181 | 172 i386 prctl sys_prctl | 181 | 172 i386 prctl sys_prctl |
182 | 173 i386 rt_sigreturn ptregs_rt_sigreturn stub32_rt_sigreturn | 182 | 173 i386 rt_sigreturn ptregs_rt_sigreturn stub32_rt_sigreturn |
183 | 174 i386 rt_sigaction sys_rt_sigaction sys32_rt_sigaction | 183 | 174 i386 rt_sigaction sys_rt_sigaction sys32_rt_sigaction |
184 | 175 i386 rt_sigprocmask sys_rt_sigprocmask | 184 | 175 i386 rt_sigprocmask sys_rt_sigprocmask |
185 | 176 i386 rt_sigpending sys_rt_sigpending sys32_rt_sigpending | 185 | 176 i386 rt_sigpending sys_rt_sigpending sys32_rt_sigpending |
186 | 177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait | 186 | 177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait |
187 | 178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo sys32_rt_sigqueueinfo | 187 | 178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo sys32_rt_sigqueueinfo |
188 | 179 i386 rt_sigsuspend sys_rt_sigsuspend | 188 | 179 i386 rt_sigsuspend sys_rt_sigsuspend |
189 | 180 i386 pread64 sys_pread64 sys32_pread | 189 | 180 i386 pread64 sys_pread64 sys32_pread |
190 | 181 i386 pwrite64 sys_pwrite64 sys32_pwrite | 190 | 181 i386 pwrite64 sys_pwrite64 sys32_pwrite |
191 | 182 i386 chown sys_chown16 | 191 | 182 i386 chown sys_chown16 |
192 | 183 i386 getcwd sys_getcwd | 192 | 183 i386 getcwd sys_getcwd |
193 | 184 i386 capget sys_capget | 193 | 184 i386 capget sys_capget |
194 | 185 i386 capset sys_capset | 194 | 185 i386 capset sys_capset |
195 | 186 i386 sigaltstack sys_sigaltstack stub32_sigaltstack | 195 | 186 i386 sigaltstack sys_sigaltstack compat_sys_sigaltstack |
196 | 187 i386 sendfile sys_sendfile sys32_sendfile | 196 | 187 i386 sendfile sys_sendfile sys32_sendfile |
197 | 188 i386 getpmsg | 197 | 188 i386 getpmsg |
198 | 189 i386 putpmsg | 198 | 189 i386 putpmsg |
199 | 190 i386 vfork sys_vfork stub32_vfork | 199 | 190 i386 vfork sys_vfork stub32_vfork |
200 | 191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit | 200 | 191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit |
201 | 192 i386 mmap2 sys_mmap_pgoff | 201 | 192 i386 mmap2 sys_mmap_pgoff |
202 | 193 i386 truncate64 sys_truncate64 sys32_truncate64 | 202 | 193 i386 truncate64 sys_truncate64 sys32_truncate64 |
203 | 194 i386 ftruncate64 sys_ftruncate64 sys32_ftruncate64 | 203 | 194 i386 ftruncate64 sys_ftruncate64 sys32_ftruncate64 |
204 | 195 i386 stat64 sys_stat64 sys32_stat64 | 204 | 195 i386 stat64 sys_stat64 sys32_stat64 |
205 | 196 i386 lstat64 sys_lstat64 sys32_lstat64 | 205 | 196 i386 lstat64 sys_lstat64 sys32_lstat64 |
206 | 197 i386 fstat64 sys_fstat64 sys32_fstat64 | 206 | 197 i386 fstat64 sys_fstat64 sys32_fstat64 |
207 | 198 i386 lchown32 sys_lchown | 207 | 198 i386 lchown32 sys_lchown |
208 | 199 i386 getuid32 sys_getuid | 208 | 199 i386 getuid32 sys_getuid |
209 | 200 i386 getgid32 sys_getgid | 209 | 200 i386 getgid32 sys_getgid |
210 | 201 i386 geteuid32 sys_geteuid | 210 | 201 i386 geteuid32 sys_geteuid |
211 | 202 i386 getegid32 sys_getegid | 211 | 202 i386 getegid32 sys_getegid |
212 | 203 i386 setreuid32 sys_setreuid | 212 | 203 i386 setreuid32 sys_setreuid |
213 | 204 i386 setregid32 sys_setregid | 213 | 204 i386 setregid32 sys_setregid |
214 | 205 i386 getgroups32 sys_getgroups | 214 | 205 i386 getgroups32 sys_getgroups |
215 | 206 i386 setgroups32 sys_setgroups | 215 | 206 i386 setgroups32 sys_setgroups |
216 | 207 i386 fchown32 sys_fchown | 216 | 207 i386 fchown32 sys_fchown |
217 | 208 i386 setresuid32 sys_setresuid | 217 | 208 i386 setresuid32 sys_setresuid |
218 | 209 i386 getresuid32 sys_getresuid | 218 | 209 i386 getresuid32 sys_getresuid |
219 | 210 i386 setresgid32 sys_setresgid | 219 | 210 i386 setresgid32 sys_setresgid |
220 | 211 i386 getresgid32 sys_getresgid | 220 | 211 i386 getresgid32 sys_getresgid |
221 | 212 i386 chown32 sys_chown | 221 | 212 i386 chown32 sys_chown |
222 | 213 i386 setuid32 sys_setuid | 222 | 213 i386 setuid32 sys_setuid |
223 | 214 i386 setgid32 sys_setgid | 223 | 214 i386 setgid32 sys_setgid |
224 | 215 i386 setfsuid32 sys_setfsuid | 224 | 215 i386 setfsuid32 sys_setfsuid |
225 | 216 i386 setfsgid32 sys_setfsgid | 225 | 216 i386 setfsgid32 sys_setfsgid |
226 | 217 i386 pivot_root sys_pivot_root | 226 | 217 i386 pivot_root sys_pivot_root |
227 | 218 i386 mincore sys_mincore | 227 | 218 i386 mincore sys_mincore |
228 | 219 i386 madvise sys_madvise | 228 | 219 i386 madvise sys_madvise |
229 | 220 i386 getdents64 sys_getdents64 compat_sys_getdents64 | 229 | 220 i386 getdents64 sys_getdents64 compat_sys_getdents64 |
230 | 221 i386 fcntl64 sys_fcntl64 compat_sys_fcntl64 | 230 | 221 i386 fcntl64 sys_fcntl64 compat_sys_fcntl64 |
231 | # 222 is unused | 231 | # 222 is unused |
232 | # 223 is unused | 232 | # 223 is unused |
233 | 224 i386 gettid sys_gettid | 233 | 224 i386 gettid sys_gettid |
234 | 225 i386 readahead sys_readahead sys32_readahead | 234 | 225 i386 readahead sys_readahead sys32_readahead |
235 | 226 i386 setxattr sys_setxattr | 235 | 226 i386 setxattr sys_setxattr |
236 | 227 i386 lsetxattr sys_lsetxattr | 236 | 227 i386 lsetxattr sys_lsetxattr |
237 | 228 i386 fsetxattr sys_fsetxattr | 237 | 228 i386 fsetxattr sys_fsetxattr |
238 | 229 i386 getxattr sys_getxattr | 238 | 229 i386 getxattr sys_getxattr |
239 | 230 i386 lgetxattr sys_lgetxattr | 239 | 230 i386 lgetxattr sys_lgetxattr |
240 | 231 i386 fgetxattr sys_fgetxattr | 240 | 231 i386 fgetxattr sys_fgetxattr |
241 | 232 i386 listxattr sys_listxattr | 241 | 232 i386 listxattr sys_listxattr |
242 | 233 i386 llistxattr sys_llistxattr | 242 | 233 i386 llistxattr sys_llistxattr |
243 | 234 i386 flistxattr sys_flistxattr | 243 | 234 i386 flistxattr sys_flistxattr |
244 | 235 i386 removexattr sys_removexattr | 244 | 235 i386 removexattr sys_removexattr |
245 | 236 i386 lremovexattr sys_lremovexattr | 245 | 236 i386 lremovexattr sys_lremovexattr |
246 | 237 i386 fremovexattr sys_fremovexattr | 246 | 237 i386 fremovexattr sys_fremovexattr |
247 | 238 i386 tkill sys_tkill | 247 | 238 i386 tkill sys_tkill |
248 | 239 i386 sendfile64 sys_sendfile64 | 248 | 239 i386 sendfile64 sys_sendfile64 |
249 | 240 i386 futex sys_futex compat_sys_futex | 249 | 240 i386 futex sys_futex compat_sys_futex |
250 | 241 i386 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity | 250 | 241 i386 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity |
251 | 242 i386 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity | 251 | 242 i386 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity |
252 | 243 i386 set_thread_area sys_set_thread_area | 252 | 243 i386 set_thread_area sys_set_thread_area |
253 | 244 i386 get_thread_area sys_get_thread_area | 253 | 244 i386 get_thread_area sys_get_thread_area |
254 | 245 i386 io_setup sys_io_setup compat_sys_io_setup | 254 | 245 i386 io_setup sys_io_setup compat_sys_io_setup |
255 | 246 i386 io_destroy sys_io_destroy | 255 | 246 i386 io_destroy sys_io_destroy |
256 | 247 i386 io_getevents sys_io_getevents compat_sys_io_getevents | 256 | 247 i386 io_getevents sys_io_getevents compat_sys_io_getevents |
257 | 248 i386 io_submit sys_io_submit compat_sys_io_submit | 257 | 248 i386 io_submit sys_io_submit compat_sys_io_submit |
258 | 249 i386 io_cancel sys_io_cancel | 258 | 249 i386 io_cancel sys_io_cancel |
259 | 250 i386 fadvise64 sys_fadvise64 sys32_fadvise64 | 259 | 250 i386 fadvise64 sys_fadvise64 sys32_fadvise64 |
260 | # 251 is available for reuse (was briefly sys_set_zone_reclaim) | 260 | # 251 is available for reuse (was briefly sys_set_zone_reclaim) |
261 | 252 i386 exit_group sys_exit_group | 261 | 252 i386 exit_group sys_exit_group |
262 | 253 i386 lookup_dcookie sys_lookup_dcookie sys32_lookup_dcookie | 262 | 253 i386 lookup_dcookie sys_lookup_dcookie sys32_lookup_dcookie |
263 | 254 i386 epoll_create sys_epoll_create | 263 | 254 i386 epoll_create sys_epoll_create |
264 | 255 i386 epoll_ctl sys_epoll_ctl | 264 | 255 i386 epoll_ctl sys_epoll_ctl |
265 | 256 i386 epoll_wait sys_epoll_wait | 265 | 256 i386 epoll_wait sys_epoll_wait |
266 | 257 i386 remap_file_pages sys_remap_file_pages | 266 | 257 i386 remap_file_pages sys_remap_file_pages |
267 | 258 i386 set_tid_address sys_set_tid_address | 267 | 258 i386 set_tid_address sys_set_tid_address |
268 | 259 i386 timer_create sys_timer_create compat_sys_timer_create | 268 | 259 i386 timer_create sys_timer_create compat_sys_timer_create |
269 | 260 i386 timer_settime sys_timer_settime compat_sys_timer_settime | 269 | 260 i386 timer_settime sys_timer_settime compat_sys_timer_settime |
270 | 261 i386 timer_gettime sys_timer_gettime compat_sys_timer_gettime | 270 | 261 i386 timer_gettime sys_timer_gettime compat_sys_timer_gettime |
271 | 262 i386 timer_getoverrun sys_timer_getoverrun | 271 | 262 i386 timer_getoverrun sys_timer_getoverrun |
272 | 263 i386 timer_delete sys_timer_delete | 272 | 263 i386 timer_delete sys_timer_delete |
273 | 264 i386 clock_settime sys_clock_settime compat_sys_clock_settime | 273 | 264 i386 clock_settime sys_clock_settime compat_sys_clock_settime |
274 | 265 i386 clock_gettime sys_clock_gettime compat_sys_clock_gettime | 274 | 265 i386 clock_gettime sys_clock_gettime compat_sys_clock_gettime |
275 | 266 i386 clock_getres sys_clock_getres compat_sys_clock_getres | 275 | 266 i386 clock_getres sys_clock_getres compat_sys_clock_getres |
276 | 267 i386 clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep | 276 | 267 i386 clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep |
277 | 268 i386 statfs64 sys_statfs64 compat_sys_statfs64 | 277 | 268 i386 statfs64 sys_statfs64 compat_sys_statfs64 |
278 | 269 i386 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 | 278 | 269 i386 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 |
279 | 270 i386 tgkill sys_tgkill | 279 | 270 i386 tgkill sys_tgkill |
280 | 271 i386 utimes sys_utimes compat_sys_utimes | 280 | 271 i386 utimes sys_utimes compat_sys_utimes |
281 | 272 i386 fadvise64_64 sys_fadvise64_64 sys32_fadvise64_64 | 281 | 272 i386 fadvise64_64 sys_fadvise64_64 sys32_fadvise64_64 |
282 | 273 i386 vserver | 282 | 273 i386 vserver |
283 | 274 i386 mbind sys_mbind | 283 | 274 i386 mbind sys_mbind |
284 | 275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy | 284 | 275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy |
285 | 276 i386 set_mempolicy sys_set_mempolicy | 285 | 276 i386 set_mempolicy sys_set_mempolicy |
286 | 277 i386 mq_open sys_mq_open compat_sys_mq_open | 286 | 277 i386 mq_open sys_mq_open compat_sys_mq_open |
287 | 278 i386 mq_unlink sys_mq_unlink | 287 | 278 i386 mq_unlink sys_mq_unlink |
288 | 279 i386 mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend | 288 | 279 i386 mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend |
289 | 280 i386 mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive | 289 | 280 i386 mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive |
290 | 281 i386 mq_notify sys_mq_notify compat_sys_mq_notify | 290 | 281 i386 mq_notify sys_mq_notify compat_sys_mq_notify |
291 | 282 i386 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr | 291 | 282 i386 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr |
292 | 283 i386 kexec_load sys_kexec_load compat_sys_kexec_load | 292 | 283 i386 kexec_load sys_kexec_load compat_sys_kexec_load |
293 | 284 i386 waitid sys_waitid compat_sys_waitid | 293 | 284 i386 waitid sys_waitid compat_sys_waitid |
294 | # 285 sys_setaltroot | 294 | # 285 sys_setaltroot |
295 | 286 i386 add_key sys_add_key | 295 | 286 i386 add_key sys_add_key |
296 | 287 i386 request_key sys_request_key | 296 | 287 i386 request_key sys_request_key |
297 | 288 i386 keyctl sys_keyctl | 297 | 288 i386 keyctl sys_keyctl |
298 | 289 i386 ioprio_set sys_ioprio_set | 298 | 289 i386 ioprio_set sys_ioprio_set |
299 | 290 i386 ioprio_get sys_ioprio_get | 299 | 290 i386 ioprio_get sys_ioprio_get |
300 | 291 i386 inotify_init sys_inotify_init | 300 | 291 i386 inotify_init sys_inotify_init |
301 | 292 i386 inotify_add_watch sys_inotify_add_watch | 301 | 292 i386 inotify_add_watch sys_inotify_add_watch |
302 | 293 i386 inotify_rm_watch sys_inotify_rm_watch | 302 | 293 i386 inotify_rm_watch sys_inotify_rm_watch |
303 | 294 i386 migrate_pages sys_migrate_pages | 303 | 294 i386 migrate_pages sys_migrate_pages |
304 | 295 i386 openat sys_openat compat_sys_openat | 304 | 295 i386 openat sys_openat compat_sys_openat |
305 | 296 i386 mkdirat sys_mkdirat | 305 | 296 i386 mkdirat sys_mkdirat |
306 | 297 i386 mknodat sys_mknodat | 306 | 297 i386 mknodat sys_mknodat |
307 | 298 i386 fchownat sys_fchownat | 307 | 298 i386 fchownat sys_fchownat |
308 | 299 i386 futimesat sys_futimesat compat_sys_futimesat | 308 | 299 i386 futimesat sys_futimesat compat_sys_futimesat |
309 | 300 i386 fstatat64 sys_fstatat64 sys32_fstatat | 309 | 300 i386 fstatat64 sys_fstatat64 sys32_fstatat |
310 | 301 i386 unlinkat sys_unlinkat | 310 | 301 i386 unlinkat sys_unlinkat |
311 | 302 i386 renameat sys_renameat | 311 | 302 i386 renameat sys_renameat |
312 | 303 i386 linkat sys_linkat | 312 | 303 i386 linkat sys_linkat |
313 | 304 i386 symlinkat sys_symlinkat | 313 | 304 i386 symlinkat sys_symlinkat |
314 | 305 i386 readlinkat sys_readlinkat | 314 | 305 i386 readlinkat sys_readlinkat |
315 | 306 i386 fchmodat sys_fchmodat | 315 | 306 i386 fchmodat sys_fchmodat |
316 | 307 i386 faccessat sys_faccessat | 316 | 307 i386 faccessat sys_faccessat |
317 | 308 i386 pselect6 sys_pselect6 compat_sys_pselect6 | 317 | 308 i386 pselect6 sys_pselect6 compat_sys_pselect6 |
318 | 309 i386 ppoll sys_ppoll compat_sys_ppoll | 318 | 309 i386 ppoll sys_ppoll compat_sys_ppoll |
319 | 310 i386 unshare sys_unshare | 319 | 310 i386 unshare sys_unshare |
320 | 311 i386 set_robust_list sys_set_robust_list compat_sys_set_robust_list | 320 | 311 i386 set_robust_list sys_set_robust_list compat_sys_set_robust_list |
321 | 312 i386 get_robust_list sys_get_robust_list compat_sys_get_robust_list | 321 | 312 i386 get_robust_list sys_get_robust_list compat_sys_get_robust_list |
322 | 313 i386 splice sys_splice | 322 | 313 i386 splice sys_splice |
323 | 314 i386 sync_file_range sys_sync_file_range sys32_sync_file_range | 323 | 314 i386 sync_file_range sys_sync_file_range sys32_sync_file_range |
324 | 315 i386 tee sys_tee | 324 | 315 i386 tee sys_tee |
325 | 316 i386 vmsplice sys_vmsplice compat_sys_vmsplice | 325 | 316 i386 vmsplice sys_vmsplice compat_sys_vmsplice |
326 | 317 i386 move_pages sys_move_pages compat_sys_move_pages | 326 | 317 i386 move_pages sys_move_pages compat_sys_move_pages |
327 | 318 i386 getcpu sys_getcpu | 327 | 318 i386 getcpu sys_getcpu |
328 | 319 i386 epoll_pwait sys_epoll_pwait | 328 | 319 i386 epoll_pwait sys_epoll_pwait |
329 | 320 i386 utimensat sys_utimensat compat_sys_utimensat | 329 | 320 i386 utimensat sys_utimensat compat_sys_utimensat |
330 | 321 i386 signalfd sys_signalfd compat_sys_signalfd | 330 | 321 i386 signalfd sys_signalfd compat_sys_signalfd |
331 | 322 i386 timerfd_create sys_timerfd_create | 331 | 322 i386 timerfd_create sys_timerfd_create |
332 | 323 i386 eventfd sys_eventfd | 332 | 323 i386 eventfd sys_eventfd |
333 | 324 i386 fallocate sys_fallocate sys32_fallocate | 333 | 324 i386 fallocate sys_fallocate sys32_fallocate |
334 | 325 i386 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime | 334 | 325 i386 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime |
335 | 326 i386 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime | 335 | 326 i386 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime |
336 | 327 i386 signalfd4 sys_signalfd4 compat_sys_signalfd4 | 336 | 327 i386 signalfd4 sys_signalfd4 compat_sys_signalfd4 |
337 | 328 i386 eventfd2 sys_eventfd2 | 337 | 328 i386 eventfd2 sys_eventfd2 |
338 | 329 i386 epoll_create1 sys_epoll_create1 | 338 | 329 i386 epoll_create1 sys_epoll_create1 |
339 | 330 i386 dup3 sys_dup3 | 339 | 330 i386 dup3 sys_dup3 |
340 | 331 i386 pipe2 sys_pipe2 | 340 | 331 i386 pipe2 sys_pipe2 |
341 | 332 i386 inotify_init1 sys_inotify_init1 | 341 | 332 i386 inotify_init1 sys_inotify_init1 |
342 | 333 i386 preadv sys_preadv compat_sys_preadv | 342 | 333 i386 preadv sys_preadv compat_sys_preadv |
343 | 334 i386 pwritev sys_pwritev compat_sys_pwritev | 343 | 334 i386 pwritev sys_pwritev compat_sys_pwritev |
344 | 335 i386 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo | 344 | 335 i386 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo |
345 | 336 i386 perf_event_open sys_perf_event_open | 345 | 336 i386 perf_event_open sys_perf_event_open |
346 | 337 i386 recvmmsg sys_recvmmsg compat_sys_recvmmsg | 346 | 337 i386 recvmmsg sys_recvmmsg compat_sys_recvmmsg |
347 | 338 i386 fanotify_init sys_fanotify_init | 347 | 338 i386 fanotify_init sys_fanotify_init |
348 | 339 i386 fanotify_mark sys_fanotify_mark sys32_fanotify_mark | 348 | 339 i386 fanotify_mark sys_fanotify_mark sys32_fanotify_mark |
349 | 340 i386 prlimit64 sys_prlimit64 | 349 | 340 i386 prlimit64 sys_prlimit64 |
350 | 341 i386 name_to_handle_at sys_name_to_handle_at | 350 | 341 i386 name_to_handle_at sys_name_to_handle_at |
351 | 342 i386 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at | 351 | 342 i386 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at |
352 | 343 i386 clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime | 352 | 343 i386 clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime |
353 | 344 i386 syncfs sys_syncfs | 353 | 344 i386 syncfs sys_syncfs |
354 | 345 i386 sendmmsg sys_sendmmsg compat_sys_sendmmsg | 354 | 345 i386 sendmmsg sys_sendmmsg compat_sys_sendmmsg |
355 | 346 i386 setns sys_setns | 355 | 346 i386 setns sys_setns |
356 | 347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv | 356 | 347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv |
357 | 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev | 357 | 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev |
358 | 349 i386 kcmp sys_kcmp | 358 | 349 i386 kcmp sys_kcmp |
359 | 359 |
arch/x86/syscalls/syscall_64.tbl
1 | # | 1 | # |
2 | # 64-bit system call numbers and entry vectors | 2 | # 64-bit system call numbers and entry vectors |
3 | # | 3 | # |
4 | # The format is: | 4 | # The format is: |
5 | # <number> <abi> <name> <entry point> | 5 | # <number> <abi> <name> <entry point> |
6 | # | 6 | # |
7 | # The abi is "common", "64" or "x32" for this file. | 7 | # The abi is "common", "64" or "x32" for this file. |
8 | # | 8 | # |
9 | 0 common read sys_read | 9 | 0 common read sys_read |
10 | 1 common write sys_write | 10 | 1 common write sys_write |
11 | 2 common open sys_open | 11 | 2 common open sys_open |
12 | 3 common close sys_close | 12 | 3 common close sys_close |
13 | 4 common stat sys_newstat | 13 | 4 common stat sys_newstat |
14 | 5 common fstat sys_newfstat | 14 | 5 common fstat sys_newfstat |
15 | 6 common lstat sys_newlstat | 15 | 6 common lstat sys_newlstat |
16 | 7 common poll sys_poll | 16 | 7 common poll sys_poll |
17 | 8 common lseek sys_lseek | 17 | 8 common lseek sys_lseek |
18 | 9 common mmap sys_mmap | 18 | 9 common mmap sys_mmap |
19 | 10 common mprotect sys_mprotect | 19 | 10 common mprotect sys_mprotect |
20 | 11 common munmap sys_munmap | 20 | 11 common munmap sys_munmap |
21 | 12 common brk sys_brk | 21 | 12 common brk sys_brk |
22 | 13 64 rt_sigaction sys_rt_sigaction | 22 | 13 64 rt_sigaction sys_rt_sigaction |
23 | 14 common rt_sigprocmask sys_rt_sigprocmask | 23 | 14 common rt_sigprocmask sys_rt_sigprocmask |
24 | 15 64 rt_sigreturn stub_rt_sigreturn | 24 | 15 64 rt_sigreturn stub_rt_sigreturn |
25 | 16 64 ioctl sys_ioctl | 25 | 16 64 ioctl sys_ioctl |
26 | 17 common pread64 sys_pread64 | 26 | 17 common pread64 sys_pread64 |
27 | 18 common pwrite64 sys_pwrite64 | 27 | 18 common pwrite64 sys_pwrite64 |
28 | 19 64 readv sys_readv | 28 | 19 64 readv sys_readv |
29 | 20 64 writev sys_writev | 29 | 20 64 writev sys_writev |
30 | 21 common access sys_access | 30 | 21 common access sys_access |
31 | 22 common pipe sys_pipe | 31 | 22 common pipe sys_pipe |
32 | 23 common select sys_select | 32 | 23 common select sys_select |
33 | 24 common sched_yield sys_sched_yield | 33 | 24 common sched_yield sys_sched_yield |
34 | 25 common mremap sys_mremap | 34 | 25 common mremap sys_mremap |
35 | 26 common msync sys_msync | 35 | 26 common msync sys_msync |
36 | 27 common mincore sys_mincore | 36 | 27 common mincore sys_mincore |
37 | 28 common madvise sys_madvise | 37 | 28 common madvise sys_madvise |
38 | 29 common shmget sys_shmget | 38 | 29 common shmget sys_shmget |
39 | 30 common shmat sys_shmat | 39 | 30 common shmat sys_shmat |
40 | 31 common shmctl sys_shmctl | 40 | 31 common shmctl sys_shmctl |
41 | 32 common dup sys_dup | 41 | 32 common dup sys_dup |
42 | 33 common dup2 sys_dup2 | 42 | 33 common dup2 sys_dup2 |
43 | 34 common pause sys_pause | 43 | 34 common pause sys_pause |
44 | 35 common nanosleep sys_nanosleep | 44 | 35 common nanosleep sys_nanosleep |
45 | 36 common getitimer sys_getitimer | 45 | 36 common getitimer sys_getitimer |
46 | 37 common alarm sys_alarm | 46 | 37 common alarm sys_alarm |
47 | 38 common setitimer sys_setitimer | 47 | 38 common setitimer sys_setitimer |
48 | 39 common getpid sys_getpid | 48 | 39 common getpid sys_getpid |
49 | 40 common sendfile sys_sendfile64 | 49 | 40 common sendfile sys_sendfile64 |
50 | 41 common socket sys_socket | 50 | 41 common socket sys_socket |
51 | 42 common connect sys_connect | 51 | 42 common connect sys_connect |
52 | 43 common accept sys_accept | 52 | 43 common accept sys_accept |
53 | 44 common sendto sys_sendto | 53 | 44 common sendto sys_sendto |
54 | 45 64 recvfrom sys_recvfrom | 54 | 45 64 recvfrom sys_recvfrom |
55 | 46 64 sendmsg sys_sendmsg | 55 | 46 64 sendmsg sys_sendmsg |
56 | 47 64 recvmsg sys_recvmsg | 56 | 47 64 recvmsg sys_recvmsg |
57 | 48 common shutdown sys_shutdown | 57 | 48 common shutdown sys_shutdown |
58 | 49 common bind sys_bind | 58 | 49 common bind sys_bind |
59 | 50 common listen sys_listen | 59 | 50 common listen sys_listen |
60 | 51 common getsockname sys_getsockname | 60 | 51 common getsockname sys_getsockname |
61 | 52 common getpeername sys_getpeername | 61 | 52 common getpeername sys_getpeername |
62 | 53 common socketpair sys_socketpair | 62 | 53 common socketpair sys_socketpair |
63 | 54 64 setsockopt sys_setsockopt | 63 | 54 64 setsockopt sys_setsockopt |
64 | 55 64 getsockopt sys_getsockopt | 64 | 55 64 getsockopt sys_getsockopt |
65 | 56 common clone stub_clone | 65 | 56 common clone stub_clone |
66 | 57 common fork stub_fork | 66 | 57 common fork stub_fork |
67 | 58 common vfork stub_vfork | 67 | 58 common vfork stub_vfork |
68 | 59 64 execve stub_execve | 68 | 59 64 execve stub_execve |
69 | 60 common exit sys_exit | 69 | 60 common exit sys_exit |
70 | 61 common wait4 sys_wait4 | 70 | 61 common wait4 sys_wait4 |
71 | 62 common kill sys_kill | 71 | 62 common kill sys_kill |
72 | 63 common uname sys_newuname | 72 | 63 common uname sys_newuname |
73 | 64 common semget sys_semget | 73 | 64 common semget sys_semget |
74 | 65 common semop sys_semop | 74 | 65 common semop sys_semop |
75 | 66 common semctl sys_semctl | 75 | 66 common semctl sys_semctl |
76 | 67 common shmdt sys_shmdt | 76 | 67 common shmdt sys_shmdt |
77 | 68 common msgget sys_msgget | 77 | 68 common msgget sys_msgget |
78 | 69 common msgsnd sys_msgsnd | 78 | 69 common msgsnd sys_msgsnd |
79 | 70 common msgrcv sys_msgrcv | 79 | 70 common msgrcv sys_msgrcv |
80 | 71 common msgctl sys_msgctl | 80 | 71 common msgctl sys_msgctl |
81 | 72 common fcntl sys_fcntl | 81 | 72 common fcntl sys_fcntl |
82 | 73 common flock sys_flock | 82 | 73 common flock sys_flock |
83 | 74 common fsync sys_fsync | 83 | 74 common fsync sys_fsync |
84 | 75 common fdatasync sys_fdatasync | 84 | 75 common fdatasync sys_fdatasync |
85 | 76 common truncate sys_truncate | 85 | 76 common truncate sys_truncate |
86 | 77 common ftruncate sys_ftruncate | 86 | 77 common ftruncate sys_ftruncate |
87 | 78 common getdents sys_getdents | 87 | 78 common getdents sys_getdents |
88 | 79 common getcwd sys_getcwd | 88 | 79 common getcwd sys_getcwd |
89 | 80 common chdir sys_chdir | 89 | 80 common chdir sys_chdir |
90 | 81 common fchdir sys_fchdir | 90 | 81 common fchdir sys_fchdir |
91 | 82 common rename sys_rename | 91 | 82 common rename sys_rename |
92 | 83 common mkdir sys_mkdir | 92 | 83 common mkdir sys_mkdir |
93 | 84 common rmdir sys_rmdir | 93 | 84 common rmdir sys_rmdir |
94 | 85 common creat sys_creat | 94 | 85 common creat sys_creat |
95 | 86 common link sys_link | 95 | 86 common link sys_link |
96 | 87 common unlink sys_unlink | 96 | 87 common unlink sys_unlink |
97 | 88 common symlink sys_symlink | 97 | 88 common symlink sys_symlink |
98 | 89 common readlink sys_readlink | 98 | 89 common readlink sys_readlink |
99 | 90 common chmod sys_chmod | 99 | 90 common chmod sys_chmod |
100 | 91 common fchmod sys_fchmod | 100 | 91 common fchmod sys_fchmod |
101 | 92 common chown sys_chown | 101 | 92 common chown sys_chown |
102 | 93 common fchown sys_fchown | 102 | 93 common fchown sys_fchown |
103 | 94 common lchown sys_lchown | 103 | 94 common lchown sys_lchown |
104 | 95 common umask sys_umask | 104 | 95 common umask sys_umask |
105 | 96 common gettimeofday sys_gettimeofday | 105 | 96 common gettimeofday sys_gettimeofday |
106 | 97 common getrlimit sys_getrlimit | 106 | 97 common getrlimit sys_getrlimit |
107 | 98 common getrusage sys_getrusage | 107 | 98 common getrusage sys_getrusage |
108 | 99 common sysinfo sys_sysinfo | 108 | 99 common sysinfo sys_sysinfo |
109 | 100 common times sys_times | 109 | 100 common times sys_times |
110 | 101 64 ptrace sys_ptrace | 110 | 101 64 ptrace sys_ptrace |
111 | 102 common getuid sys_getuid | 111 | 102 common getuid sys_getuid |
112 | 103 common syslog sys_syslog | 112 | 103 common syslog sys_syslog |
113 | 104 common getgid sys_getgid | 113 | 104 common getgid sys_getgid |
114 | 105 common setuid sys_setuid | 114 | 105 common setuid sys_setuid |
115 | 106 common setgid sys_setgid | 115 | 106 common setgid sys_setgid |
116 | 107 common geteuid sys_geteuid | 116 | 107 common geteuid sys_geteuid |
117 | 108 common getegid sys_getegid | 117 | 108 common getegid sys_getegid |
118 | 109 common setpgid sys_setpgid | 118 | 109 common setpgid sys_setpgid |
119 | 110 common getppid sys_getppid | 119 | 110 common getppid sys_getppid |
120 | 111 common getpgrp sys_getpgrp | 120 | 111 common getpgrp sys_getpgrp |
121 | 112 common setsid sys_setsid | 121 | 112 common setsid sys_setsid |
122 | 113 common setreuid sys_setreuid | 122 | 113 common setreuid sys_setreuid |
123 | 114 common setregid sys_setregid | 123 | 114 common setregid sys_setregid |
124 | 115 common getgroups sys_getgroups | 124 | 115 common getgroups sys_getgroups |
125 | 116 common setgroups sys_setgroups | 125 | 116 common setgroups sys_setgroups |
126 | 117 common setresuid sys_setresuid | 126 | 117 common setresuid sys_setresuid |
127 | 118 common getresuid sys_getresuid | 127 | 118 common getresuid sys_getresuid |
128 | 119 common setresgid sys_setresgid | 128 | 119 common setresgid sys_setresgid |
129 | 120 common getresgid sys_getresgid | 129 | 120 common getresgid sys_getresgid |
130 | 121 common getpgid sys_getpgid | 130 | 121 common getpgid sys_getpgid |
131 | 122 common setfsuid sys_setfsuid | 131 | 122 common setfsuid sys_setfsuid |
132 | 123 common setfsgid sys_setfsgid | 132 | 123 common setfsgid sys_setfsgid |
133 | 124 common getsid sys_getsid | 133 | 124 common getsid sys_getsid |
134 | 125 common capget sys_capget | 134 | 125 common capget sys_capget |
135 | 126 common capset sys_capset | 135 | 126 common capset sys_capset |
136 | 127 64 rt_sigpending sys_rt_sigpending | 136 | 127 64 rt_sigpending sys_rt_sigpending |
137 | 128 64 rt_sigtimedwait sys_rt_sigtimedwait | 137 | 128 64 rt_sigtimedwait sys_rt_sigtimedwait |
138 | 129 64 rt_sigqueueinfo sys_rt_sigqueueinfo | 138 | 129 64 rt_sigqueueinfo sys_rt_sigqueueinfo |
139 | 130 common rt_sigsuspend sys_rt_sigsuspend | 139 | 130 common rt_sigsuspend sys_rt_sigsuspend |
140 | 131 64 sigaltstack sys_sigaltstack | 140 | 131 64 sigaltstack sys_sigaltstack |
141 | 132 common utime sys_utime | 141 | 132 common utime sys_utime |
142 | 133 common mknod sys_mknod | 142 | 133 common mknod sys_mknod |
143 | 134 64 uselib | 143 | 134 64 uselib |
144 | 135 common personality sys_personality | 144 | 135 common personality sys_personality |
145 | 136 common ustat sys_ustat | 145 | 136 common ustat sys_ustat |
146 | 137 common statfs sys_statfs | 146 | 137 common statfs sys_statfs |
147 | 138 common fstatfs sys_fstatfs | 147 | 138 common fstatfs sys_fstatfs |
148 | 139 common sysfs sys_sysfs | 148 | 139 common sysfs sys_sysfs |
149 | 140 common getpriority sys_getpriority | 149 | 140 common getpriority sys_getpriority |
150 | 141 common setpriority sys_setpriority | 150 | 141 common setpriority sys_setpriority |
151 | 142 common sched_setparam sys_sched_setparam | 151 | 142 common sched_setparam sys_sched_setparam |
152 | 143 common sched_getparam sys_sched_getparam | 152 | 143 common sched_getparam sys_sched_getparam |
153 | 144 common sched_setscheduler sys_sched_setscheduler | 153 | 144 common sched_setscheduler sys_sched_setscheduler |
154 | 145 common sched_getscheduler sys_sched_getscheduler | 154 | 145 common sched_getscheduler sys_sched_getscheduler |
155 | 146 common sched_get_priority_max sys_sched_get_priority_max | 155 | 146 common sched_get_priority_max sys_sched_get_priority_max |
156 | 147 common sched_get_priority_min sys_sched_get_priority_min | 156 | 147 common sched_get_priority_min sys_sched_get_priority_min |
157 | 148 common sched_rr_get_interval sys_sched_rr_get_interval | 157 | 148 common sched_rr_get_interval sys_sched_rr_get_interval |
158 | 149 common mlock sys_mlock | 158 | 149 common mlock sys_mlock |
159 | 150 common munlock sys_munlock | 159 | 150 common munlock sys_munlock |
160 | 151 common mlockall sys_mlockall | 160 | 151 common mlockall sys_mlockall |
161 | 152 common munlockall sys_munlockall | 161 | 152 common munlockall sys_munlockall |
162 | 153 common vhangup sys_vhangup | 162 | 153 common vhangup sys_vhangup |
163 | 154 common modify_ldt sys_modify_ldt | 163 | 154 common modify_ldt sys_modify_ldt |
164 | 155 common pivot_root sys_pivot_root | 164 | 155 common pivot_root sys_pivot_root |
165 | 156 64 _sysctl sys_sysctl | 165 | 156 64 _sysctl sys_sysctl |
166 | 157 common prctl sys_prctl | 166 | 157 common prctl sys_prctl |
167 | 158 common arch_prctl sys_arch_prctl | 167 | 158 common arch_prctl sys_arch_prctl |
168 | 159 common adjtimex sys_adjtimex | 168 | 159 common adjtimex sys_adjtimex |
169 | 160 common setrlimit sys_setrlimit | 169 | 160 common setrlimit sys_setrlimit |
170 | 161 common chroot sys_chroot | 170 | 161 common chroot sys_chroot |
171 | 162 common sync sys_sync | 171 | 162 common sync sys_sync |
172 | 163 common acct sys_acct | 172 | 163 common acct sys_acct |
173 | 164 common settimeofday sys_settimeofday | 173 | 164 common settimeofday sys_settimeofday |
174 | 165 common mount sys_mount | 174 | 165 common mount sys_mount |
175 | 166 common umount2 sys_umount | 175 | 166 common umount2 sys_umount |
176 | 167 common swapon sys_swapon | 176 | 167 common swapon sys_swapon |
177 | 168 common swapoff sys_swapoff | 177 | 168 common swapoff sys_swapoff |
178 | 169 common reboot sys_reboot | 178 | 169 common reboot sys_reboot |
179 | 170 common sethostname sys_sethostname | 179 | 170 common sethostname sys_sethostname |
180 | 171 common setdomainname sys_setdomainname | 180 | 171 common setdomainname sys_setdomainname |
181 | 172 common iopl stub_iopl | 181 | 172 common iopl stub_iopl |
182 | 173 common ioperm sys_ioperm | 182 | 173 common ioperm sys_ioperm |
183 | 174 64 create_module | 183 | 174 64 create_module |
184 | 175 common init_module sys_init_module | 184 | 175 common init_module sys_init_module |
185 | 176 common delete_module sys_delete_module | 185 | 176 common delete_module sys_delete_module |
186 | 177 64 get_kernel_syms | 186 | 177 64 get_kernel_syms |
187 | 178 64 query_module | 187 | 178 64 query_module |
188 | 179 common quotactl sys_quotactl | 188 | 179 common quotactl sys_quotactl |
189 | 180 64 nfsservctl | 189 | 180 64 nfsservctl |
190 | 181 common getpmsg | 190 | 181 common getpmsg |
191 | 182 common putpmsg | 191 | 182 common putpmsg |
192 | 183 common afs_syscall | 192 | 183 common afs_syscall |
193 | 184 common tuxcall | 193 | 184 common tuxcall |
194 | 185 common security | 194 | 185 common security |
195 | 186 common gettid sys_gettid | 195 | 186 common gettid sys_gettid |
196 | 187 common readahead sys_readahead | 196 | 187 common readahead sys_readahead |
197 | 188 common setxattr sys_setxattr | 197 | 188 common setxattr sys_setxattr |
198 | 189 common lsetxattr sys_lsetxattr | 198 | 189 common lsetxattr sys_lsetxattr |
199 | 190 common fsetxattr sys_fsetxattr | 199 | 190 common fsetxattr sys_fsetxattr |
200 | 191 common getxattr sys_getxattr | 200 | 191 common getxattr sys_getxattr |
201 | 192 common lgetxattr sys_lgetxattr | 201 | 192 common lgetxattr sys_lgetxattr |
202 | 193 common fgetxattr sys_fgetxattr | 202 | 193 common fgetxattr sys_fgetxattr |
203 | 194 common listxattr sys_listxattr | 203 | 194 common listxattr sys_listxattr |
204 | 195 common llistxattr sys_llistxattr | 204 | 195 common llistxattr sys_llistxattr |
205 | 196 common flistxattr sys_flistxattr | 205 | 196 common flistxattr sys_flistxattr |
206 | 197 common removexattr sys_removexattr | 206 | 197 common removexattr sys_removexattr |
207 | 198 common lremovexattr sys_lremovexattr | 207 | 198 common lremovexattr sys_lremovexattr |
208 | 199 common fremovexattr sys_fremovexattr | 208 | 199 common fremovexattr sys_fremovexattr |
209 | 200 common tkill sys_tkill | 209 | 200 common tkill sys_tkill |
210 | 201 common time sys_time | 210 | 201 common time sys_time |
211 | 202 common futex sys_futex | 211 | 202 common futex sys_futex |
212 | 203 common sched_setaffinity sys_sched_setaffinity | 212 | 203 common sched_setaffinity sys_sched_setaffinity |
213 | 204 common sched_getaffinity sys_sched_getaffinity | 213 | 204 common sched_getaffinity sys_sched_getaffinity |
214 | 205 64 set_thread_area | 214 | 205 64 set_thread_area |
215 | 206 common io_setup sys_io_setup | 215 | 206 common io_setup sys_io_setup |
216 | 207 common io_destroy sys_io_destroy | 216 | 207 common io_destroy sys_io_destroy |
217 | 208 common io_getevents sys_io_getevents | 217 | 208 common io_getevents sys_io_getevents |
218 | 209 common io_submit sys_io_submit | 218 | 209 common io_submit sys_io_submit |
219 | 210 common io_cancel sys_io_cancel | 219 | 210 common io_cancel sys_io_cancel |
220 | 211 64 get_thread_area | 220 | 211 64 get_thread_area |
221 | 212 common lookup_dcookie sys_lookup_dcookie | 221 | 212 common lookup_dcookie sys_lookup_dcookie |
222 | 213 common epoll_create sys_epoll_create | 222 | 213 common epoll_create sys_epoll_create |
223 | 214 64 epoll_ctl_old | 223 | 214 64 epoll_ctl_old |
224 | 215 64 epoll_wait_old | 224 | 215 64 epoll_wait_old |
225 | 216 common remap_file_pages sys_remap_file_pages | 225 | 216 common remap_file_pages sys_remap_file_pages |
226 | 217 common getdents64 sys_getdents64 | 226 | 217 common getdents64 sys_getdents64 |
227 | 218 common set_tid_address sys_set_tid_address | 227 | 218 common set_tid_address sys_set_tid_address |
228 | 219 common restart_syscall sys_restart_syscall | 228 | 219 common restart_syscall sys_restart_syscall |
229 | 220 common semtimedop sys_semtimedop | 229 | 220 common semtimedop sys_semtimedop |
230 | 221 common fadvise64 sys_fadvise64 | 230 | 221 common fadvise64 sys_fadvise64 |
231 | 222 64 timer_create sys_timer_create | 231 | 222 64 timer_create sys_timer_create |
232 | 223 common timer_settime sys_timer_settime | 232 | 223 common timer_settime sys_timer_settime |
233 | 224 common timer_gettime sys_timer_gettime | 233 | 224 common timer_gettime sys_timer_gettime |
234 | 225 common timer_getoverrun sys_timer_getoverrun | 234 | 225 common timer_getoverrun sys_timer_getoverrun |
235 | 226 common timer_delete sys_timer_delete | 235 | 226 common timer_delete sys_timer_delete |
236 | 227 common clock_settime sys_clock_settime | 236 | 227 common clock_settime sys_clock_settime |
237 | 228 common clock_gettime sys_clock_gettime | 237 | 228 common clock_gettime sys_clock_gettime |
238 | 229 common clock_getres sys_clock_getres | 238 | 229 common clock_getres sys_clock_getres |
239 | 230 common clock_nanosleep sys_clock_nanosleep | 239 | 230 common clock_nanosleep sys_clock_nanosleep |
240 | 231 common exit_group sys_exit_group | 240 | 231 common exit_group sys_exit_group |
241 | 232 common epoll_wait sys_epoll_wait | 241 | 232 common epoll_wait sys_epoll_wait |
242 | 233 common epoll_ctl sys_epoll_ctl | 242 | 233 common epoll_ctl sys_epoll_ctl |
243 | 234 common tgkill sys_tgkill | 243 | 234 common tgkill sys_tgkill |
244 | 235 common utimes sys_utimes | 244 | 235 common utimes sys_utimes |
245 | 236 64 vserver | 245 | 236 64 vserver |
246 | 237 common mbind sys_mbind | 246 | 237 common mbind sys_mbind |
247 | 238 common set_mempolicy sys_set_mempolicy | 247 | 238 common set_mempolicy sys_set_mempolicy |
248 | 239 common get_mempolicy sys_get_mempolicy | 248 | 239 common get_mempolicy sys_get_mempolicy |
249 | 240 common mq_open sys_mq_open | 249 | 240 common mq_open sys_mq_open |
250 | 241 common mq_unlink sys_mq_unlink | 250 | 241 common mq_unlink sys_mq_unlink |
251 | 242 common mq_timedsend sys_mq_timedsend | 251 | 242 common mq_timedsend sys_mq_timedsend |
252 | 243 common mq_timedreceive sys_mq_timedreceive | 252 | 243 common mq_timedreceive sys_mq_timedreceive |
253 | 244 64 mq_notify sys_mq_notify | 253 | 244 64 mq_notify sys_mq_notify |
254 | 245 common mq_getsetattr sys_mq_getsetattr | 254 | 245 common mq_getsetattr sys_mq_getsetattr |
255 | 246 64 kexec_load sys_kexec_load | 255 | 246 64 kexec_load sys_kexec_load |
256 | 247 64 waitid sys_waitid | 256 | 247 64 waitid sys_waitid |
257 | 248 common add_key sys_add_key | 257 | 248 common add_key sys_add_key |
258 | 249 common request_key sys_request_key | 258 | 249 common request_key sys_request_key |
259 | 250 common keyctl sys_keyctl | 259 | 250 common keyctl sys_keyctl |
260 | 251 common ioprio_set sys_ioprio_set | 260 | 251 common ioprio_set sys_ioprio_set |
261 | 252 common ioprio_get sys_ioprio_get | 261 | 252 common ioprio_get sys_ioprio_get |
262 | 253 common inotify_init sys_inotify_init | 262 | 253 common inotify_init sys_inotify_init |
263 | 254 common inotify_add_watch sys_inotify_add_watch | 263 | 254 common inotify_add_watch sys_inotify_add_watch |
264 | 255 common inotify_rm_watch sys_inotify_rm_watch | 264 | 255 common inotify_rm_watch sys_inotify_rm_watch |
265 | 256 common migrate_pages sys_migrate_pages | 265 | 256 common migrate_pages sys_migrate_pages |
266 | 257 common openat sys_openat | 266 | 257 common openat sys_openat |
267 | 258 common mkdirat sys_mkdirat | 267 | 258 common mkdirat sys_mkdirat |
268 | 259 common mknodat sys_mknodat | 268 | 259 common mknodat sys_mknodat |
269 | 260 common fchownat sys_fchownat | 269 | 260 common fchownat sys_fchownat |
270 | 261 common futimesat sys_futimesat | 270 | 261 common futimesat sys_futimesat |
271 | 262 common newfstatat sys_newfstatat | 271 | 262 common newfstatat sys_newfstatat |
272 | 263 common unlinkat sys_unlinkat | 272 | 263 common unlinkat sys_unlinkat |
273 | 264 common renameat sys_renameat | 273 | 264 common renameat sys_renameat |
274 | 265 common linkat sys_linkat | 274 | 265 common linkat sys_linkat |
275 | 266 common symlinkat sys_symlinkat | 275 | 266 common symlinkat sys_symlinkat |
276 | 267 common readlinkat sys_readlinkat | 276 | 267 common readlinkat sys_readlinkat |
277 | 268 common fchmodat sys_fchmodat | 277 | 268 common fchmodat sys_fchmodat |
278 | 269 common faccessat sys_faccessat | 278 | 269 common faccessat sys_faccessat |
279 | 270 common pselect6 sys_pselect6 | 279 | 270 common pselect6 sys_pselect6 |
280 | 271 common ppoll sys_ppoll | 280 | 271 common ppoll sys_ppoll |
281 | 272 common unshare sys_unshare | 281 | 272 common unshare sys_unshare |
282 | 273 64 set_robust_list sys_set_robust_list | 282 | 273 64 set_robust_list sys_set_robust_list |
283 | 274 64 get_robust_list sys_get_robust_list | 283 | 274 64 get_robust_list sys_get_robust_list |
284 | 275 common splice sys_splice | 284 | 275 common splice sys_splice |
285 | 276 common tee sys_tee | 285 | 276 common tee sys_tee |
286 | 277 common sync_file_range sys_sync_file_range | 286 | 277 common sync_file_range sys_sync_file_range |
287 | 278 64 vmsplice sys_vmsplice | 287 | 278 64 vmsplice sys_vmsplice |
288 | 279 64 move_pages sys_move_pages | 288 | 279 64 move_pages sys_move_pages |
289 | 280 common utimensat sys_utimensat | 289 | 280 common utimensat sys_utimensat |
290 | 281 common epoll_pwait sys_epoll_pwait | 290 | 281 common epoll_pwait sys_epoll_pwait |
291 | 282 common signalfd sys_signalfd | 291 | 282 common signalfd sys_signalfd |
292 | 283 common timerfd_create sys_timerfd_create | 292 | 283 common timerfd_create sys_timerfd_create |
293 | 284 common eventfd sys_eventfd | 293 | 284 common eventfd sys_eventfd |
294 | 285 common fallocate sys_fallocate | 294 | 285 common fallocate sys_fallocate |
295 | 286 common timerfd_settime sys_timerfd_settime | 295 | 286 common timerfd_settime sys_timerfd_settime |
296 | 287 common timerfd_gettime sys_timerfd_gettime | 296 | 287 common timerfd_gettime sys_timerfd_gettime |
297 | 288 common accept4 sys_accept4 | 297 | 288 common accept4 sys_accept4 |
298 | 289 common signalfd4 sys_signalfd4 | 298 | 289 common signalfd4 sys_signalfd4 |
299 | 290 common eventfd2 sys_eventfd2 | 299 | 290 common eventfd2 sys_eventfd2 |
300 | 291 common epoll_create1 sys_epoll_create1 | 300 | 291 common epoll_create1 sys_epoll_create1 |
301 | 292 common dup3 sys_dup3 | 301 | 292 common dup3 sys_dup3 |
302 | 293 common pipe2 sys_pipe2 | 302 | 293 common pipe2 sys_pipe2 |
303 | 294 common inotify_init1 sys_inotify_init1 | 303 | 294 common inotify_init1 sys_inotify_init1 |
304 | 295 64 preadv sys_preadv | 304 | 295 64 preadv sys_preadv |
305 | 296 64 pwritev sys_pwritev | 305 | 296 64 pwritev sys_pwritev |
306 | 297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo | 306 | 297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo |
307 | 298 common perf_event_open sys_perf_event_open | 307 | 298 common perf_event_open sys_perf_event_open |
308 | 299 64 recvmmsg sys_recvmmsg | 308 | 299 64 recvmmsg sys_recvmmsg |
309 | 300 common fanotify_init sys_fanotify_init | 309 | 300 common fanotify_init sys_fanotify_init |
310 | 301 common fanotify_mark sys_fanotify_mark | 310 | 301 common fanotify_mark sys_fanotify_mark |
311 | 302 common prlimit64 sys_prlimit64 | 311 | 302 common prlimit64 sys_prlimit64 |
312 | 303 common name_to_handle_at sys_name_to_handle_at | 312 | 303 common name_to_handle_at sys_name_to_handle_at |
313 | 304 common open_by_handle_at sys_open_by_handle_at | 313 | 304 common open_by_handle_at sys_open_by_handle_at |
314 | 305 common clock_adjtime sys_clock_adjtime | 314 | 305 common clock_adjtime sys_clock_adjtime |
315 | 306 common syncfs sys_syncfs | 315 | 306 common syncfs sys_syncfs |
316 | 307 64 sendmmsg sys_sendmmsg | 316 | 307 64 sendmmsg sys_sendmmsg |
317 | 308 common setns sys_setns | 317 | 308 common setns sys_setns |
318 | 309 common getcpu sys_getcpu | 318 | 309 common getcpu sys_getcpu |
319 | 310 64 process_vm_readv sys_process_vm_readv | 319 | 310 64 process_vm_readv sys_process_vm_readv |
320 | 311 64 process_vm_writev sys_process_vm_writev | 320 | 311 64 process_vm_writev sys_process_vm_writev |
321 | 312 common kcmp sys_kcmp | 321 | 312 common kcmp sys_kcmp |
322 | 322 | ||
323 | # | 323 | # |
324 | # x32-specific system call numbers start at 512 to avoid cache impact | 324 | # x32-specific system call numbers start at 512 to avoid cache impact |
325 | # for native 64-bit operation. | 325 | # for native 64-bit operation. |
326 | # | 326 | # |
327 | 512 x32 rt_sigaction sys32_rt_sigaction | 327 | 512 x32 rt_sigaction sys32_rt_sigaction |
328 | 513 x32 rt_sigreturn stub_x32_rt_sigreturn | 328 | 513 x32 rt_sigreturn stub_x32_rt_sigreturn |
329 | 514 x32 ioctl compat_sys_ioctl | 329 | 514 x32 ioctl compat_sys_ioctl |
330 | 515 x32 readv compat_sys_readv | 330 | 515 x32 readv compat_sys_readv |
331 | 516 x32 writev compat_sys_writev | 331 | 516 x32 writev compat_sys_writev |
332 | 517 x32 recvfrom compat_sys_recvfrom | 332 | 517 x32 recvfrom compat_sys_recvfrom |
333 | 518 x32 sendmsg compat_sys_sendmsg | 333 | 518 x32 sendmsg compat_sys_sendmsg |
334 | 519 x32 recvmsg compat_sys_recvmsg | 334 | 519 x32 recvmsg compat_sys_recvmsg |
335 | 520 x32 execve stub_x32_execve | 335 | 520 x32 execve stub_x32_execve |
336 | 521 x32 ptrace compat_sys_ptrace | 336 | 521 x32 ptrace compat_sys_ptrace |
337 | 522 x32 rt_sigpending sys32_rt_sigpending | 337 | 522 x32 rt_sigpending sys32_rt_sigpending |
338 | 523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait | 338 | 523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait |
339 | 524 x32 rt_sigqueueinfo sys32_rt_sigqueueinfo | 339 | 524 x32 rt_sigqueueinfo sys32_rt_sigqueueinfo |
340 | 525 x32 sigaltstack stub_x32_sigaltstack | 340 | 525 x32 sigaltstack compat_sys_sigaltstack |
341 | 526 x32 timer_create compat_sys_timer_create | 341 | 526 x32 timer_create compat_sys_timer_create |
342 | 527 x32 mq_notify compat_sys_mq_notify | 342 | 527 x32 mq_notify compat_sys_mq_notify |
343 | 528 x32 kexec_load compat_sys_kexec_load | 343 | 528 x32 kexec_load compat_sys_kexec_load |
344 | 529 x32 waitid compat_sys_waitid | 344 | 529 x32 waitid compat_sys_waitid |
345 | 530 x32 set_robust_list compat_sys_set_robust_list | 345 | 530 x32 set_robust_list compat_sys_set_robust_list |
346 | 531 x32 get_robust_list compat_sys_get_robust_list | 346 | 531 x32 get_robust_list compat_sys_get_robust_list |
347 | 532 x32 vmsplice compat_sys_vmsplice | 347 | 532 x32 vmsplice compat_sys_vmsplice |
348 | 533 x32 move_pages compat_sys_move_pages | 348 | 533 x32 move_pages compat_sys_move_pages |
349 | 534 x32 preadv compat_sys_preadv64 | 349 | 534 x32 preadv compat_sys_preadv64 |
350 | 535 x32 pwritev compat_sys_pwritev64 | 350 | 535 x32 pwritev compat_sys_pwritev64 |
351 | 536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo | 351 | 536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo |
352 | 537 x32 recvmmsg compat_sys_recvmmsg | 352 | 537 x32 recvmmsg compat_sys_recvmmsg |
353 | 538 x32 sendmmsg compat_sys_sendmmsg | 353 | 538 x32 sendmmsg compat_sys_sendmmsg |
354 | 539 x32 process_vm_readv compat_sys_process_vm_readv | 354 | 539 x32 process_vm_readv compat_sys_process_vm_readv |
355 | 540 x32 process_vm_writev compat_sys_process_vm_writev | 355 | 540 x32 process_vm_writev compat_sys_process_vm_writev |
356 | 541 x32 setsockopt compat_sys_setsockopt | 356 | 541 x32 setsockopt compat_sys_setsockopt |
357 | 542 x32 getsockopt compat_sys_getsockopt | 357 | 542 x32 getsockopt compat_sys_getsockopt |
358 | 358 |
include/linux/compat.h
1 | #ifndef _LINUX_COMPAT_H | 1 | #ifndef _LINUX_COMPAT_H |
2 | #define _LINUX_COMPAT_H | 2 | #define _LINUX_COMPAT_H |
3 | /* | 3 | /* |
4 | * These are the type definitions for the architecture specific | 4 | * These are the type definitions for the architecture specific |
5 | * syscall compatibility layer. | 5 | * syscall compatibility layer. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifdef CONFIG_COMPAT | 8 | #ifdef CONFIG_COMPAT |
9 | 9 | ||
10 | #include <linux/stat.h> | 10 | #include <linux/stat.h> |
11 | #include <linux/param.h> /* for HZ */ | 11 | #include <linux/param.h> /* for HZ */ |
12 | #include <linux/sem.h> | 12 | #include <linux/sem.h> |
13 | #include <linux/socket.h> | 13 | #include <linux/socket.h> |
14 | #include <linux/if.h> | 14 | #include <linux/if.h> |
15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
16 | #include <linux/aio_abi.h> /* for aio_context_t */ | 16 | #include <linux/aio_abi.h> /* for aio_context_t */ |
17 | 17 | ||
18 | #include <asm/compat.h> | 18 | #include <asm/compat.h> |
19 | #include <asm/siginfo.h> | 19 | #include <asm/siginfo.h> |
20 | #include <asm/signal.h> | 20 | #include <asm/signal.h> |
21 | 21 | ||
22 | #ifndef COMPAT_USE_64BIT_TIME | 22 | #ifndef COMPAT_USE_64BIT_TIME |
23 | #define COMPAT_USE_64BIT_TIME 0 | 23 | #define COMPAT_USE_64BIT_TIME 0 |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #ifndef __SC_DELOUSE | 26 | #ifndef __SC_DELOUSE |
27 | #define __SC_DELOUSE(t,v) ((t)(unsigned long)(v)) | 27 | #define __SC_DELOUSE(t,v) ((t)(unsigned long)(v)) |
28 | #endif | 28 | #endif |
29 | 29 | ||
30 | #define __SC_CCAST1(t1, a1) __SC_DELOUSE(t1,a1) | 30 | #define __SC_CCAST1(t1, a1) __SC_DELOUSE(t1,a1) |
31 | #define __SC_CCAST2(t2, a2, ...) __SC_DELOUSE(t2,a2), __SC_CCAST1(__VA_ARGS__) | 31 | #define __SC_CCAST2(t2, a2, ...) __SC_DELOUSE(t2,a2), __SC_CCAST1(__VA_ARGS__) |
32 | #define __SC_CCAST3(t3, a3, ...) __SC_DELOUSE(t3,a3), __SC_CCAST2(__VA_ARGS__) | 32 | #define __SC_CCAST3(t3, a3, ...) __SC_DELOUSE(t3,a3), __SC_CCAST2(__VA_ARGS__) |
33 | #define __SC_CCAST4(t4, a4, ...) __SC_DELOUSE(t4,a4), __SC_CCAST3(__VA_ARGS__) | 33 | #define __SC_CCAST4(t4, a4, ...) __SC_DELOUSE(t4,a4), __SC_CCAST3(__VA_ARGS__) |
34 | #define __SC_CCAST5(t5, a5, ...) __SC_DELOUSE(t5,a5), __SC_CCAST4(__VA_ARGS__) | 34 | #define __SC_CCAST5(t5, a5, ...) __SC_DELOUSE(t5,a5), __SC_CCAST4(__VA_ARGS__) |
35 | #define __SC_CCAST6(t6, a6, ...) __SC_DELOUSE(t6,a6), __SC_CCAST5(__VA_ARGS__) | 35 | #define __SC_CCAST6(t6, a6, ...) __SC_DELOUSE(t6,a6), __SC_CCAST5(__VA_ARGS__) |
36 | #define COMPAT_SYSCALL_DEFINE1(name, ...) \ | 36 | #define COMPAT_SYSCALL_DEFINE1(name, ...) \ |
37 | COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) | 37 | COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) |
38 | #define COMPAT_SYSCALL_DEFINE2(name, ...) \ | 38 | #define COMPAT_SYSCALL_DEFINE2(name, ...) \ |
39 | COMPAT_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) | 39 | COMPAT_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) |
40 | #define COMPAT_SYSCALL_DEFINE3(name, ...) \ | 40 | #define COMPAT_SYSCALL_DEFINE3(name, ...) \ |
41 | COMPAT_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) | 41 | COMPAT_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) |
42 | #define COMPAT_SYSCALL_DEFINE4(name, ...) \ | 42 | #define COMPAT_SYSCALL_DEFINE4(name, ...) \ |
43 | COMPAT_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__) | 43 | COMPAT_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__) |
44 | #define COMPAT_SYSCALL_DEFINE5(name, ...) \ | 44 | #define COMPAT_SYSCALL_DEFINE5(name, ...) \ |
45 | COMPAT_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) | 45 | COMPAT_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) |
46 | #define COMPAT_SYSCALL_DEFINE6(name, ...) \ | 46 | #define COMPAT_SYSCALL_DEFINE6(name, ...) \ |
47 | COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) | 47 | COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) |
48 | 48 | ||
49 | #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS | 49 | #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS |
50 | 50 | ||
51 | #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ | 51 | #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ |
52 | asmlinkage long compat_sys##name(__SC_DECL##x(__VA_ARGS__)); \ | 52 | asmlinkage long compat_sys##name(__SC_DECL##x(__VA_ARGS__)); \ |
53 | static inline long C_SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ | 53 | static inline long C_SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ |
54 | asmlinkage long compat_SyS##name(__SC_LONG##x(__VA_ARGS__)) \ | 54 | asmlinkage long compat_SyS##name(__SC_LONG##x(__VA_ARGS__)) \ |
55 | { \ | 55 | { \ |
56 | return (long) C_SYSC##name(__SC_CCAST##x(__VA_ARGS__)); \ | 56 | return (long) C_SYSC##name(__SC_CCAST##x(__VA_ARGS__)); \ |
57 | } \ | 57 | } \ |
58 | SYSCALL_ALIAS(compat_sys##name, compat_SyS##name); \ | 58 | SYSCALL_ALIAS(compat_sys##name, compat_SyS##name); \ |
59 | static inline long C_SYSC##name(__SC_DECL##x(__VA_ARGS__)) | 59 | static inline long C_SYSC##name(__SC_DECL##x(__VA_ARGS__)) |
60 | 60 | ||
61 | #else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ | 61 | #else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ |
62 | 62 | ||
63 | #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ | 63 | #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ |
64 | asmlinkage long compat_sys##name(__SC_DECL##x(__VA_ARGS__)) | 64 | asmlinkage long compat_sys##name(__SC_DECL##x(__VA_ARGS__)) |
65 | 65 | ||
66 | #endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ | 66 | #endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ |
67 | 67 | ||
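[Editor's note, for orientation — not part of the diff. In the simple case above, with CONFIG_HAVE_SYSCALL_WRAPPERS unset, the definition header this commit adds elsewhere for sigaltstack, roughly

	COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)

expands to nothing more than the plain prototype

	asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
					       compat_stack_t __user *uoss_ptr)

with the braces that follow supplying the body directly. In the wrappers case, the visible compat_SyS_* entry point instead takes all-long arguments and funnels each through __SC_DELOUSE() — by default the plain cast through unsigned long defined at the top of this hunk, which an architecture can override (s390 does, historically) to scrub stale upper halves of 32-bit values handed over in 64-bit registers. The parameter names uss_ptr/uoss_ptr are illustrative.]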
68 | #ifndef compat_user_stack_pointer | 68 | #ifndef compat_user_stack_pointer |
69 | #define compat_user_stack_pointer() current_user_stack_pointer() | 69 | #define compat_user_stack_pointer() current_user_stack_pointer() |
70 | #endif | 70 | #endif |
71 | #ifdef CONFIG_GENERIC_SIGALTSTACK | ||
72 | #ifndef compat_sigaltstack /* we'll need that for MIPS */ | ||
73 | typedef struct compat_sigaltstack { | ||
74 | compat_uptr_t ss_sp; | ||
75 | int ss_flags; | ||
76 | compat_size_t ss_size; | ||
77 | } compat_stack_t; | ||
78 | #endif | ||
79 | #endif | ||
80 | |||
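[Editor's reconstruction, hedged — this commit touches ten files and only three are shown on this page. The compat_stack_t just introduced is consumed by the generic handler the commit adds in kernel/signal.c, the same compat_sys_sigaltstack that the x32 table entry 525 above now points at. A sketch of that handler as it appeared in this era, under CONFIG_GENERIC_SIGALTSTACK; minor details may differ from the tree you are browsing:

	#ifdef CONFIG_GENERIC_SIGALTSTACK
	COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
	{
		stack_t uss, uoss;
		int ret;
		mm_segment_t seg;

		if (uss_ptr) {
			compat_stack_t uss32;

			memset(&uss, 0, sizeof(stack_t));
			/* read the 32-bit layout, then widen the pointer */
			if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
				return -EFAULT;
			uss.ss_sp = compat_ptr(uss32.ss_sp);
			uss.ss_flags = uss32.ss_flags;
			uss.ss_size = uss32.ss_size;
		}
		seg = get_fs();
		set_fs(KERNEL_DS);
		/* reuse the native do_sigaltstack() on kernel-space copies */
		ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
				     (stack_t __force __user *) &uoss,
				     compat_user_stack_pointer());
		set_fs(seg);
		if (ret >= 0 && uoss_ptr) {
			/* narrow the result back into the 32-bit layout */
			if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
			    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
			    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
			    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
				ret = -EFAULT;
		}
		return ret;
	}
	#endif

The get_fs()/set_fs(KERNEL_DS) switch lets kernel-space copies pass through do_sigaltstack()'s __user-pointer interface, and compat_user_stack_pointer() — given its default just above in this header — supplies the stack pointer for the on-stack check.]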
71 | #define compat_jiffies_to_clock_t(x) \ | 81 | #define compat_jiffies_to_clock_t(x) \ |
72 | (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) | 82 | (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) |
73 | 83 | ||
74 | typedef __compat_uid32_t compat_uid_t; | 84 | typedef __compat_uid32_t compat_uid_t; |
75 | typedef __compat_gid32_t compat_gid_t; | 85 | typedef __compat_gid32_t compat_gid_t; |
76 | 86 | ||
77 | struct compat_sel_arg_struct; | 87 | struct compat_sel_arg_struct; |
78 | struct rusage; | 88 | struct rusage; |
79 | 89 | ||
80 | struct compat_itimerspec { | 90 | struct compat_itimerspec { |
81 | struct compat_timespec it_interval; | 91 | struct compat_timespec it_interval; |
82 | struct compat_timespec it_value; | 92 | struct compat_timespec it_value; |
83 | }; | 93 | }; |
84 | 94 | ||
85 | struct compat_utimbuf { | 95 | struct compat_utimbuf { |
86 | compat_time_t actime; | 96 | compat_time_t actime; |
87 | compat_time_t modtime; | 97 | compat_time_t modtime; |
88 | }; | 98 | }; |
89 | 99 | ||
90 | struct compat_itimerval { | 100 | struct compat_itimerval { |
91 | struct compat_timeval it_interval; | 101 | struct compat_timeval it_interval; |
92 | struct compat_timeval it_value; | 102 | struct compat_timeval it_value; |
93 | }; | 103 | }; |
94 | 104 | ||
95 | struct compat_tms { | 105 | struct compat_tms { |
96 | compat_clock_t tms_utime; | 106 | compat_clock_t tms_utime; |
97 | compat_clock_t tms_stime; | 107 | compat_clock_t tms_stime; |
98 | compat_clock_t tms_cutime; | 108 | compat_clock_t tms_cutime; |
99 | compat_clock_t tms_cstime; | 109 | compat_clock_t tms_cstime; |
100 | }; | 110 | }; |
101 | 111 | ||
102 | struct compat_timex { | 112 | struct compat_timex { |
103 | compat_uint_t modes; | 113 | compat_uint_t modes; |
104 | compat_long_t offset; | 114 | compat_long_t offset; |
105 | compat_long_t freq; | 115 | compat_long_t freq; |
106 | compat_long_t maxerror; | 116 | compat_long_t maxerror; |
107 | compat_long_t esterror; | 117 | compat_long_t esterror; |
108 | compat_int_t status; | 118 | compat_int_t status; |
109 | compat_long_t constant; | 119 | compat_long_t constant; |
110 | compat_long_t precision; | 120 | compat_long_t precision; |
111 | compat_long_t tolerance; | 121 | compat_long_t tolerance; |
112 | struct compat_timeval time; | 122 | struct compat_timeval time; |
113 | compat_long_t tick; | 123 | compat_long_t tick; |
114 | compat_long_t ppsfreq; | 124 | compat_long_t ppsfreq; |
115 | compat_long_t jitter; | 125 | compat_long_t jitter; |
116 | compat_int_t shift; | 126 | compat_int_t shift; |
117 | compat_long_t stabil; | 127 | compat_long_t stabil; |
118 | compat_long_t jitcnt; | 128 | compat_long_t jitcnt; |
119 | compat_long_t calcnt; | 129 | compat_long_t calcnt; |
120 | compat_long_t errcnt; | 130 | compat_long_t errcnt; |
121 | compat_long_t stbcnt; | 131 | compat_long_t stbcnt; |
122 | compat_int_t tai; | 132 | compat_int_t tai; |
123 | 133 | ||
124 | compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; | 134 | compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; |
125 | compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; | 135 | compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; |
126 | compat_int_t:32; compat_int_t:32; compat_int_t:32; | 136 | compat_int_t:32; compat_int_t:32; compat_int_t:32; |
127 | }; | 137 | }; |
128 | 138 | ||
129 | #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) | 139 | #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) |
130 | 140 | ||
131 | typedef struct { | 141 | typedef struct { |
132 | compat_sigset_word sig[_COMPAT_NSIG_WORDS]; | 142 | compat_sigset_word sig[_COMPAT_NSIG_WORDS]; |
133 | } compat_sigset_t; | 143 | } compat_sigset_t; |
134 | 144 | ||
135 | /* | 145 | /* |
136 | * These functions operate strictly on struct compat_time* | 146 | * These functions operate strictly on struct compat_time* |
137 | */ | 147 | */ |
138 | extern int get_compat_timespec(struct timespec *, | 148 | extern int get_compat_timespec(struct timespec *, |
139 | const struct compat_timespec __user *); | 149 | const struct compat_timespec __user *); |
140 | extern int put_compat_timespec(const struct timespec *, | 150 | extern int put_compat_timespec(const struct timespec *, |
141 | struct compat_timespec __user *); | 151 | struct compat_timespec __user *); |
142 | extern int get_compat_timeval(struct timeval *, | 152 | extern int get_compat_timeval(struct timeval *, |
143 | const struct compat_timeval __user *); | 153 | const struct compat_timeval __user *); |
144 | extern int put_compat_timeval(const struct timeval *, | 154 | extern int put_compat_timeval(const struct timeval *, |
145 | struct compat_timeval __user *); | 155 | struct compat_timeval __user *); |
146 | /* | 156 | /* |
147 | * These functions operate on 32- or 64-bit specs depending on | 157 | * These functions operate on 32- or 64-bit specs depending on |
148 | * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments and the | 158 | * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments and the |
149 | * naming as compat_get/put_ rather than get/put_compat_. | 159 | * naming as compat_get/put_ rather than get/put_compat_. |
150 | */ | 160 | */ |
151 | extern int compat_get_timespec(struct timespec *, const void __user *); | 161 | extern int compat_get_timespec(struct timespec *, const void __user *); |
152 | extern int compat_put_timespec(const struct timespec *, void __user *); | 162 | extern int compat_put_timespec(const struct timespec *, void __user *); |
153 | extern int compat_get_timeval(struct timeval *, const void __user *); | 163 | extern int compat_get_timeval(struct timeval *, const void __user *); |
154 | extern int compat_put_timeval(const struct timeval *, void __user *); | 164 | extern int compat_put_timeval(const struct timeval *, void __user *); |
155 | 165 | ||
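[Editor's illustration of the naming convention the comment above spells out; the example_ function name is hypothetical, the two accessors are the ones declared above:

	/* get_compat_timespec() always reads the fixed 32-bit
	 * struct compat_timespec; compat_get_timespec() reads a 32- or
	 * 64-bit spec depending on COMPAT_USE_64BIT_TIME (x32, for
	 * instance, uses 64-bit time), hence its void __user * argument. */
	static int example_read_both(struct timespec *ts,
				     struct compat_timespec __user *fixed32,
				     const void __user *abi_sized)
	{
		if (get_compat_timespec(ts, fixed32))
			return -EFAULT;
		return compat_get_timespec(ts, abi_sized);
	}
]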
156 | struct compat_iovec { | 166 | struct compat_iovec { |
157 | compat_uptr_t iov_base; | 167 | compat_uptr_t iov_base; |
158 | compat_size_t iov_len; | 168 | compat_size_t iov_len; |
159 | }; | 169 | }; |
160 | 170 | ||
161 | struct compat_rlimit { | 171 | struct compat_rlimit { |
162 | compat_ulong_t rlim_cur; | 172 | compat_ulong_t rlim_cur; |
163 | compat_ulong_t rlim_max; | 173 | compat_ulong_t rlim_max; |
164 | }; | 174 | }; |
165 | 175 | ||
166 | struct compat_rusage { | 176 | struct compat_rusage { |
167 | struct compat_timeval ru_utime; | 177 | struct compat_timeval ru_utime; |
168 | struct compat_timeval ru_stime; | 178 | struct compat_timeval ru_stime; |
169 | compat_long_t ru_maxrss; | 179 | compat_long_t ru_maxrss; |
170 | compat_long_t ru_ixrss; | 180 | compat_long_t ru_ixrss; |
171 | compat_long_t ru_idrss; | 181 | compat_long_t ru_idrss; |
172 | compat_long_t ru_isrss; | 182 | compat_long_t ru_isrss; |
173 | compat_long_t ru_minflt; | 183 | compat_long_t ru_minflt; |
174 | compat_long_t ru_majflt; | 184 | compat_long_t ru_majflt; |
175 | compat_long_t ru_nswap; | 185 | compat_long_t ru_nswap; |
176 | compat_long_t ru_inblock; | 186 | compat_long_t ru_inblock; |
177 | compat_long_t ru_oublock; | 187 | compat_long_t ru_oublock; |
178 | compat_long_t ru_msgsnd; | 188 | compat_long_t ru_msgsnd; |
179 | compat_long_t ru_msgrcv; | 189 | compat_long_t ru_msgrcv; |
180 | compat_long_t ru_nsignals; | 190 | compat_long_t ru_nsignals; |
181 | compat_long_t ru_nvcsw; | 191 | compat_long_t ru_nvcsw; |
182 | compat_long_t ru_nivcsw; | 192 | compat_long_t ru_nivcsw; |
183 | }; | 193 | }; |
184 | 194 | ||
185 | extern int put_compat_rusage(const struct rusage *, | 195 | extern int put_compat_rusage(const struct rusage *, |
186 | struct compat_rusage __user *); | 196 | struct compat_rusage __user *); |
187 | 197 | ||
188 | struct compat_siginfo; | 198 | struct compat_siginfo; |
189 | 199 | ||
190 | extern asmlinkage long compat_sys_waitid(int, compat_pid_t, | 200 | extern asmlinkage long compat_sys_waitid(int, compat_pid_t, |
191 | struct compat_siginfo __user *, int, | 201 | struct compat_siginfo __user *, int, |
192 | struct compat_rusage __user *); | 202 | struct compat_rusage __user *); |
193 | 203 | ||
194 | struct compat_dirent { | 204 | struct compat_dirent { |
195 | u32 d_ino; | 205 | u32 d_ino; |
196 | compat_off_t d_off; | 206 | compat_off_t d_off; |
197 | u16 d_reclen; | 207 | u16 d_reclen; |
198 | char d_name[256]; | 208 | char d_name[256]; |
199 | }; | 209 | }; |
200 | 210 | ||
201 | struct compat_ustat { | 211 | struct compat_ustat { |
202 | compat_daddr_t f_tfree; | 212 | compat_daddr_t f_tfree; |
203 | compat_ino_t f_tinode; | 213 | compat_ino_t f_tinode; |
204 | char f_fname[6]; | 214 | char f_fname[6]; |
205 | char f_fpack[6]; | 215 | char f_fpack[6]; |
206 | }; | 216 | }; |
207 | 217 | ||
208 | #define COMPAT_SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3) | 218 | #define COMPAT_SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3) |
209 | 219 | ||
210 | typedef struct compat_sigevent { | 220 | typedef struct compat_sigevent { |
211 | compat_sigval_t sigev_value; | 221 | compat_sigval_t sigev_value; |
212 | compat_int_t sigev_signo; | 222 | compat_int_t sigev_signo; |
213 | compat_int_t sigev_notify; | 223 | compat_int_t sigev_notify; |
214 | union { | 224 | union { |
215 | compat_int_t _pad[COMPAT_SIGEV_PAD_SIZE]; | 225 | compat_int_t _pad[COMPAT_SIGEV_PAD_SIZE]; |
216 | compat_int_t _tid; | 226 | compat_int_t _tid; |
217 | 227 | ||
218 | struct { | 228 | struct { |
219 | compat_uptr_t _function; | 229 | compat_uptr_t _function; |
220 | compat_uptr_t _attribute; | 230 | compat_uptr_t _attribute; |
221 | } _sigev_thread; | 231 | } _sigev_thread; |
222 | } _sigev_un; | 232 | } _sigev_un; |
223 | } compat_sigevent_t; | 233 | } compat_sigevent_t; |
224 | 234 | ||
225 | struct compat_ifmap { | 235 | struct compat_ifmap { |
226 | compat_ulong_t mem_start; | 236 | compat_ulong_t mem_start; |
227 | compat_ulong_t mem_end; | 237 | compat_ulong_t mem_end; |
228 | unsigned short base_addr; | 238 | unsigned short base_addr; |
229 | unsigned char irq; | 239 | unsigned char irq; |
230 | unsigned char dma; | 240 | unsigned char dma; |
231 | unsigned char port; | 241 | unsigned char port; |
232 | }; | 242 | }; |
233 | 243 | ||
234 | struct compat_if_settings { | 244 | struct compat_if_settings { |
235 | unsigned int type; /* Type of physical device or protocol */ | 245 | unsigned int type; /* Type of physical device or protocol */ |
236 | unsigned int size; /* Size of the data allocated by the caller */ | 246 | unsigned int size; /* Size of the data allocated by the caller */ |
237 | compat_uptr_t ifs_ifsu; /* union of pointers */ | 247 | compat_uptr_t ifs_ifsu; /* union of pointers */ |
238 | }; | 248 | }; |
239 | 249 | ||
240 | struct compat_ifreq { | 250 | struct compat_ifreq { |
241 | union { | 251 | union { |
242 | char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */ | 252 | char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */ |
243 | } ifr_ifrn; | 253 | } ifr_ifrn; |
244 | union { | 254 | union { |
245 | struct sockaddr ifru_addr; | 255 | struct sockaddr ifru_addr; |
246 | struct sockaddr ifru_dstaddr; | 256 | struct sockaddr ifru_dstaddr; |
247 | struct sockaddr ifru_broadaddr; | 257 | struct sockaddr ifru_broadaddr; |
248 | struct sockaddr ifru_netmask; | 258 | struct sockaddr ifru_netmask; |
249 | struct sockaddr ifru_hwaddr; | 259 | struct sockaddr ifru_hwaddr; |
250 | short ifru_flags; | 260 | short ifru_flags; |
251 | compat_int_t ifru_ivalue; | 261 | compat_int_t ifru_ivalue; |
252 | compat_int_t ifru_mtu; | 262 | compat_int_t ifru_mtu; |
253 | struct compat_ifmap ifru_map; | 263 | struct compat_ifmap ifru_map; |
254 | char ifru_slave[IFNAMSIZ]; /* Just fits the size */ | 264 | char ifru_slave[IFNAMSIZ]; /* Just fits the size */ |
255 | char ifru_newname[IFNAMSIZ]; | 265 | char ifru_newname[IFNAMSIZ]; |
256 | compat_caddr_t ifru_data; | 266 | compat_caddr_t ifru_data; |
257 | struct compat_if_settings ifru_settings; | 267 | struct compat_if_settings ifru_settings; |
258 | } ifr_ifru; | 268 | } ifr_ifru; |
259 | }; | 269 | }; |
260 | 270 | ||
261 | struct compat_ifconf { | 271 | struct compat_ifconf { |
262 | compat_int_t ifc_len; /* size of buffer */ | 272 | compat_int_t ifc_len; /* size of buffer */ |
263 | compat_caddr_t ifcbuf; | 273 | compat_caddr_t ifcbuf; |
264 | }; | 274 | }; |
265 | 275 | ||
266 | struct compat_robust_list { | 276 | struct compat_robust_list { |
267 | compat_uptr_t next; | 277 | compat_uptr_t next; |
268 | }; | 278 | }; |
269 | 279 | ||
270 | struct compat_robust_list_head { | 280 | struct compat_robust_list_head { |
271 | struct compat_robust_list list; | 281 | struct compat_robust_list list; |
272 | compat_long_t futex_offset; | 282 | compat_long_t futex_offset; |
273 | compat_uptr_t list_op_pending; | 283 | compat_uptr_t list_op_pending; |
274 | }; | 284 | }; |
275 | 285 | ||
276 | struct compat_statfs; | 286 | struct compat_statfs; |
277 | struct compat_statfs64; | 287 | struct compat_statfs64; |
278 | struct compat_old_linux_dirent; | 288 | struct compat_old_linux_dirent; |
279 | struct compat_linux_dirent; | 289 | struct compat_linux_dirent; |
280 | struct linux_dirent64; | 290 | struct linux_dirent64; |
281 | struct compat_msghdr; | 291 | struct compat_msghdr; |
282 | struct compat_mmsghdr; | 292 | struct compat_mmsghdr; |
283 | struct compat_sysinfo; | 293 | struct compat_sysinfo; |
284 | struct compat_sysctl_args; | 294 | struct compat_sysctl_args; |
285 | struct compat_kexec_segment; | 295 | struct compat_kexec_segment; |
286 | struct compat_mq_attr; | 296 | struct compat_mq_attr; |
287 | struct compat_msgbuf; | 297 | struct compat_msgbuf; |
288 | 298 | ||
289 | extern void compat_exit_robust_list(struct task_struct *curr); | 299 | extern void compat_exit_robust_list(struct task_struct *curr); |
290 | 300 | ||
291 | asmlinkage long | 301 | asmlinkage long |
292 | compat_sys_set_robust_list(struct compat_robust_list_head __user *head, | 302 | compat_sys_set_robust_list(struct compat_robust_list_head __user *head, |
293 | compat_size_t len); | 303 | compat_size_t len); |
294 | asmlinkage long | 304 | asmlinkage long |
295 | compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, | 305 | compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, |
296 | compat_size_t __user *len_ptr); | 306 | compat_size_t __user *len_ptr); |
297 | 307 | ||
298 | #ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC | 308 | #ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC |
299 | long compat_sys_semctl(int first, int second, int third, void __user *uptr); | 309 | long compat_sys_semctl(int first, int second, int third, void __user *uptr); |
300 | long compat_sys_msgsnd(int first, int second, int third, void __user *uptr); | 310 | long compat_sys_msgsnd(int first, int second, int third, void __user *uptr); |
301 | long compat_sys_msgrcv(int first, int second, int msgtyp, int third, | 311 | long compat_sys_msgrcv(int first, int second, int msgtyp, int third, |
302 | int version, void __user *uptr); | 312 | int version, void __user *uptr); |
303 | long compat_sys_shmat(int first, int second, compat_uptr_t third, int version, | 313 | long compat_sys_shmat(int first, int second, compat_uptr_t third, int version, |
304 | void __user *uptr); | 314 | void __user *uptr); |
305 | #else | 315 | #else |
306 | long compat_sys_semctl(int semid, int semnum, int cmd, int arg); | 316 | long compat_sys_semctl(int semid, int semnum, int cmd, int arg); |
307 | long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp, | 317 | long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp, |
308 | compat_ssize_t msgsz, int msgflg); | 318 | compat_ssize_t msgsz, int msgflg); |
309 | long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp, | 319 | long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp, |
310 | compat_ssize_t msgsz, long msgtyp, int msgflg); | 320 | compat_ssize_t msgsz, long msgtyp, int msgflg); |
311 | long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); | 321 | long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); |
312 | #endif | 322 | #endif |
313 | long compat_sys_msgctl(int first, int second, void __user *uptr); | 323 | long compat_sys_msgctl(int first, int second, void __user *uptr); |
314 | long compat_sys_shmctl(int first, int second, void __user *uptr); | 324 | long compat_sys_shmctl(int first, int second, void __user *uptr); |
315 | long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, | 325 | long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, |
316 | unsigned nsems, const struct compat_timespec __user *timeout); | 326 | unsigned nsems, const struct compat_timespec __user *timeout); |
317 | asmlinkage long compat_sys_keyctl(u32 option, | 327 | asmlinkage long compat_sys_keyctl(u32 option, |
318 | u32 arg2, u32 arg3, u32 arg4, u32 arg5); | 328 | u32 arg2, u32 arg3, u32 arg4, u32 arg5); |
319 | asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); | 329 | asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); |
320 | 330 | ||
321 | asmlinkage ssize_t compat_sys_readv(unsigned long fd, | 331 | asmlinkage ssize_t compat_sys_readv(unsigned long fd, |
322 | const struct compat_iovec __user *vec, unsigned long vlen); | 332 | const struct compat_iovec __user *vec, unsigned long vlen); |
323 | asmlinkage ssize_t compat_sys_writev(unsigned long fd, | 333 | asmlinkage ssize_t compat_sys_writev(unsigned long fd, |
324 | const struct compat_iovec __user *vec, unsigned long vlen); | 334 | const struct compat_iovec __user *vec, unsigned long vlen); |
325 | asmlinkage ssize_t compat_sys_preadv(unsigned long fd, | 335 | asmlinkage ssize_t compat_sys_preadv(unsigned long fd, |
326 | const struct compat_iovec __user *vec, | 336 | const struct compat_iovec __user *vec, |
327 | unsigned long vlen, u32 pos_low, u32 pos_high); | 337 | unsigned long vlen, u32 pos_low, u32 pos_high); |
328 | asmlinkage ssize_t compat_sys_pwritev(unsigned long fd, | 338 | asmlinkage ssize_t compat_sys_pwritev(unsigned long fd, |
329 | const struct compat_iovec __user *vec, | 339 | const struct compat_iovec __user *vec, |
330 | unsigned long vlen, u32 pos_low, u32 pos_high); | 340 | unsigned long vlen, u32 pos_low, u32 pos_high); |
331 | 341 | ||
332 | asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, | 342 | asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, |
333 | const compat_uptr_t __user *envp); | 343 | const compat_uptr_t __user *envp); |
334 | 344 | ||
335 | asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, | 345 | asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, |
336 | compat_ulong_t __user *outp, compat_ulong_t __user *exp, | 346 | compat_ulong_t __user *outp, compat_ulong_t __user *exp, |
337 | struct compat_timeval __user *tvp); | 347 | struct compat_timeval __user *tvp); |
338 | 348 | ||
339 | asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg); | 349 | asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg); |
340 | 350 | ||
341 | asmlinkage long compat_sys_wait4(compat_pid_t pid, | 351 | asmlinkage long compat_sys_wait4(compat_pid_t pid, |
342 | compat_uint_t __user *stat_addr, int options, | 352 | compat_uint_t __user *stat_addr, int options, |
343 | struct compat_rusage __user *ru); | 353 | struct compat_rusage __user *ru); |
344 | 354 | ||
345 | #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) | 355 | #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) |
346 | 356 | ||
347 | #define BITS_TO_COMPAT_LONGS(bits) \ | 357 | #define BITS_TO_COMPAT_LONGS(bits) \ |
348 | (((bits)+BITS_PER_COMPAT_LONG-1)/BITS_PER_COMPAT_LONG) | 358 | (((bits)+BITS_PER_COMPAT_LONG-1)/BITS_PER_COMPAT_LONG) |
349 | 359 | ||
350 | long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, | 360 | long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, |
351 | unsigned long bitmap_size); | 361 | unsigned long bitmap_size); |
352 | long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, | 362 | long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, |
353 | unsigned long bitmap_size); | 363 | unsigned long bitmap_size); |
354 | int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); | 364 | int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); |
355 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from); | 365 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from); |
356 | int get_compat_sigevent(struct sigevent *event, | 366 | int get_compat_sigevent(struct sigevent *event, |
357 | const struct compat_sigevent __user *u_event); | 367 | const struct compat_sigevent __user *u_event); |
358 | long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, | 368 | long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, |
359 | struct compat_siginfo __user *uinfo); | 369 | struct compat_siginfo __user *uinfo); |
360 | 370 | ||
361 | static inline int compat_timeval_compare(struct compat_timeval *lhs, | 371 | static inline int compat_timeval_compare(struct compat_timeval *lhs, |
362 | struct compat_timeval *rhs) | 372 | struct compat_timeval *rhs) |
363 | { | 373 | { |
364 | if (lhs->tv_sec < rhs->tv_sec) | 374 | if (lhs->tv_sec < rhs->tv_sec) |
365 | return -1; | 375 | return -1; |
366 | if (lhs->tv_sec > rhs->tv_sec) | 376 | if (lhs->tv_sec > rhs->tv_sec) |
367 | return 1; | 377 | return 1; |
368 | return lhs->tv_usec - rhs->tv_usec; | 378 | return lhs->tv_usec - rhs->tv_usec; |
369 | } | 379 | } |
370 | 380 | ||
371 | static inline int compat_timespec_compare(struct compat_timespec *lhs, | 381 | static inline int compat_timespec_compare(struct compat_timespec *lhs, |
372 | struct compat_timespec *rhs) | 382 | struct compat_timespec *rhs) |
373 | { | 383 | { |
374 | if (lhs->tv_sec < rhs->tv_sec) | 384 | if (lhs->tv_sec < rhs->tv_sec) |
375 | return -1; | 385 | return -1; |
376 | if (lhs->tv_sec > rhs->tv_sec) | 386 | if (lhs->tv_sec > rhs->tv_sec) |
377 | return 1; | 387 | return 1; |
378 | return lhs->tv_nsec - rhs->tv_nsec; | 388 | return lhs->tv_nsec - rhs->tv_nsec; |
379 | } | 389 | } |
380 | 390 | ||
381 | extern int get_compat_itimerspec(struct itimerspec *dst, | 391 | extern int get_compat_itimerspec(struct itimerspec *dst, |
382 | const struct compat_itimerspec __user *src); | 392 | const struct compat_itimerspec __user *src); |
383 | extern int put_compat_itimerspec(struct compat_itimerspec __user *dst, | 393 | extern int put_compat_itimerspec(struct compat_itimerspec __user *dst, |
384 | const struct itimerspec *src); | 394 | const struct itimerspec *src); |
385 | 395 | ||
386 | asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, | 396 | asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, |
387 | struct timezone __user *tz); | 397 | struct timezone __user *tz); |
388 | asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, | 398 | asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, |
389 | struct timezone __user *tz); | 399 | struct timezone __user *tz); |
390 | 400 | ||
391 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); | 401 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); |
392 | 402 | ||
393 | extern int compat_printk(const char *fmt, ...); | 403 | extern int compat_printk(const char *fmt, ...); |
394 | extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); | 404 | extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); |
395 | 405 | ||
396 | asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, | 406 | asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, |
397 | compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, | 407 | compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, |
398 | const compat_ulong_t __user *new_nodes); | 408 | const compat_ulong_t __user *new_nodes); |
399 | 409 | ||
400 | extern int compat_ptrace_request(struct task_struct *child, | 410 | extern int compat_ptrace_request(struct task_struct *child, |
401 | compat_long_t request, | 411 | compat_long_t request, |
402 | compat_ulong_t addr, compat_ulong_t data); | 412 | compat_ulong_t addr, compat_ulong_t data); |
403 | 413 | ||
404 | extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | 414 | extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, |
405 | compat_ulong_t addr, compat_ulong_t data); | 415 | compat_ulong_t addr, compat_ulong_t data); |
406 | asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, | 416 | asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, |
407 | compat_long_t addr, compat_long_t data); | 417 | compat_long_t addr, compat_long_t data); |
408 | 418 | ||
409 | /* | 419 | /* |
410 | * epoll (fs/eventpoll.c) compat bits follow ... | 420 | * epoll (fs/eventpoll.c) compat bits follow ... |
411 | */ | 421 | */ |
412 | struct epoll_event; | 422 | struct epoll_event; |
413 | #define compat_epoll_event epoll_event | 423 | #define compat_epoll_event epoll_event |
414 | asmlinkage long compat_sys_epoll_pwait(int epfd, | 424 | asmlinkage long compat_sys_epoll_pwait(int epfd, |
415 | struct compat_epoll_event __user *events, | 425 | struct compat_epoll_event __user *events, |
416 | int maxevents, int timeout, | 426 | int maxevents, int timeout, |
417 | const compat_sigset_t __user *sigmask, | 427 | const compat_sigset_t __user *sigmask, |
418 | compat_size_t sigsetsize); | 428 | compat_size_t sigsetsize); |
419 | 429 | ||
420 | asmlinkage long compat_sys_utime(const char __user *filename, | 430 | asmlinkage long compat_sys_utime(const char __user *filename, |
421 | struct compat_utimbuf __user *t); | 431 | struct compat_utimbuf __user *t); |
422 | asmlinkage long compat_sys_utimensat(unsigned int dfd, | 432 | asmlinkage long compat_sys_utimensat(unsigned int dfd, |
423 | const char __user *filename, | 433 | const char __user *filename, |
424 | struct compat_timespec __user *t, | 434 | struct compat_timespec __user *t, |
425 | int flags); | 435 | int flags); |
426 | 436 | ||
427 | asmlinkage long compat_sys_time(compat_time_t __user *tloc); | 437 | asmlinkage long compat_sys_time(compat_time_t __user *tloc); |
428 | asmlinkage long compat_sys_stime(compat_time_t __user *tptr); | 438 | asmlinkage long compat_sys_stime(compat_time_t __user *tptr); |
429 | asmlinkage long compat_sys_signalfd(int ufd, | 439 | asmlinkage long compat_sys_signalfd(int ufd, |
430 | const compat_sigset_t __user *sigmask, | 440 | const compat_sigset_t __user *sigmask, |
431 | compat_size_t sigsetsize); | 441 | compat_size_t sigsetsize); |
432 | asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, | 442 | asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, |
433 | const struct compat_itimerspec __user *utmr, | 443 | const struct compat_itimerspec __user *utmr, |
434 | struct compat_itimerspec __user *otmr); | 444 | struct compat_itimerspec __user *otmr); |
435 | asmlinkage long compat_sys_timerfd_gettime(int ufd, | 445 | asmlinkage long compat_sys_timerfd_gettime(int ufd, |
436 | struct compat_itimerspec __user *otmr); | 446 | struct compat_itimerspec __user *otmr); |
437 | 447 | ||
438 | asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page, | 448 | asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page, |
439 | __u32 __user *pages, | 449 | __u32 __user *pages, |
440 | const int __user *nodes, | 450 | const int __user *nodes, |
441 | int __user *status, | 451 | int __user *status, |
442 | int flags); | 452 | int flags); |
443 | asmlinkage long compat_sys_futimesat(unsigned int dfd, | 453 | asmlinkage long compat_sys_futimesat(unsigned int dfd, |
444 | const char __user *filename, | 454 | const char __user *filename, |
445 | struct compat_timeval __user *t); | 455 | struct compat_timeval __user *t); |
446 | asmlinkage long compat_sys_utimes(const char __user *filename, | 456 | asmlinkage long compat_sys_utimes(const char __user *filename, |
447 | struct compat_timeval __user *t); | 457 | struct compat_timeval __user *t); |
448 | asmlinkage long compat_sys_newstat(const char __user *filename, | 458 | asmlinkage long compat_sys_newstat(const char __user *filename, |
449 | struct compat_stat __user *statbuf); | 459 | struct compat_stat __user *statbuf); |
450 | asmlinkage long compat_sys_newlstat(const char __user *filename, | 460 | asmlinkage long compat_sys_newlstat(const char __user *filename, |
451 | struct compat_stat __user *statbuf); | 461 | struct compat_stat __user *statbuf); |
452 | asmlinkage long compat_sys_newfstatat(unsigned int dfd, | 462 | asmlinkage long compat_sys_newfstatat(unsigned int dfd, |
453 | const char __user *filename, | 463 | const char __user *filename, |
454 | struct compat_stat __user *statbuf, | 464 | struct compat_stat __user *statbuf, |
455 | int flag); | 465 | int flag); |
456 | asmlinkage long compat_sys_newfstat(unsigned int fd, | 466 | asmlinkage long compat_sys_newfstat(unsigned int fd, |
457 | struct compat_stat __user *statbuf); | 467 | struct compat_stat __user *statbuf); |
458 | asmlinkage long compat_sys_statfs(const char __user *pathname, | 468 | asmlinkage long compat_sys_statfs(const char __user *pathname, |
459 | struct compat_statfs __user *buf); | 469 | struct compat_statfs __user *buf); |
460 | asmlinkage long compat_sys_fstatfs(unsigned int fd, | 470 | asmlinkage long compat_sys_fstatfs(unsigned int fd, |
461 | struct compat_statfs __user *buf); | 471 | struct compat_statfs __user *buf); |
462 | asmlinkage long compat_sys_statfs64(const char __user *pathname, | 472 | asmlinkage long compat_sys_statfs64(const char __user *pathname, |
463 | compat_size_t sz, | 473 | compat_size_t sz, |
464 | struct compat_statfs64 __user *buf); | 474 | struct compat_statfs64 __user *buf); |
465 | asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, | 475 | asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, |
466 | struct compat_statfs64 __user *buf); | 476 | struct compat_statfs64 __user *buf); |
467 | asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd, | 477 | asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd, |
468 | unsigned long arg); | 478 | unsigned long arg); |
469 | asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, | 479 | asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, |
470 | unsigned long arg); | 480 | unsigned long arg); |
471 | asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); | 481 | asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); |
472 | asmlinkage long compat_sys_io_getevents(aio_context_t ctx_id, | 482 | asmlinkage long compat_sys_io_getevents(aio_context_t ctx_id, |
473 | unsigned long min_nr, | 483 | unsigned long min_nr, |
474 | unsigned long nr, | 484 | unsigned long nr, |
475 | struct io_event __user *events, | 485 | struct io_event __user *events, |
476 | struct compat_timespec __user *timeout); | 486 | struct compat_timespec __user *timeout); |
477 | asmlinkage long compat_sys_io_submit(aio_context_t ctx_id, int nr, | 487 | asmlinkage long compat_sys_io_submit(aio_context_t ctx_id, int nr, |
478 | u32 __user *iocb); | 488 | u32 __user *iocb); |
479 | asmlinkage long compat_sys_mount(const char __user *dev_name, | 489 | asmlinkage long compat_sys_mount(const char __user *dev_name, |
480 | const char __user *dir_name, | 490 | const char __user *dir_name, |
481 | const char __user *type, unsigned long flags, | 491 | const char __user *type, unsigned long flags, |
482 | const void __user *data); | 492 | const void __user *data); |
483 | asmlinkage long compat_sys_old_readdir(unsigned int fd, | 493 | asmlinkage long compat_sys_old_readdir(unsigned int fd, |
484 | struct compat_old_linux_dirent __user *, | 494 | struct compat_old_linux_dirent __user *, |
485 | unsigned int count); | 495 | unsigned int count); |
486 | asmlinkage long compat_sys_getdents(unsigned int fd, | 496 | asmlinkage long compat_sys_getdents(unsigned int fd, |
487 | struct compat_linux_dirent __user *dirent, | 497 | struct compat_linux_dirent __user *dirent, |
488 | unsigned int count); | 498 | unsigned int count); |
489 | asmlinkage long compat_sys_getdents64(unsigned int fd, | 499 | asmlinkage long compat_sys_getdents64(unsigned int fd, |
490 | struct linux_dirent64 __user *dirent, | 500 | struct linux_dirent64 __user *dirent, |
491 | unsigned int count); | 501 | unsigned int count); |
492 | asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *, | 502 | asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *, |
493 | unsigned int nr_segs, unsigned int flags); | 503 | unsigned int nr_segs, unsigned int flags); |
494 | asmlinkage long compat_sys_open(const char __user *filename, int flags, | 504 | asmlinkage long compat_sys_open(const char __user *filename, int flags, |
495 | umode_t mode); | 505 | umode_t mode); |
496 | asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename, | 506 | asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename, |
497 | int flags, umode_t mode); | 507 | int flags, umode_t mode); |
498 | asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, | 508 | asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, |
499 | struct file_handle __user *handle, | 509 | struct file_handle __user *handle, |
500 | int flags); | 510 | int flags); |
501 | asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, | 511 | asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, |
502 | compat_ulong_t __user *outp, | 512 | compat_ulong_t __user *outp, |
503 | compat_ulong_t __user *exp, | 513 | compat_ulong_t __user *exp, |
504 | struct compat_timespec __user *tsp, | 514 | struct compat_timespec __user *tsp, |
505 | void __user *sig); | 515 | void __user *sig); |
506 | asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, | 516 | asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, |
507 | unsigned int nfds, | 517 | unsigned int nfds, |
508 | struct compat_timespec __user *tsp, | 518 | struct compat_timespec __user *tsp, |
509 | const compat_sigset_t __user *sigmask, | 519 | const compat_sigset_t __user *sigmask, |
510 | compat_size_t sigsetsize); | 520 | compat_size_t sigsetsize); |
511 | asmlinkage long compat_sys_signalfd4(int ufd, | 521 | asmlinkage long compat_sys_signalfd4(int ufd, |
512 | const compat_sigset_t __user *sigmask, | 522 | const compat_sigset_t __user *sigmask, |
513 | compat_size_t sigsetsize, int flags); | 523 | compat_size_t sigsetsize, int flags); |
514 | asmlinkage long compat_sys_get_mempolicy(int __user *policy, | 524 | asmlinkage long compat_sys_get_mempolicy(int __user *policy, |
515 | compat_ulong_t __user *nmask, | 525 | compat_ulong_t __user *nmask, |
516 | compat_ulong_t maxnode, | 526 | compat_ulong_t maxnode, |
517 | compat_ulong_t addr, | 527 | compat_ulong_t addr, |
518 | compat_ulong_t flags); | 528 | compat_ulong_t flags); |
519 | asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, | 529 | asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, |
520 | compat_ulong_t maxnode); | 530 | compat_ulong_t maxnode); |
521 | asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, | 531 | asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, |
522 | compat_ulong_t mode, | 532 | compat_ulong_t mode, |
523 | compat_ulong_t __user *nmask, | 533 | compat_ulong_t __user *nmask, |
524 | compat_ulong_t maxnode, compat_ulong_t flags); | 534 | compat_ulong_t maxnode, compat_ulong_t flags); |
525 | 535 | ||
526 | asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, | 536 | asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, |
527 | char __user *optval, unsigned int optlen); | 537 | char __user *optval, unsigned int optlen); |
528 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, | 538 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, |
529 | unsigned flags); | 539 | unsigned flags); |
530 | asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, | 540 | asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, |
531 | unsigned vlen, unsigned int flags); | 541 | unsigned vlen, unsigned int flags); |
532 | asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, | 542 | asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, |
533 | unsigned int flags); | 543 | unsigned int flags); |
534 | asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, | 544 | asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, |
535 | unsigned flags); | 545 | unsigned flags); |
536 | asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, | 546 | asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, |
537 | unsigned flags, struct sockaddr __user *addr, | 547 | unsigned flags, struct sockaddr __user *addr, |
538 | int __user *addrlen); | 548 | int __user *addrlen); |
539 | asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, | 549 | asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, |
540 | unsigned vlen, unsigned int flags, | 550 | unsigned vlen, unsigned int flags, |
541 | struct compat_timespec __user *timeout); | 551 | struct compat_timespec __user *timeout); |
542 | asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, | 552 | asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, |
543 | struct compat_timespec __user *rmtp); | 553 | struct compat_timespec __user *rmtp); |
544 | asmlinkage long compat_sys_getitimer(int which, | 554 | asmlinkage long compat_sys_getitimer(int which, |
545 | struct compat_itimerval __user *it); | 555 | struct compat_itimerval __user *it); |
546 | asmlinkage long compat_sys_setitimer(int which, | 556 | asmlinkage long compat_sys_setitimer(int which, |
547 | struct compat_itimerval __user *in, | 557 | struct compat_itimerval __user *in, |
548 | struct compat_itimerval __user *out); | 558 | struct compat_itimerval __user *out); |
549 | asmlinkage long compat_sys_times(struct compat_tms __user *tbuf); | 559 | asmlinkage long compat_sys_times(struct compat_tms __user *tbuf); |
550 | asmlinkage long compat_sys_setrlimit(unsigned int resource, | 560 | asmlinkage long compat_sys_setrlimit(unsigned int resource, |
551 | struct compat_rlimit __user *rlim); | 561 | struct compat_rlimit __user *rlim); |
552 | asmlinkage long compat_sys_getrlimit(unsigned int resource, | 562 | asmlinkage long compat_sys_getrlimit(unsigned int resource, |
553 | struct compat_rlimit __user *rlim); | 563 | struct compat_rlimit __user *rlim); |
554 | asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); | 564 | asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); |
555 | asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, | 565 | asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, |
556 | unsigned int len, | 566 | unsigned int len, |
557 | compat_ulong_t __user *user_mask_ptr); | 567 | compat_ulong_t __user *user_mask_ptr); |
558 | asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, | 568 | asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, |
559 | unsigned int len, | 569 | unsigned int len, |
560 | compat_ulong_t __user *user_mask_ptr); | 570 | compat_ulong_t __user *user_mask_ptr); |
561 | asmlinkage long compat_sys_timer_create(clockid_t which_clock, | 571 | asmlinkage long compat_sys_timer_create(clockid_t which_clock, |
562 | struct compat_sigevent __user *timer_event_spec, | 572 | struct compat_sigevent __user *timer_event_spec, |
563 | timer_t __user *created_timer_id); | 573 | timer_t __user *created_timer_id); |
564 | asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, | 574 | asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, |
565 | struct compat_itimerspec __user *new, | 575 | struct compat_itimerspec __user *new, |
566 | struct compat_itimerspec __user *old); | 576 | struct compat_itimerspec __user *old); |
567 | asmlinkage long compat_sys_timer_gettime(timer_t timer_id, | 577 | asmlinkage long compat_sys_timer_gettime(timer_t timer_id, |
568 | struct compat_itimerspec __user *setting); | 578 | struct compat_itimerspec __user *setting); |
569 | asmlinkage long compat_sys_clock_settime(clockid_t which_clock, | 579 | asmlinkage long compat_sys_clock_settime(clockid_t which_clock, |
570 | struct compat_timespec __user *tp); | 580 | struct compat_timespec __user *tp); |
571 | asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, | 581 | asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, |
572 | struct compat_timespec __user *tp); | 582 | struct compat_timespec __user *tp); |
573 | asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, | 583 | asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, |
574 | struct compat_timex __user *tp); | 584 | struct compat_timex __user *tp); |
575 | asmlinkage long compat_sys_clock_getres(clockid_t which_clock, | 585 | asmlinkage long compat_sys_clock_getres(clockid_t which_clock, |
576 | struct compat_timespec __user *tp); | 586 | struct compat_timespec __user *tp); |
577 | asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, | 587 | asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, |
578 | struct compat_timespec __user *rqtp, | 588 | struct compat_timespec __user *rqtp, |
579 | struct compat_timespec __user *rmtp); | 589 | struct compat_timespec __user *rmtp); |
580 | asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, | 590 | asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, |
581 | struct compat_siginfo __user *uinfo, | 591 | struct compat_siginfo __user *uinfo, |
582 | struct compat_timespec __user *uts, compat_size_t sigsetsize); | 592 | struct compat_timespec __user *uts, compat_size_t sigsetsize); |
583 | asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, | 593 | asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, |
584 | compat_size_t sigsetsize); | 594 | compat_size_t sigsetsize); |
585 | asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); | 595 | asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); |
586 | asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, | 596 | asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, |
587 | unsigned long arg); | 597 | unsigned long arg); |
588 | asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, | 598 | asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, |
589 | struct compat_timespec __user *utime, u32 __user *uaddr2, | 599 | struct compat_timespec __user *utime, u32 __user *uaddr2, |
590 | u32 val3); | 600 | u32 val3); |
591 | asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, | 601 | asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, |
592 | char __user *optval, int __user *optlen); | 602 | char __user *optval, int __user *optlen); |
593 | asmlinkage long compat_sys_kexec_load(unsigned long entry, | 603 | asmlinkage long compat_sys_kexec_load(unsigned long entry, |
594 | unsigned long nr_segments, | 604 | unsigned long nr_segments, |
595 | struct compat_kexec_segment __user *, | 605 | struct compat_kexec_segment __user *, |
596 | unsigned long flags); | 606 | unsigned long flags); |
597 | asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, | 607 | asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, |
598 | const struct compat_mq_attr __user *u_mqstat, | 608 | const struct compat_mq_attr __user *u_mqstat, |
599 | struct compat_mq_attr __user *u_omqstat); | 609 | struct compat_mq_attr __user *u_omqstat); |
600 | asmlinkage long compat_sys_mq_notify(mqd_t mqdes, | 610 | asmlinkage long compat_sys_mq_notify(mqd_t mqdes, |
601 | const struct compat_sigevent __user *u_notification); | 611 | const struct compat_sigevent __user *u_notification); |
602 | asmlinkage long compat_sys_mq_open(const char __user *u_name, | 612 | asmlinkage long compat_sys_mq_open(const char __user *u_name, |
603 | int oflag, compat_mode_t mode, | 613 | int oflag, compat_mode_t mode, |
604 | struct compat_mq_attr __user *u_attr); | 614 | struct compat_mq_attr __user *u_attr); |
605 | asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, | 615 | asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, |
606 | const char __user *u_msg_ptr, | 616 | const char __user *u_msg_ptr, |
607 | size_t msg_len, unsigned int msg_prio, | 617 | size_t msg_len, unsigned int msg_prio, |
608 | const struct compat_timespec __user *u_abs_timeout); | 618 | const struct compat_timespec __user *u_abs_timeout); |
609 | asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, | 619 | asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, |
610 | char __user *u_msg_ptr, | 620 | char __user *u_msg_ptr, |
611 | size_t msg_len, unsigned int __user *u_msg_prio, | 621 | size_t msg_len, unsigned int __user *u_msg_prio, |
612 | const struct compat_timespec __user *u_abs_timeout); | 622 | const struct compat_timespec __user *u_abs_timeout); |
613 | asmlinkage long compat_sys_socketcall(int call, u32 __user *args); | 623 | asmlinkage long compat_sys_socketcall(int call, u32 __user *args); |
614 | asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args); | 624 | asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args); |
615 | 625 | ||
616 | extern ssize_t compat_rw_copy_check_uvector(int type, | 626 | extern ssize_t compat_rw_copy_check_uvector(int type, |
617 | const struct compat_iovec __user *uvector, | 627 | const struct compat_iovec __user *uvector, |
618 | unsigned long nr_segs, | 628 | unsigned long nr_segs, |
619 | unsigned long fast_segs, struct iovec *fast_pointer, | 629 | unsigned long fast_segs, struct iovec *fast_pointer, |
620 | struct iovec **ret_pointer); | 630 | struct iovec **ret_pointer); |
621 | 631 | ||
622 | extern void __user *compat_alloc_user_space(unsigned long len); | 632 | extern void __user *compat_alloc_user_space(unsigned long len); |
623 | 633 | ||
624 | asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, | 634 | asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, |
625 | const struct compat_iovec __user *lvec, | 635 | const struct compat_iovec __user *lvec, |
626 | unsigned long liovcnt, const struct compat_iovec __user *rvec, | 636 | unsigned long liovcnt, const struct compat_iovec __user *rvec, |
627 | unsigned long riovcnt, unsigned long flags); | 637 | unsigned long riovcnt, unsigned long flags); |
628 | asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid, | 638 | asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid, |
629 | const struct compat_iovec __user *lvec, | 639 | const struct compat_iovec __user *lvec, |
630 | unsigned long liovcnt, const struct compat_iovec __user *rvec, | 640 | unsigned long liovcnt, const struct compat_iovec __user *rvec, |
631 | unsigned long riovcnt, unsigned long flags); | 641 | unsigned long riovcnt, unsigned long flags); |
632 | 642 | ||
633 | asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, | 643 | asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, |
634 | compat_off_t __user *offset, compat_size_t count); | 644 | compat_off_t __user *offset, compat_size_t count); |
645 | #ifdef CONFIG_GENERIC_SIGALTSTACK | ||
646 | asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, | ||
647 | compat_stack_t __user *uoss_ptr); | ||
648 | |||
649 | int compat_restore_altstack(const compat_stack_t __user *uss); | ||
650 | #endif | ||
635 | 651 | ||
636 | #else | 652 | #else |
637 | 653 | ||
638 | #define is_compat_task() (0) | 654 | #define is_compat_task() (0) |
639 | 655 | ||
640 | #endif /* CONFIG_COMPAT */ | 656 | #endif /* CONFIG_COMPAT */ |
641 | #endif /* _LINUX_COMPAT_H */ | 657 | #endif /* _LINUX_COMPAT_H */ |
642 | 658 |
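
With CONFIG_GENERIC_SIGALTSTACK selected, the two declarations added above are the whole arch-facing surface of this change: compat_sys_sigaltstack() gets wired into the 32-bit syscall table, and compat_restore_altstack() replaces the open-coded altstack restore in an architecture's compat sigreturn path. A minimal caller-side sketch, for illustration only (the frame layout and both function names below are invented; only compat_restore_altstack() and compat_stack_t come from this commit):

	/* Illustrative sketch only; not part of this commit. */
	#include <linux/compat.h>
	#include <linux/signal.h>

	struct example_compat_frame {		/* hypothetical 32-bit frame */
		/* saved registers, sigmask, etc. elided */
		compat_stack_t uc_stack;	/* 32-bit stack_t from userspace */
	};

	static int example_rt_sigreturn_tail(struct example_compat_frame __user *frame)
	{
		/*
		 * compat_restore_altstack() widens the 32-bit stack_t
		 * (compat_ptr() on ss_sp, plain copies of ss_flags/ss_size)
		 * and hands it to the generic sigaltstack code; everything
		 * but -EFAULT is squashed, so a stale altstack cannot make
		 * sigreturn fail.
		 */
		if (compat_restore_altstack(&frame->uc_stack))
			return -EFAULT;	/* caller forces SIGSEGV */
		return 0;
	}
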
kernel/signal.c
1 | /* | 1 | /* |
2 | * linux/kernel/signal.c | 2 | * linux/kernel/signal.c |
3 | * | 3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | 4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | * | 5 | * |
6 | * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson | 6 | * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson |
7 | * | 7 | * |
8 | * 2003-06-02 Jim Houston - Concurrent Computer Corp. | 8 | * 2003-06-02 Jim Houston - Concurrent Computer Corp. |
9 | * Changes to use preallocated sigqueue structures | 9 | * Changes to use preallocated sigqueue structures |
10 | * to allow signals to be sent reliably. | 10 | * to allow signals to be sent reliably. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/export.h> | 14 | #include <linux/export.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/tty.h> | 18 | #include <linux/tty.h> |
19 | #include <linux/binfmts.h> | 19 | #include <linux/binfmts.h> |
20 | #include <linux/coredump.h> | 20 | #include <linux/coredump.h> |
21 | #include <linux/security.h> | 21 | #include <linux/security.h> |
22 | #include <linux/syscalls.h> | 22 | #include <linux/syscalls.h> |
23 | #include <linux/ptrace.h> | 23 | #include <linux/ptrace.h> |
24 | #include <linux/signal.h> | 24 | #include <linux/signal.h> |
25 | #include <linux/signalfd.h> | 25 | #include <linux/signalfd.h> |
26 | #include <linux/ratelimit.h> | 26 | #include <linux/ratelimit.h> |
27 | #include <linux/tracehook.h> | 27 | #include <linux/tracehook.h> |
28 | #include <linux/capability.h> | 28 | #include <linux/capability.h> |
29 | #include <linux/freezer.h> | 29 | #include <linux/freezer.h> |
30 | #include <linux/pid_namespace.h> | 30 | #include <linux/pid_namespace.h> |
31 | #include <linux/nsproxy.h> | 31 | #include <linux/nsproxy.h> |
32 | #include <linux/user_namespace.h> | 32 | #include <linux/user_namespace.h> |
33 | #include <linux/uprobes.h> | 33 | #include <linux/uprobes.h> |
34 | #include <linux/compat.h> | ||
34 | #define CREATE_TRACE_POINTS | 35 | #define CREATE_TRACE_POINTS |
35 | #include <trace/events/signal.h> | 36 | #include <trace/events/signal.h> |
36 | 37 | ||
37 | #include <asm/param.h> | 38 | #include <asm/param.h> |
38 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
39 | #include <asm/unistd.h> | 40 | #include <asm/unistd.h> |
40 | #include <asm/siginfo.h> | 41 | #include <asm/siginfo.h> |
41 | #include <asm/cacheflush.h> | 42 | #include <asm/cacheflush.h> |
42 | #include "audit.h" /* audit_signal_info() */ | 43 | #include "audit.h" /* audit_signal_info() */ |
43 | 44 | ||
44 | /* | 45 | /* |
45 | * SLAB caches for signal bits. | 46 | * SLAB caches for signal bits. |
46 | */ | 47 | */ |
47 | 48 | ||
48 | static struct kmem_cache *sigqueue_cachep; | 49 | static struct kmem_cache *sigqueue_cachep; |
49 | 50 | ||
50 | int print_fatal_signals __read_mostly; | 51 | int print_fatal_signals __read_mostly; |
51 | 52 | ||
52 | static void __user *sig_handler(struct task_struct *t, int sig) | 53 | static void __user *sig_handler(struct task_struct *t, int sig) |
53 | { | 54 | { |
54 | return t->sighand->action[sig - 1].sa.sa_handler; | 55 | return t->sighand->action[sig - 1].sa.sa_handler; |
55 | } | 56 | } |
56 | 57 | ||
57 | static int sig_handler_ignored(void __user *handler, int sig) | 58 | static int sig_handler_ignored(void __user *handler, int sig) |
58 | { | 59 | { |
59 | /* Is it explicitly or implicitly ignored? */ | 60 | /* Is it explicitly or implicitly ignored? */ |
60 | return handler == SIG_IGN || | 61 | return handler == SIG_IGN || |
61 | (handler == SIG_DFL && sig_kernel_ignore(sig)); | 62 | (handler == SIG_DFL && sig_kernel_ignore(sig)); |
62 | } | 63 | } |
63 | 64 | ||
64 | static int sig_task_ignored(struct task_struct *t, int sig, bool force) | 65 | static int sig_task_ignored(struct task_struct *t, int sig, bool force) |
65 | { | 66 | { |
66 | void __user *handler; | 67 | void __user *handler; |
67 | 68 | ||
68 | handler = sig_handler(t, sig); | 69 | handler = sig_handler(t, sig); |
69 | 70 | ||
70 | if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && | 71 | if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && |
71 | handler == SIG_DFL && !force) | 72 | handler == SIG_DFL && !force) |
72 | return 1; | 73 | return 1; |
73 | 74 | ||
74 | return sig_handler_ignored(handler, sig); | 75 | return sig_handler_ignored(handler, sig); |
75 | } | 76 | } |
76 | 77 | ||
77 | static int sig_ignored(struct task_struct *t, int sig, bool force) | 78 | static int sig_ignored(struct task_struct *t, int sig, bool force) |
78 | { | 79 | { |
79 | /* | 80 | /* |
80 | * Blocked signals are never ignored, since the | 81 | * Blocked signals are never ignored, since the |
81 | * signal handler may change by the time it is | 82 | * signal handler may change by the time it is |
82 | * unblocked. | 83 | * unblocked. |
83 | */ | 84 | */ |
84 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) | 85 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) |
85 | return 0; | 86 | return 0; |
86 | 87 | ||
87 | if (!sig_task_ignored(t, sig, force)) | 88 | if (!sig_task_ignored(t, sig, force)) |
88 | return 0; | 89 | return 0; |
89 | 90 | ||
90 | /* | 91 | /* |
91 | * Tracers may want to know about even ignored signals. | 92 | * Tracers may want to know about even ignored signals. |
92 | */ | 93 | */ |
93 | return !t->ptrace; | 94 | return !t->ptrace; |
94 | } | 95 | } |
95 | 96 | ||
96 | /* | 97 | /* |
97 | * Re-calculate pending state from the set of locally pending | 98 | * Re-calculate pending state from the set of locally pending |
98 | * signals, globally pending signals, and blocked signals. | 99 | * signals, globally pending signals, and blocked signals. |
99 | */ | 100 | */ |
100 | static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) | 101 | static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) |
101 | { | 102 | { |
102 | unsigned long ready; | 103 | unsigned long ready; |
103 | long i; | 104 | long i; |
104 | 105 | ||
105 | switch (_NSIG_WORDS) { | 106 | switch (_NSIG_WORDS) { |
106 | default: | 107 | default: |
107 | for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) | 108 | for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) |
108 | ready |= signal->sig[i] &~ blocked->sig[i]; | 109 | ready |= signal->sig[i] &~ blocked->sig[i]; |
109 | break; | 110 | break; |
110 | 111 | ||
111 | case 4: ready = signal->sig[3] &~ blocked->sig[3]; | 112 | case 4: ready = signal->sig[3] &~ blocked->sig[3]; |
112 | ready |= signal->sig[2] &~ blocked->sig[2]; | 113 | ready |= signal->sig[2] &~ blocked->sig[2]; |
113 | ready |= signal->sig[1] &~ blocked->sig[1]; | 114 | ready |= signal->sig[1] &~ blocked->sig[1]; |
114 | ready |= signal->sig[0] &~ blocked->sig[0]; | 115 | ready |= signal->sig[0] &~ blocked->sig[0]; |
115 | break; | 116 | break; |
116 | 117 | ||
117 | case 2: ready = signal->sig[1] &~ blocked->sig[1]; | 118 | case 2: ready = signal->sig[1] &~ blocked->sig[1]; |
118 | ready |= signal->sig[0] &~ blocked->sig[0]; | 119 | ready |= signal->sig[0] &~ blocked->sig[0]; |
119 | break; | 120 | break; |
120 | 121 | ||
121 | case 1: ready = signal->sig[0] &~ blocked->sig[0]; | 122 | case 1: ready = signal->sig[0] &~ blocked->sig[0]; |
122 | } | 123 | } |
123 | return ready != 0; | 124 | return ready != 0; |
124 | } | 125 | } |
125 | 126 | ||
126 | #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) | 127 | #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) |
127 | 128 | ||
128 | static int recalc_sigpending_tsk(struct task_struct *t) | 129 | static int recalc_sigpending_tsk(struct task_struct *t) |
129 | { | 130 | { |
130 | if ((t->jobctl & JOBCTL_PENDING_MASK) || | 131 | if ((t->jobctl & JOBCTL_PENDING_MASK) || |
131 | PENDING(&t->pending, &t->blocked) || | 132 | PENDING(&t->pending, &t->blocked) || |
132 | PENDING(&t->signal->shared_pending, &t->blocked)) { | 133 | PENDING(&t->signal->shared_pending, &t->blocked)) { |
133 | set_tsk_thread_flag(t, TIF_SIGPENDING); | 134 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
134 | return 1; | 135 | return 1; |
135 | } | 136 | } |
136 | /* | 137 | /* |
137 | * We must never clear the flag in another thread, or in current | 138 | * We must never clear the flag in another thread, or in current |
138 | * when it's possible the current syscall is returning -ERESTART*. | 139 | * when it's possible the current syscall is returning -ERESTART*. |
139 | * So we don't clear it here; only callers that know they should clear it do so. | 140 | * So we don't clear it here; only callers that know they should clear it do so. |
140 | */ | 141 | */ |
141 | return 0; | 142 | return 0; |
142 | } | 143 | } |
143 | 144 | ||
144 | /* | 145 | /* |
145 | * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. | 146 | * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. |
146 | * This is superfluous when called on current; the wakeup is a harmless no-op. | 147 | * This is superfluous when called on current; the wakeup is a harmless no-op. |
147 | */ | 148 | */ |
148 | void recalc_sigpending_and_wake(struct task_struct *t) | 149 | void recalc_sigpending_and_wake(struct task_struct *t) |
149 | { | 150 | { |
150 | if (recalc_sigpending_tsk(t)) | 151 | if (recalc_sigpending_tsk(t)) |
151 | signal_wake_up(t, 0); | 152 | signal_wake_up(t, 0); |
152 | } | 153 | } |
153 | 154 | ||
154 | void recalc_sigpending(void) | 155 | void recalc_sigpending(void) |
155 | { | 156 | { |
156 | if (!recalc_sigpending_tsk(current) && !freezing(current)) | 157 | if (!recalc_sigpending_tsk(current) && !freezing(current)) |
157 | clear_thread_flag(TIF_SIGPENDING); | 158 | clear_thread_flag(TIF_SIGPENDING); |
158 | 159 | ||
159 | } | 160 | } |
160 | 161 | ||
161 | /* Given the mask, find the first available signal that should be serviced. */ | 162 | /* Given the mask, find the first available signal that should be serviced. */ |
162 | 163 | ||
163 | #define SYNCHRONOUS_MASK \ | 164 | #define SYNCHRONOUS_MASK \ |
164 | (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \ | 165 | (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \ |
165 | sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS)) | 166 | sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS)) |
166 | 167 | ||
167 | int next_signal(struct sigpending *pending, sigset_t *mask) | 168 | int next_signal(struct sigpending *pending, sigset_t *mask) |
168 | { | 169 | { |
169 | unsigned long i, *s, *m, x; | 170 | unsigned long i, *s, *m, x; |
170 | int sig = 0; | 171 | int sig = 0; |
171 | 172 | ||
172 | s = pending->signal.sig; | 173 | s = pending->signal.sig; |
173 | m = mask->sig; | 174 | m = mask->sig; |
174 | 175 | ||
175 | /* | 176 | /* |
176 | * Handle the first word specially: it contains the | 177 | * Handle the first word specially: it contains the |
177 | * synchronous signals that need to be dequeued first. | 178 | * synchronous signals that need to be dequeued first. |
178 | */ | 179 | */ |
179 | x = *s &~ *m; | 180 | x = *s &~ *m; |
180 | if (x) { | 181 | if (x) { |
181 | if (x & SYNCHRONOUS_MASK) | 182 | if (x & SYNCHRONOUS_MASK) |
182 | x &= SYNCHRONOUS_MASK; | 183 | x &= SYNCHRONOUS_MASK; |
183 | sig = ffz(~x) + 1; | 184 | sig = ffz(~x) + 1; |
184 | return sig; | 185 | return sig; |
185 | } | 186 | } |
186 | 187 | ||
187 | switch (_NSIG_WORDS) { | 188 | switch (_NSIG_WORDS) { |
188 | default: | 189 | default: |
189 | for (i = 1; i < _NSIG_WORDS; ++i) { | 190 | for (i = 1; i < _NSIG_WORDS; ++i) { |
190 | x = *++s &~ *++m; | 191 | x = *++s &~ *++m; |
191 | if (!x) | 192 | if (!x) |
192 | continue; | 193 | continue; |
193 | sig = ffz(~x) + i*_NSIG_BPW + 1; | 194 | sig = ffz(~x) + i*_NSIG_BPW + 1; |
194 | break; | 195 | break; |
195 | } | 196 | } |
196 | break; | 197 | break; |
197 | 198 | ||
198 | case 2: | 199 | case 2: |
199 | x = s[1] &~ m[1]; | 200 | x = s[1] &~ m[1]; |
200 | if (!x) | 201 | if (!x) |
201 | break; | 202 | break; |
202 | sig = ffz(~x) + _NSIG_BPW + 1; | 203 | sig = ffz(~x) + _NSIG_BPW + 1; |
203 | break; | 204 | break; |
204 | 205 | ||
205 | case 1: | 206 | case 1: |
206 | /* Nothing to do */ | 207 | /* Nothing to do */ |
207 | break; | 208 | break; |
208 | } | 209 | } |
209 | 210 | ||
210 | return sig; | 211 | return sig; |
211 | } | 212 | } |
212 | 213 | ||
213 | static inline void print_dropped_signal(int sig) | 214 | static inline void print_dropped_signal(int sig) |
214 | { | 215 | { |
215 | static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); | 216 | static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); |
216 | 217 | ||
217 | if (!print_fatal_signals) | 218 | if (!print_fatal_signals) |
218 | return; | 219 | return; |
219 | 220 | ||
220 | if (!__ratelimit(&ratelimit_state)) | 221 | if (!__ratelimit(&ratelimit_state)) |
221 | return; | 222 | return; |
222 | 223 | ||
223 | printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", | 224 | printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", |
224 | current->comm, current->pid, sig); | 225 | current->comm, current->pid, sig); |
225 | } | 226 | } |
226 | 227 | ||
227 | /** | 228 | /** |
228 | * task_set_jobctl_pending - set jobctl pending bits | 229 | * task_set_jobctl_pending - set jobctl pending bits |
229 | * @task: target task | 230 | * @task: target task |
230 | * @mask: pending bits to set | 231 | * @mask: pending bits to set |
231 | * | 232 | * |
232 | * Set @mask in @task->jobctl. @mask must be a subset of | 233 | * Set @mask in @task->jobctl. @mask must be a subset of |
233 | * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK | | 234 | * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK | |
234 | * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is | 235 | * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is |
235 | * cleared. If @task is already being killed or exiting, this function | 236 | * cleared. If @task is already being killed or exiting, this function |
236 | * becomes a no-op. | 237 | * becomes a no-op. |
237 | * | 238 | * |
238 | * CONTEXT: | 239 | * CONTEXT: |
239 | * Must be called with @task->sighand->siglock held. | 240 | * Must be called with @task->sighand->siglock held. |
240 | * | 241 | * |
241 | * RETURNS: | 242 | * RETURNS: |
242 | * %true if @mask is set, %false if made a no-op because @task was dying. | 243 | * %true if @mask is set, %false if made a no-op because @task was dying. |
243 | */ | 244 | */ |
244 | bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask) | 245 | bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask) |
245 | { | 246 | { |
246 | BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME | | 247 | BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME | |
247 | JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING)); | 248 | JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING)); |
248 | BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK)); | 249 | BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK)); |
249 | 250 | ||
250 | if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) | 251 | if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) |
251 | return false; | 252 | return false; |
252 | 253 | ||
253 | if (mask & JOBCTL_STOP_SIGMASK) | 254 | if (mask & JOBCTL_STOP_SIGMASK) |
254 | task->jobctl &= ~JOBCTL_STOP_SIGMASK; | 255 | task->jobctl &= ~JOBCTL_STOP_SIGMASK; |
255 | 256 | ||
256 | task->jobctl |= mask; | 257 | task->jobctl |= mask; |
257 | return true; | 258 | return true; |
258 | } | 259 | } |
259 | 260 | ||
260 | /** | 261 | /** |
261 | * task_clear_jobctl_trapping - clear jobctl trapping bit | 262 | * task_clear_jobctl_trapping - clear jobctl trapping bit |
262 | * @task: target task | 263 | * @task: target task |
263 | * | 264 | * |
264 | * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED. | 265 | * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED. |
265 | * Clear it and wake up the ptracer. Note that we don't need any further | 266 | * Clear it and wake up the ptracer. Note that we don't need any further |
266 | * locking. @task->siglock guarantees that @task->parent points to the | 267 | * locking. @task->siglock guarantees that @task->parent points to the |
267 | * ptracer. | 268 | * ptracer. |
268 | * | 269 | * |
269 | * CONTEXT: | 270 | * CONTEXT: |
270 | * Must be called with @task->sighand->siglock held. | 271 | * Must be called with @task->sighand->siglock held. |
271 | */ | 272 | */ |
272 | void task_clear_jobctl_trapping(struct task_struct *task) | 273 | void task_clear_jobctl_trapping(struct task_struct *task) |
273 | { | 274 | { |
274 | if (unlikely(task->jobctl & JOBCTL_TRAPPING)) { | 275 | if (unlikely(task->jobctl & JOBCTL_TRAPPING)) { |
275 | task->jobctl &= ~JOBCTL_TRAPPING; | 276 | task->jobctl &= ~JOBCTL_TRAPPING; |
276 | wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT); | 277 | wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT); |
277 | } | 278 | } |
278 | } | 279 | } |
279 | 280 | ||
280 | /** | 281 | /** |
281 | * task_clear_jobctl_pending - clear jobctl pending bits | 282 | * task_clear_jobctl_pending - clear jobctl pending bits |
282 | * @task: target task | 283 | * @task: target task |
283 | * @mask: pending bits to clear | 284 | * @mask: pending bits to clear |
284 | * | 285 | * |
285 | * Clear @mask from @task->jobctl. @mask must be a subset of | 286 | * Clear @mask from @task->jobctl. @mask must be a subset of |
286 | * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other | 287 | * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other |
287 | * STOP bits are cleared together. | 288 | * STOP bits are cleared together. |
288 | * | 289 | * |
289 | * If clearing of @mask leaves no stop or trap pending, this function calls | 290 | * If clearing of @mask leaves no stop or trap pending, this function calls |
290 | * task_clear_jobctl_trapping(). | 291 | * task_clear_jobctl_trapping(). |
291 | * | 292 | * |
292 | * CONTEXT: | 293 | * CONTEXT: |
293 | * Must be called with @task->sighand->siglock held. | 294 | * Must be called with @task->sighand->siglock held. |
294 | */ | 295 | */ |
295 | void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask) | 296 | void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask) |
296 | { | 297 | { |
297 | BUG_ON(mask & ~JOBCTL_PENDING_MASK); | 298 | BUG_ON(mask & ~JOBCTL_PENDING_MASK); |
298 | 299 | ||
299 | if (mask & JOBCTL_STOP_PENDING) | 300 | if (mask & JOBCTL_STOP_PENDING) |
300 | mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; | 301 | mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; |
301 | 302 | ||
302 | task->jobctl &= ~mask; | 303 | task->jobctl &= ~mask; |
303 | 304 | ||
304 | if (!(task->jobctl & JOBCTL_PENDING_MASK)) | 305 | if (!(task->jobctl & JOBCTL_PENDING_MASK)) |
305 | task_clear_jobctl_trapping(task); | 306 | task_clear_jobctl_trapping(task); |
306 | } | 307 | } |
307 | 308 | ||
308 | /** | 309 | /** |
309 | * task_participate_group_stop - participate in a group stop | 310 | * task_participate_group_stop - participate in a group stop |
310 | * @task: task participating in a group stop | 311 | * @task: task participating in a group stop |
311 | * | 312 | * |
312 | * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. | 313 | * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. |
313 | * Group stop states are cleared and the group stop count is consumed if | 314 | * Group stop states are cleared and the group stop count is consumed if |
314 | * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group | 315 | * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group |
315 | * stop, the appropriate %SIGNAL_* flags are set. | 316 | * stop, the appropriate %SIGNAL_* flags are set. |
316 | * | 317 | * |
317 | * CONTEXT: | 318 | * CONTEXT: |
318 | * Must be called with @task->sighand->siglock held. | 319 | * Must be called with @task->sighand->siglock held. |
319 | * | 320 | * |
320 | * RETURNS: | 321 | * RETURNS: |
321 | * %true if group stop completion should be notified to the parent, %false | 322 | * %true if group stop completion should be notified to the parent, %false |
322 | * otherwise. | 323 | * otherwise. |
323 | */ | 324 | */ |
324 | static bool task_participate_group_stop(struct task_struct *task) | 325 | static bool task_participate_group_stop(struct task_struct *task) |
325 | { | 326 | { |
326 | struct signal_struct *sig = task->signal; | 327 | struct signal_struct *sig = task->signal; |
327 | bool consume = task->jobctl & JOBCTL_STOP_CONSUME; | 328 | bool consume = task->jobctl & JOBCTL_STOP_CONSUME; |
328 | 329 | ||
329 | WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); | 330 | WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); |
330 | 331 | ||
331 | task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); | 332 | task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); |
332 | 333 | ||
333 | if (!consume) | 334 | if (!consume) |
334 | return false; | 335 | return false; |
335 | 336 | ||
336 | if (!WARN_ON_ONCE(sig->group_stop_count == 0)) | 337 | if (!WARN_ON_ONCE(sig->group_stop_count == 0)) |
337 | sig->group_stop_count--; | 338 | sig->group_stop_count--; |
338 | 339 | ||
339 | /* | 340 | /* |
340 | * Tell the caller to notify completion iff we are entering into a | 341 | * Tell the caller to notify completion iff we are entering into a |
341 | * fresh group stop. Read comment in do_signal_stop() for details. | 342 | * fresh group stop. Read comment in do_signal_stop() for details. |
342 | */ | 343 | */ |
343 | if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { | 344 | if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { |
344 | sig->flags = SIGNAL_STOP_STOPPED; | 345 | sig->flags = SIGNAL_STOP_STOPPED; |
345 | return true; | 346 | return true; |
346 | } | 347 | } |
347 | return false; | 348 | return false; |
348 | } | 349 | } |
349 | 350 | ||
350 | /* | 351 | /* |
351 | * allocate a new signal queue record | 352 | * allocate a new signal queue record |
352 | * - this may be called without locks if and only if t == current; otherwise an | 353 | * - this may be called without locks if and only if t == current; otherwise an |
353 | * appropriate lock must be held to stop the target task from exiting | 354 | * appropriate lock must be held to stop the target task from exiting |
354 | */ | 355 | */ |
355 | static struct sigqueue * | 356 | static struct sigqueue * |
356 | __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) | 357 | __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) |
357 | { | 358 | { |
358 | struct sigqueue *q = NULL; | 359 | struct sigqueue *q = NULL; |
359 | struct user_struct *user; | 360 | struct user_struct *user; |
360 | 361 | ||
361 | /* | 362 | /* |
362 | * Protect access to @t credentials. This can go away when all | 363 | * Protect access to @t credentials. This can go away when all |
363 | * callers hold rcu read lock. | 364 | * callers hold rcu read lock. |
364 | */ | 365 | */ |
365 | rcu_read_lock(); | 366 | rcu_read_lock(); |
366 | user = get_uid(__task_cred(t)->user); | 367 | user = get_uid(__task_cred(t)->user); |
367 | atomic_inc(&user->sigpending); | 368 | atomic_inc(&user->sigpending); |
368 | rcu_read_unlock(); | 369 | rcu_read_unlock(); |
369 | 370 | ||
370 | if (override_rlimit || | 371 | if (override_rlimit || |
371 | atomic_read(&user->sigpending) <= | 372 | atomic_read(&user->sigpending) <= |
372 | task_rlimit(t, RLIMIT_SIGPENDING)) { | 373 | task_rlimit(t, RLIMIT_SIGPENDING)) { |
373 | q = kmem_cache_alloc(sigqueue_cachep, flags); | 374 | q = kmem_cache_alloc(sigqueue_cachep, flags); |
374 | } else { | 375 | } else { |
375 | print_dropped_signal(sig); | 376 | print_dropped_signal(sig); |
376 | } | 377 | } |
377 | 378 | ||
378 | if (unlikely(q == NULL)) { | 379 | if (unlikely(q == NULL)) { |
379 | atomic_dec(&user->sigpending); | 380 | atomic_dec(&user->sigpending); |
380 | free_uid(user); | 381 | free_uid(user); |
381 | } else { | 382 | } else { |
382 | INIT_LIST_HEAD(&q->list); | 383 | INIT_LIST_HEAD(&q->list); |
383 | q->flags = 0; | 384 | q->flags = 0; |
384 | q->user = user; | 385 | q->user = user; |
385 | } | 386 | } |
386 | 387 | ||
387 | return q; | 388 | return q; |
388 | } | 389 | } |
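
__sigqueue_alloc() is where RLIMIT_SIGPENDING takes effect: the per-user sigpending counter is bumped first, and unless override_rlimit is set the allocation is refused once the counter exceeds the limit. A rough userspace illustration, assuming few other signals are already queued for the same uid (the limit is per-user, not per-process, so the exact failure point can vary):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/resource.h>
    #include <unistd.h>

    int main(void)
    {
            struct rlimit rl = { .rlim_cur = 4, .rlim_max = 4 };
            union sigval v = { .sival_int = 0 };
            sigset_t set;
            int i;

            setrlimit(RLIMIT_SIGPENDING, &rl);      /* tiny per-user queue */

            sigemptyset(&set);
            sigaddset(&set, SIGRTMIN);
            sigprocmask(SIG_BLOCK, &set, NULL);     /* keep the signals queued */

            for (i = 0; i < 16; i++) {
                    if (sigqueue(getpid(), SIGRTMIN, v) < 0) {
                            /* expect EAGAIN once the rlimit is exhausted */
                            printf("sigqueue #%d: %s\n", i, strerror(errno));
                            break;
                    }
            }
            return 0;
    }
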
389 | 390 | ||
390 | static void __sigqueue_free(struct sigqueue *q) | 391 | static void __sigqueue_free(struct sigqueue *q) |
391 | { | 392 | { |
392 | if (q->flags & SIGQUEUE_PREALLOC) | 393 | if (q->flags & SIGQUEUE_PREALLOC) |
393 | return; | 394 | return; |
394 | atomic_dec(&q->user->sigpending); | 395 | atomic_dec(&q->user->sigpending); |
395 | free_uid(q->user); | 396 | free_uid(q->user); |
396 | kmem_cache_free(sigqueue_cachep, q); | 397 | kmem_cache_free(sigqueue_cachep, q); |
397 | } | 398 | } |
398 | 399 | ||
399 | void flush_sigqueue(struct sigpending *queue) | 400 | void flush_sigqueue(struct sigpending *queue) |
400 | { | 401 | { |
401 | struct sigqueue *q; | 402 | struct sigqueue *q; |
402 | 403 | ||
403 | sigemptyset(&queue->signal); | 404 | sigemptyset(&queue->signal); |
404 | while (!list_empty(&queue->list)) { | 405 | while (!list_empty(&queue->list)) { |
405 | q = list_entry(queue->list.next, struct sigqueue, list); | 406 | q = list_entry(queue->list.next, struct sigqueue, list); |
406 | list_del_init(&q->list); | 407 | list_del_init(&q->list); |
407 | __sigqueue_free(q); | 408 | __sigqueue_free(q); |
408 | } | 409 | } |
409 | } | 410 | } |
410 | 411 | ||
411 | /* | 412 | /* |
412 | * Flush all pending signals for a task. | 413 | * Flush all pending signals for a task. |
413 | */ | 414 | */ |
414 | void __flush_signals(struct task_struct *t) | 415 | void __flush_signals(struct task_struct *t) |
415 | { | 416 | { |
416 | clear_tsk_thread_flag(t, TIF_SIGPENDING); | 417 | clear_tsk_thread_flag(t, TIF_SIGPENDING); |
417 | flush_sigqueue(&t->pending); | 418 | flush_sigqueue(&t->pending); |
418 | flush_sigqueue(&t->signal->shared_pending); | 419 | flush_sigqueue(&t->signal->shared_pending); |
419 | } | 420 | } |
420 | 421 | ||
421 | void flush_signals(struct task_struct *t) | 422 | void flush_signals(struct task_struct *t) |
422 | { | 423 | { |
423 | unsigned long flags; | 424 | unsigned long flags; |
424 | 425 | ||
425 | spin_lock_irqsave(&t->sighand->siglock, flags); | 426 | spin_lock_irqsave(&t->sighand->siglock, flags); |
426 | __flush_signals(t); | 427 | __flush_signals(t); |
427 | spin_unlock_irqrestore(&t->sighand->siglock, flags); | 428 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
428 | } | 429 | } |
429 | 430 | ||
430 | static void __flush_itimer_signals(struct sigpending *pending) | 431 | static void __flush_itimer_signals(struct sigpending *pending) |
431 | { | 432 | { |
432 | sigset_t signal, retain; | 433 | sigset_t signal, retain; |
433 | struct sigqueue *q, *n; | 434 | struct sigqueue *q, *n; |
434 | 435 | ||
435 | signal = pending->signal; | 436 | signal = pending->signal; |
436 | sigemptyset(&retain); | 437 | sigemptyset(&retain); |
437 | 438 | ||
438 | list_for_each_entry_safe(q, n, &pending->list, list) { | 439 | list_for_each_entry_safe(q, n, &pending->list, list) { |
439 | int sig = q->info.si_signo; | 440 | int sig = q->info.si_signo; |
440 | 441 | ||
441 | if (likely(q->info.si_code != SI_TIMER)) { | 442 | if (likely(q->info.si_code != SI_TIMER)) { |
442 | sigaddset(&retain, sig); | 443 | sigaddset(&retain, sig); |
443 | } else { | 444 | } else { |
444 | sigdelset(&signal, sig); | 445 | sigdelset(&signal, sig); |
445 | list_del_init(&q->list); | 446 | list_del_init(&q->list); |
446 | __sigqueue_free(q); | 447 | __sigqueue_free(q); |
447 | } | 448 | } |
448 | } | 449 | } |
449 | 450 | ||
450 | sigorsets(&pending->signal, &signal, &retain); | 451 | sigorsets(&pending->signal, &signal, &retain); |
451 | } | 452 | } |
452 | 453 | ||
453 | void flush_itimer_signals(void) | 454 | void flush_itimer_signals(void) |
454 | { | 455 | { |
455 | struct task_struct *tsk = current; | 456 | struct task_struct *tsk = current; |
456 | unsigned long flags; | 457 | unsigned long flags; |
457 | 458 | ||
458 | spin_lock_irqsave(&tsk->sighand->siglock, flags); | 459 | spin_lock_irqsave(&tsk->sighand->siglock, flags); |
459 | __flush_itimer_signals(&tsk->pending); | 460 | __flush_itimer_signals(&tsk->pending); |
460 | __flush_itimer_signals(&tsk->signal->shared_pending); | 461 | __flush_itimer_signals(&tsk->signal->shared_pending); |
461 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); | 462 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); |
462 | } | 463 | } |
463 | 464 | ||
464 | void ignore_signals(struct task_struct *t) | 465 | void ignore_signals(struct task_struct *t) |
465 | { | 466 | { |
466 | int i; | 467 | int i; |
467 | 468 | ||
468 | for (i = 0; i < _NSIG; ++i) | 469 | for (i = 0; i < _NSIG; ++i) |
469 | t->sighand->action[i].sa.sa_handler = SIG_IGN; | 470 | t->sighand->action[i].sa.sa_handler = SIG_IGN; |
470 | 471 | ||
471 | flush_signals(t); | 472 | flush_signals(t); |
472 | } | 473 | } |
473 | 474 | ||
474 | /* | 475 | /* |
475 | * Flush all handlers for a task. | 476 | * Flush all handlers for a task. |
476 | */ | 477 | */ |
477 | 478 | ||
478 | void | 479 | void |
479 | flush_signal_handlers(struct task_struct *t, int force_default) | 480 | flush_signal_handlers(struct task_struct *t, int force_default) |
480 | { | 481 | { |
481 | int i; | 482 | int i; |
482 | struct k_sigaction *ka = &t->sighand->action[0]; | 483 | struct k_sigaction *ka = &t->sighand->action[0]; |
483 | for (i = _NSIG ; i != 0 ; i--) { | 484 | for (i = _NSIG ; i != 0 ; i--) { |
484 | if (force_default || ka->sa.sa_handler != SIG_IGN) | 485 | if (force_default || ka->sa.sa_handler != SIG_IGN) |
485 | ka->sa.sa_handler = SIG_DFL; | 486 | ka->sa.sa_handler = SIG_DFL; |
486 | ka->sa.sa_flags = 0; | 487 | ka->sa.sa_flags = 0; |
487 | sigemptyset(&ka->sa.sa_mask); | 488 | sigemptyset(&ka->sa.sa_mask); |
488 | ka++; | 489 | ka++; |
489 | } | 490 | } |
490 | } | 491 | } |
491 | 492 | ||
492 | int unhandled_signal(struct task_struct *tsk, int sig) | 493 | int unhandled_signal(struct task_struct *tsk, int sig) |
493 | { | 494 | { |
494 | void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; | 495 | void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; |
495 | if (is_global_init(tsk)) | 496 | if (is_global_init(tsk)) |
496 | return 1; | 497 | return 1; |
497 | if (handler != SIG_IGN && handler != SIG_DFL) | 498 | if (handler != SIG_IGN && handler != SIG_DFL) |
498 | return 0; | 499 | return 0; |
499 | /* if ptraced, let the tracer determine */ | 500 | /* if ptraced, let the tracer determine */ |
500 | return !tsk->ptrace; | 501 | return !tsk->ptrace; |
501 | } | 502 | } |
502 | 503 | ||
503 | /* | 504 | /* |
504 | * Notify the system that a driver wants to block all signals for this | 505 | * Notify the system that a driver wants to block all signals for this |
505 | * process, and wants to be notified if any signals at all were to be | 506 | * process, and wants to be notified if any signals at all were to be |
506 | * sent/acted upon. If the notifier routine returns non-zero, then the | 507 | * sent/acted upon. If the notifier routine returns non-zero, then the |
507 | * signal will be acted upon after all. If the notifier routine returns 0, | 508 | * signal will be acted upon after all. If the notifier routine returns 0, |
508 | * then the signal will be blocked. Only one block per process is | 509 | * then the signal will be blocked. Only one block per process is |
509 | * allowed. priv is a pointer to private data that the notifier routine | 510 | * allowed. priv is a pointer to private data that the notifier routine |
510 | * can use to determine if the signal should be blocked or not. | 511 | * can use to determine if the signal should be blocked or not. |
511 | */ | 512 | */ |
512 | void | 513 | void |
513 | block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) | 514 | block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) |
514 | { | 515 | { |
515 | unsigned long flags; | 516 | unsigned long flags; |
516 | 517 | ||
517 | spin_lock_irqsave(&current->sighand->siglock, flags); | 518 | spin_lock_irqsave(&current->sighand->siglock, flags); |
518 | current->notifier_mask = mask; | 519 | current->notifier_mask = mask; |
519 | current->notifier_data = priv; | 520 | current->notifier_data = priv; |
520 | current->notifier = notifier; | 521 | current->notifier = notifier; |
521 | spin_unlock_irqrestore(&current->sighand->siglock, flags); | 522 | spin_unlock_irqrestore(&current->sighand->siglock, flags); |
522 | } | 523 | } |
523 | 524 | ||
524 | /* Notify the system that blocking has ended. */ | 525 | /* Notify the system that blocking has ended. */ |
525 | 526 | ||
526 | void | 527 | void |
527 | unblock_all_signals(void) | 528 | unblock_all_signals(void) |
528 | { | 529 | { |
529 | unsigned long flags; | 530 | unsigned long flags; |
530 | 531 | ||
531 | spin_lock_irqsave(&current->sighand->siglock, flags); | 532 | spin_lock_irqsave(&current->sighand->siglock, flags); |
532 | current->notifier = NULL; | 533 | current->notifier = NULL; |
533 | current->notifier_data = NULL; | 534 | current->notifier_data = NULL; |
534 | recalc_sigpending(); | 535 | recalc_sigpending(); |
535 | spin_unlock_irqrestore(&current->sighand->siglock, flags); | 536 | spin_unlock_irqrestore(&current->sighand->siglock, flags); |
536 | } | 537 | } |
537 | 538 | ||
538 | static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) | 539 | static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) |
539 | { | 540 | { |
540 | struct sigqueue *q, *first = NULL; | 541 | struct sigqueue *q, *first = NULL; |
541 | 542 | ||
542 | /* | 543 | /* |
543 | * Collect the siginfo appropriate to this signal. Check if | 544 | * Collect the siginfo appropriate to this signal. Check if |
544 | * there is another siginfo for the same signal. | 545 | * there is another siginfo for the same signal. |
545 | */ | 546 | */ |
546 | list_for_each_entry(q, &list->list, list) { | 547 | list_for_each_entry(q, &list->list, list) { |
547 | if (q->info.si_signo == sig) { | 548 | if (q->info.si_signo == sig) { |
548 | if (first) | 549 | if (first) |
549 | goto still_pending; | 550 | goto still_pending; |
550 | first = q; | 551 | first = q; |
551 | } | 552 | } |
552 | } | 553 | } |
553 | 554 | ||
554 | sigdelset(&list->signal, sig); | 555 | sigdelset(&list->signal, sig); |
555 | 556 | ||
556 | if (first) { | 557 | if (first) { |
557 | still_pending: | 558 | still_pending: |
558 | list_del_init(&first->list); | 559 | list_del_init(&first->list); |
559 | copy_siginfo(info, &first->info); | 560 | copy_siginfo(info, &first->info); |
560 | __sigqueue_free(first); | 561 | __sigqueue_free(first); |
561 | } else { | 562 | } else { |
562 | /* | 563 | /* |
563 | * Ok, it wasn't in the queue. This must be | 564 | * Ok, it wasn't in the queue. This must be |
564 | * a fast-pathed signal or we must have been | 565 | * a fast-pathed signal or we must have been |
565 | * out of queue space. So zero out the info. | 566 | * out of queue space. So zero out the info. |
566 | */ | 567 | */ |
567 | info->si_signo = sig; | 568 | info->si_signo = sig; |
568 | info->si_errno = 0; | 569 | info->si_errno = 0; |
569 | info->si_code = SI_USER; | 570 | info->si_code = SI_USER; |
570 | info->si_pid = 0; | 571 | info->si_pid = 0; |
571 | info->si_uid = 0; | 572 | info->si_uid = 0; |
572 | } | 573 | } |
573 | } | 574 | } |
574 | 575 | ||
575 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, | 576 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, |
576 | siginfo_t *info) | 577 | siginfo_t *info) |
577 | { | 578 | { |
578 | int sig = next_signal(pending, mask); | 579 | int sig = next_signal(pending, mask); |
579 | 580 | ||
580 | if (sig) { | 581 | if (sig) { |
581 | if (current->notifier) { | 582 | if (current->notifier) { |
582 | if (sigismember(current->notifier_mask, sig)) { | 583 | if (sigismember(current->notifier_mask, sig)) { |
583 | if (!(current->notifier)(current->notifier_data)) { | 584 | if (!(current->notifier)(current->notifier_data)) { |
584 | clear_thread_flag(TIF_SIGPENDING); | 585 | clear_thread_flag(TIF_SIGPENDING); |
585 | return 0; | 586 | return 0; |
586 | } | 587 | } |
587 | } | 588 | } |
588 | } | 589 | } |
589 | 590 | ||
590 | collect_signal(sig, pending, info); | 591 | collect_signal(sig, pending, info); |
591 | } | 592 | } |
592 | 593 | ||
593 | return sig; | 594 | return sig; |
594 | } | 595 | } |
595 | 596 | ||
596 | /* | 597 | /* |
597 | * Dequeue a signal and return the element to the caller, which is | 598 | * Dequeue a signal and return the element to the caller, which is |
598 | * expected to free it. | 599 | * expected to free it. |
599 | * | 600 | * |
600 | * All callers have to hold the siglock. | 601 | * All callers have to hold the siglock. |
601 | */ | 602 | */ |
602 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | 603 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) |
603 | { | 604 | { |
604 | int signr; | 605 | int signr; |
605 | 606 | ||
606 | /* We only dequeue private signals from ourselves; we don't let | 607 | /* We only dequeue private signals from ourselves; we don't let |
607 | * signalfd steal them | 608 | * signalfd steal them |
608 | */ | 609 | */ |
609 | signr = __dequeue_signal(&tsk->pending, mask, info); | 610 | signr = __dequeue_signal(&tsk->pending, mask, info); |
610 | if (!signr) { | 611 | if (!signr) { |
611 | signr = __dequeue_signal(&tsk->signal->shared_pending, | 612 | signr = __dequeue_signal(&tsk->signal->shared_pending, |
612 | mask, info); | 613 | mask, info); |
613 | /* | 614 | /* |
614 | * itimer signal ? | 615 | * itimer signal ? |
615 | * | 616 | * |
616 | * itimers are process shared and we restart periodic | 617 | * itimers are process shared and we restart periodic |
617 | * itimers in the signal delivery path to prevent DoS | 618 | * itimers in the signal delivery path to prevent DoS |
618 | * attacks in the high resolution timer case. This is | 619 | * attacks in the high resolution timer case. This is |
619 | * compliant with the old way of self-restarting | 620 | * compliant with the old way of self-restarting |
620 | * itimers, as the SIGALRM is a legacy signal and only | 621 | * itimers, as the SIGALRM is a legacy signal and only |
621 | * queued once. Changing the restart behaviour to | 622 | * queued once. Changing the restart behaviour to |
622 | * restart the timer in the signal dequeue path also | 623 | * restart the timer in the signal dequeue path also |
623 | * reduces the timer noise on heavily loaded !highres | 624 | * reduces the timer noise on heavily loaded !highres |
624 | * systems. | 625 | * systems. |
625 | */ | 626 | */ |
626 | if (unlikely(signr == SIGALRM)) { | 627 | if (unlikely(signr == SIGALRM)) { |
627 | struct hrtimer *tmr = &tsk->signal->real_timer; | 628 | struct hrtimer *tmr = &tsk->signal->real_timer; |
628 | 629 | ||
629 | if (!hrtimer_is_queued(tmr) && | 630 | if (!hrtimer_is_queued(tmr) && |
630 | tsk->signal->it_real_incr.tv64 != 0) { | 631 | tsk->signal->it_real_incr.tv64 != 0) { |
631 | hrtimer_forward(tmr, tmr->base->get_time(), | 632 | hrtimer_forward(tmr, tmr->base->get_time(), |
632 | tsk->signal->it_real_incr); | 633 | tsk->signal->it_real_incr); |
633 | hrtimer_restart(tmr); | 634 | hrtimer_restart(tmr); |
634 | } | 635 | } |
635 | } | 636 | } |
636 | } | 637 | } |
637 | 638 | ||
638 | recalc_sigpending(); | 639 | recalc_sigpending(); |
639 | if (!signr) | 640 | if (!signr) |
640 | return 0; | 641 | return 0; |
641 | 642 | ||
642 | if (unlikely(sig_kernel_stop(signr))) { | 643 | if (unlikely(sig_kernel_stop(signr))) { |
643 | /* | 644 | /* |
644 | * Set a marker that we have dequeued a stop signal. Our | 645 | * Set a marker that we have dequeued a stop signal. Our |
645 | * caller might release the siglock and then the pending | 646 | * caller might release the siglock and then the pending |
646 | * stop signal it is about to process is no longer in the | 647 | * stop signal it is about to process is no longer in the |
647 | * pending bitmasks, but must still be cleared by a SIGCONT | 648 | * pending bitmasks, but must still be cleared by a SIGCONT |
648 | * (and overruled by a SIGKILL). So those cases clear this | 649 | * (and overruled by a SIGKILL). So those cases clear this |
649 | * shared flag after we've set it. Note that this flag may | 650 | * shared flag after we've set it. Note that this flag may |
650 | * remain set after the signal we return is ignored or | 651 | * remain set after the signal we return is ignored or |
651 | * handled. That doesn't matter because its only purpose | 652 | * handled. That doesn't matter because its only purpose |
652 | * is to alert stop-signal processing code when another | 653 | * is to alert stop-signal processing code when another |
653 | * processor has come along and cleared the flag. | 654 | * processor has come along and cleared the flag. |
654 | */ | 655 | */ |
655 | current->jobctl |= JOBCTL_STOP_DEQUEUED; | 656 | current->jobctl |= JOBCTL_STOP_DEQUEUED; |
656 | } | 657 | } |
657 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { | 658 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { |
658 | /* | 659 | /* |
659 | * Release the siglock to ensure proper locking order | 660 | * Release the siglock to ensure proper locking order |
660 | * of timer locks outside of siglocks. Note, we leave | 661 | * of timer locks outside of siglocks. Note, we leave |
661 | * irqs disabled here, since the posix-timers code is | 662 | * irqs disabled here, since the posix-timers code is |
662 | * about to disable them again anyway. | 663 | * about to disable them again anyway. |
663 | */ | 664 | */ |
664 | spin_unlock(&tsk->sighand->siglock); | 665 | spin_unlock(&tsk->sighand->siglock); |
665 | do_schedule_next_timer(info); | 666 | do_schedule_next_timer(info); |
666 | spin_lock(&tsk->sighand->siglock); | 667 | spin_lock(&tsk->sighand->siglock); |
667 | } | 668 | } |
668 | return signr; | 669 | return signr; |
669 | } | 670 | } |
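
The SIGALRM branch above re-arms the periodic itimer at dequeue time precisely because the legacy signal coalesces: however many expirations occur while delivery is delayed, at most one instance stays pending. A hypothetical demo of the coalescing, standard APIs only:

    #include <signal.h>
    #include <stdio.h>
    #include <sys/time.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
            struct itimerval it = {
                    .it_interval = { 0, 100 * 1000 },       /* 100ms period */
                    .it_value    = { 0, 100 * 1000 },
            };
            struct timespec zero = { 0, 0 };
            sigset_t set;
            int n = 0;

            sigemptyset(&set);
            sigaddset(&set, SIGALRM);
            sigprocmask(SIG_BLOCK, &set, NULL);     /* delay delivery */

            setitimer(ITIMER_REAL, &it, NULL);
            sleep(1);                               /* ~10 expirations while blocked */

            while (sigtimedwait(&set, NULL, &zero) == SIGALRM)
                    n++;                            /* drain what is pending right now */
            printf("pending SIGALRMs after 1s: %d\n", n);   /* 1, not ~10 */
            return 0;
    }
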
670 | 671 | ||
671 | /* | 672 | /* |
672 | * Tell a process that it has a new active signal. | 673 | * Tell a process that it has a new active signal. |
673 | * | 674 | * |
674 | * NOTE! we rely on the previous spin_lock to | 675 | * NOTE! we rely on the previous spin_lock to |
675 | * lock interrupts for us! We can only be called with | 676 | * lock interrupts for us! We can only be called with |
676 | * "siglock" held, and the local interrupt must | 677 | * "siglock" held, and the local interrupt must |
677 | * have been disabled when that got acquired! | 678 | * have been disabled when that got acquired! |
678 | * | 679 | * |
679 | * No need to set need_resched since signal event passing | 680 | * No need to set need_resched since signal event passing |
680 | * goes through ->blocked | 681 | * goes through ->blocked |
681 | */ | 682 | */ |
682 | void signal_wake_up(struct task_struct *t, int resume) | 683 | void signal_wake_up(struct task_struct *t, int resume) |
683 | { | 684 | { |
684 | unsigned int mask; | 685 | unsigned int mask; |
685 | 686 | ||
686 | set_tsk_thread_flag(t, TIF_SIGPENDING); | 687 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
687 | 688 | ||
688 | /* | 689 | /* |
689 | * For SIGKILL, we want to wake it up in the stopped/traced/killable | 690 | * For SIGKILL, we want to wake it up in the stopped/traced/killable |
690 | * case. We don't check t->state here because there is a race with it | 691 | * case. We don't check t->state here because there is a race with it |
691 | * executing on another processor and just now entering stopped state. | 692 | * executing on another processor and just now entering stopped state. |
692 | * By using wake_up_state, we ensure the process will wake up and | 693 | * By using wake_up_state, we ensure the process will wake up and |
693 | * handle its death signal. | 694 | * handle its death signal. |
694 | */ | 695 | */ |
695 | mask = TASK_INTERRUPTIBLE; | 696 | mask = TASK_INTERRUPTIBLE; |
696 | if (resume) | 697 | if (resume) |
697 | mask |= TASK_WAKEKILL; | 698 | mask |= TASK_WAKEKILL; |
698 | if (!wake_up_state(t, mask)) | 699 | if (!wake_up_state(t, mask)) |
699 | kick_process(t); | 700 | kick_process(t); |
700 | } | 701 | } |
701 | 702 | ||
702 | /* | 703 | /* |
703 | * Remove signals in mask from the pending set and queue. | 704 | * Remove signals in mask from the pending set and queue. |
704 | * Returns 1 if any signals were found. | 705 | * Returns 1 if any signals were found. |
705 | * | 706 | * |
706 | * All callers must be holding the siglock. | 707 | * All callers must be holding the siglock. |
707 | * | 708 | * |
708 | * This version takes a sigset mask and looks at all signals, | 709 | * This version takes a sigset mask and looks at all signals, |
709 | * not just those in the first mask word. | 710 | * not just those in the first mask word. |
710 | */ | 711 | */ |
711 | static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) | 712 | static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) |
712 | { | 713 | { |
713 | struct sigqueue *q, *n; | 714 | struct sigqueue *q, *n; |
714 | sigset_t m; | 715 | sigset_t m; |
715 | 716 | ||
716 | sigandsets(&m, mask, &s->signal); | 717 | sigandsets(&m, mask, &s->signal); |
717 | if (sigisemptyset(&m)) | 718 | if (sigisemptyset(&m)) |
718 | return 0; | 719 | return 0; |
719 | 720 | ||
720 | sigandnsets(&s->signal, &s->signal, mask); | 721 | sigandnsets(&s->signal, &s->signal, mask); |
721 | list_for_each_entry_safe(q, n, &s->list, list) { | 722 | list_for_each_entry_safe(q, n, &s->list, list) { |
722 | if (sigismember(mask, q->info.si_signo)) { | 723 | if (sigismember(mask, q->info.si_signo)) { |
723 | list_del_init(&q->list); | 724 | list_del_init(&q->list); |
724 | __sigqueue_free(q); | 725 | __sigqueue_free(q); |
725 | } | 726 | } |
726 | } | 727 | } |
727 | return 1; | 728 | return 1; |
728 | } | 729 | } |
729 | /* | 730 | /* |
730 | * Remove signals in mask from the pending set and queue. | 731 | * Remove signals in mask from the pending set and queue. |
731 | * Returns 1 if any signals were found. | 732 | * Returns 1 if any signals were found. |
732 | * | 733 | * |
733 | * All callers must be holding the siglock. | 734 | * All callers must be holding the siglock. |
734 | */ | 735 | */ |
735 | static int rm_from_queue(unsigned long mask, struct sigpending *s) | 736 | static int rm_from_queue(unsigned long mask, struct sigpending *s) |
736 | { | 737 | { |
737 | struct sigqueue *q, *n; | 738 | struct sigqueue *q, *n; |
738 | 739 | ||
739 | if (!sigtestsetmask(&s->signal, mask)) | 740 | if (!sigtestsetmask(&s->signal, mask)) |
740 | return 0; | 741 | return 0; |
741 | 742 | ||
742 | sigdelsetmask(&s->signal, mask); | 743 | sigdelsetmask(&s->signal, mask); |
743 | list_for_each_entry_safe(q, n, &s->list, list) { | 744 | list_for_each_entry_safe(q, n, &s->list, list) { |
744 | if (q->info.si_signo < SIGRTMIN && | 745 | if (q->info.si_signo < SIGRTMIN && |
745 | (mask & sigmask(q->info.si_signo))) { | 746 | (mask & sigmask(q->info.si_signo))) { |
746 | list_del_init(&q->list); | 747 | list_del_init(&q->list); |
747 | __sigqueue_free(q); | 748 | __sigqueue_free(q); |
748 | } | 749 | } |
749 | } | 750 | } |
750 | return 1; | 751 | return 1; |
751 | } | 752 | } |
752 | 753 | ||
753 | static inline int is_si_special(const struct siginfo *info) | 754 | static inline int is_si_special(const struct siginfo *info) |
754 | { | 755 | { |
755 | return info <= SEND_SIG_FORCED; | 756 | return info <= SEND_SIG_FORCED; |
756 | } | 757 | } |
757 | 758 | ||
758 | static inline bool si_fromuser(const struct siginfo *info) | 759 | static inline bool si_fromuser(const struct siginfo *info) |
759 | { | 760 | { |
760 | return info == SEND_SIG_NOINFO || | 761 | return info == SEND_SIG_NOINFO || |
761 | (!is_si_special(info) && SI_FROMUSER(info)); | 762 | (!is_si_special(info) && SI_FROMUSER(info)); |
762 | } | 763 | } |
763 | 764 | ||
764 | /* | 765 | /* |
765 | * called with RCU read lock from check_kill_permission() | 766 | * called with RCU read lock from check_kill_permission() |
766 | */ | 767 | */ |
767 | static int kill_ok_by_cred(struct task_struct *t) | 768 | static int kill_ok_by_cred(struct task_struct *t) |
768 | { | 769 | { |
769 | const struct cred *cred = current_cred(); | 770 | const struct cred *cred = current_cred(); |
770 | const struct cred *tcred = __task_cred(t); | 771 | const struct cred *tcred = __task_cred(t); |
771 | 772 | ||
772 | if (uid_eq(cred->euid, tcred->suid) || | 773 | if (uid_eq(cred->euid, tcred->suid) || |
773 | uid_eq(cred->euid, tcred->uid) || | 774 | uid_eq(cred->euid, tcred->uid) || |
774 | uid_eq(cred->uid, tcred->suid) || | 775 | uid_eq(cred->uid, tcred->suid) || |
775 | uid_eq(cred->uid, tcred->uid)) | 776 | uid_eq(cred->uid, tcred->uid)) |
776 | return 1; | 777 | return 1; |
777 | 778 | ||
778 | if (ns_capable(tcred->user_ns, CAP_KILL)) | 779 | if (ns_capable(tcred->user_ns, CAP_KILL)) |
779 | return 1; | 780 | return 1; |
780 | 781 | ||
781 | return 0; | 782 | return 0; |
782 | } | 783 | } |
783 | 784 | ||
784 | /* | 785 | /* |
785 | * Bad permissions for sending the signal | 786 | * Bad permissions for sending the signal |
786 | * - the caller must hold the RCU read lock | 787 | * - the caller must hold the RCU read lock |
787 | */ | 788 | */ |
788 | static int check_kill_permission(int sig, struct siginfo *info, | 789 | static int check_kill_permission(int sig, struct siginfo *info, |
789 | struct task_struct *t) | 790 | struct task_struct *t) |
790 | { | 791 | { |
791 | struct pid *sid; | 792 | struct pid *sid; |
792 | int error; | 793 | int error; |
793 | 794 | ||
794 | if (!valid_signal(sig)) | 795 | if (!valid_signal(sig)) |
795 | return -EINVAL; | 796 | return -EINVAL; |
796 | 797 | ||
797 | if (!si_fromuser(info)) | 798 | if (!si_fromuser(info)) |
798 | return 0; | 799 | return 0; |
799 | 800 | ||
800 | error = audit_signal_info(sig, t); /* Let audit system see the signal */ | 801 | error = audit_signal_info(sig, t); /* Let audit system see the signal */ |
801 | if (error) | 802 | if (error) |
802 | return error; | 803 | return error; |
803 | 804 | ||
804 | if (!same_thread_group(current, t) && | 805 | if (!same_thread_group(current, t) && |
805 | !kill_ok_by_cred(t)) { | 806 | !kill_ok_by_cred(t)) { |
806 | switch (sig) { | 807 | switch (sig) { |
807 | case SIGCONT: | 808 | case SIGCONT: |
808 | sid = task_session(t); | 809 | sid = task_session(t); |
809 | /* | 810 | /* |
810 | * We don't return the error if sid == NULL. The | 811 | * We don't return the error if sid == NULL. The |
811 | * task was unhashed; the caller must notice this. | 812 | * task was unhashed; the caller must notice this. |
812 | */ | 813 | */ |
813 | if (!sid || sid == task_session(current)) | 814 | if (!sid || sid == task_session(current)) |
814 | break; | 815 | break; |
815 | default: | 816 | default: |
816 | return -EPERM; | 817 | return -EPERM; |
817 | } | 818 | } |
818 | } | 819 | } |
819 | 820 | ||
820 | return security_task_kill(t, info, sig, 0); | 821 | return security_task_kill(t, info, sig, 0); |
821 | } | 822 | } |
822 | 823 | ||
823 | /** | 824 | /** |
824 | * ptrace_trap_notify - schedule trap to notify ptracer | 825 | * ptrace_trap_notify - schedule trap to notify ptracer |
825 | * @t: tracee wanting to notify tracer | 826 | * @t: tracee wanting to notify tracer |
826 | * | 827 | * |
827 | * This function schedules a sticky ptrace trap which is cleared on the next | 828 | * This function schedules a sticky ptrace trap which is cleared on the next |
828 | * TRAP_STOP to notify ptracer of an event. @t must have been seized by | 829 | * TRAP_STOP to notify ptracer of an event. @t must have been seized by |
829 | * ptracer. | 830 | * ptracer. |
830 | * | 831 | * |
831 | * If @t is running, STOP trap will be taken. If trapped for STOP and | 832 | * If @t is running, STOP trap will be taken. If trapped for STOP and |
832 | * ptracer is listening for events, tracee is woken up so that it can | 833 | * ptracer is listening for events, tracee is woken up so that it can |
833 | * re-trap for the new event. If trapped otherwise, STOP trap will be | 834 | * re-trap for the new event. If trapped otherwise, STOP trap will be |
834 | * eventually taken without returning to userland after the existing traps | 835 | * eventually taken without returning to userland after the existing traps |
835 | * are finished by PTRACE_CONT. | 836 | * are finished by PTRACE_CONT. |
836 | * | 837 | * |
837 | * CONTEXT: | 838 | * CONTEXT: |
838 | * Must be called with @t->sighand->siglock held. | 839 | * Must be called with @t->sighand->siglock held. |
839 | */ | 840 | */ |
840 | static void ptrace_trap_notify(struct task_struct *t) | 841 | static void ptrace_trap_notify(struct task_struct *t) |
841 | { | 842 | { |
842 | WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); | 843 | WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); |
843 | assert_spin_locked(&t->sighand->siglock); | 844 | assert_spin_locked(&t->sighand->siglock); |
844 | 845 | ||
845 | task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); | 846 | task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); |
846 | signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); | 847 | signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); |
847 | } | 848 | } |
848 | 849 | ||
849 | /* | 850 | /* |
850 | * Handle magic process-wide effects of stop/continue signals. Unlike | 851 | * Handle magic process-wide effects of stop/continue signals. Unlike |
851 | * the signal actions, these happen immediately at signal-generation | 852 | * the signal actions, these happen immediately at signal-generation |
852 | * time regardless of blocking, ignoring, or handling. This does the | 853 | * time regardless of blocking, ignoring, or handling. This does the |
853 | * actual continuing for SIGCONT, but not the actual stopping for stop | 854 | * actual continuing for SIGCONT, but not the actual stopping for stop |
854 | * signals. The process stop is done as a signal action for SIG_DFL. | 855 | * signals. The process stop is done as a signal action for SIG_DFL. |
855 | * | 856 | * |
856 | * Returns true if the signal should be actually delivered, otherwise | 857 | * Returns true if the signal should be actually delivered, otherwise |
857 | * it should be dropped. | 858 | * it should be dropped. |
858 | */ | 859 | */ |
859 | static int prepare_signal(int sig, struct task_struct *p, bool force) | 860 | static int prepare_signal(int sig, struct task_struct *p, bool force) |
860 | { | 861 | { |
861 | struct signal_struct *signal = p->signal; | 862 | struct signal_struct *signal = p->signal; |
862 | struct task_struct *t; | 863 | struct task_struct *t; |
863 | 864 | ||
864 | if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { | 865 | if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { |
865 | /* | 866 | /* |
866 | * The process is in the middle of dying, nothing to do. | 867 | * The process is in the middle of dying, nothing to do. |
867 | */ | 868 | */ |
868 | } else if (sig_kernel_stop(sig)) { | 869 | } else if (sig_kernel_stop(sig)) { |
869 | /* | 870 | /* |
870 | * This is a stop signal. Remove SIGCONT from all queues. | 871 | * This is a stop signal. Remove SIGCONT from all queues. |
871 | */ | 872 | */ |
872 | rm_from_queue(sigmask(SIGCONT), &signal->shared_pending); | 873 | rm_from_queue(sigmask(SIGCONT), &signal->shared_pending); |
873 | t = p; | 874 | t = p; |
874 | do { | 875 | do { |
875 | rm_from_queue(sigmask(SIGCONT), &t->pending); | 876 | rm_from_queue(sigmask(SIGCONT), &t->pending); |
876 | } while_each_thread(p, t); | 877 | } while_each_thread(p, t); |
877 | } else if (sig == SIGCONT) { | 878 | } else if (sig == SIGCONT) { |
878 | unsigned int why; | 879 | unsigned int why; |
879 | /* | 880 | /* |
880 | * Remove all stop signals from all queues, wake all threads. | 881 | * Remove all stop signals from all queues, wake all threads. |
881 | */ | 882 | */ |
882 | rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); | 883 | rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); |
883 | t = p; | 884 | t = p; |
884 | do { | 885 | do { |
885 | task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); | 886 | task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); |
886 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); | 887 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); |
887 | if (likely(!(t->ptrace & PT_SEIZED))) | 888 | if (likely(!(t->ptrace & PT_SEIZED))) |
888 | wake_up_state(t, __TASK_STOPPED); | 889 | wake_up_state(t, __TASK_STOPPED); |
889 | else | 890 | else |
890 | ptrace_trap_notify(t); | 891 | ptrace_trap_notify(t); |
891 | } while_each_thread(p, t); | 892 | } while_each_thread(p, t); |
892 | 893 | ||
893 | /* | 894 | /* |
894 | * Notify the parent with CLD_CONTINUED if we were stopped. | 895 | * Notify the parent with CLD_CONTINUED if we were stopped. |
895 | * | 896 | * |
896 | * If we were in the middle of a group stop, we pretend it | 897 | * If we were in the middle of a group stop, we pretend it |
897 | * was already finished, and then continued. Since SIGCHLD | 898 | * was already finished, and then continued. Since SIGCHLD |
898 | * doesn't queue we report only CLD_STOPPED, as if the next | 899 | * doesn't queue we report only CLD_STOPPED, as if the next |
899 | * CLD_CONTINUED was dropped. | 900 | * CLD_CONTINUED was dropped. |
900 | */ | 901 | */ |
901 | why = 0; | 902 | why = 0; |
902 | if (signal->flags & SIGNAL_STOP_STOPPED) | 903 | if (signal->flags & SIGNAL_STOP_STOPPED) |
903 | why |= SIGNAL_CLD_CONTINUED; | 904 | why |= SIGNAL_CLD_CONTINUED; |
904 | else if (signal->group_stop_count) | 905 | else if (signal->group_stop_count) |
905 | why |= SIGNAL_CLD_STOPPED; | 906 | why |= SIGNAL_CLD_STOPPED; |
906 | 907 | ||
907 | if (why) { | 908 | if (why) { |
908 | /* | 909 | /* |
909 | * The first thread which returns from do_signal_stop() | 910 | * The first thread which returns from do_signal_stop() |
910 | * will take ->siglock, notice SIGNAL_CLD_MASK, and | 911 | * will take ->siglock, notice SIGNAL_CLD_MASK, and |
911 | * notify its parent. See get_signal_to_deliver(). | 912 | * notify its parent. See get_signal_to_deliver(). |
912 | */ | 913 | */ |
913 | signal->flags = why | SIGNAL_STOP_CONTINUED; | 914 | signal->flags = why | SIGNAL_STOP_CONTINUED; |
914 | signal->group_stop_count = 0; | 915 | signal->group_stop_count = 0; |
915 | signal->group_exit_code = 0; | 916 | signal->group_exit_code = 0; |
916 | } | 917 | } |
917 | } | 918 | } |
918 | 919 | ||
919 | return !sig_ignored(p, sig, force); | 920 | return !sig_ignored(p, sig, force); |
920 | } | 921 | } |
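
prepare_signal()'s side effects happen at generation time even when everything is blocked, which makes them easy to observe: a blocked, pending SIGTSTP vanishes the moment a SIGCONT is generated for the process (this is also what POSIX requires). A small sketch, plain POSIX calls only:

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
            sigset_t set, pend;

            sigemptyset(&set);
            sigaddset(&set, SIGTSTP);
            sigaddset(&set, SIGCONT);
            sigprocmask(SIG_BLOCK, &set, NULL);     /* nothing is delivered */

            raise(SIGTSTP);                         /* stop signal is queued */
            sigpending(&pend);
            printf("SIGTSTP pending: %d\n", sigismember(&pend, SIGTSTP));   /* 1 */

            raise(SIGCONT);                         /* generation removes pending stops */
            sigpending(&pend);
            printf("SIGTSTP pending: %d\n", sigismember(&pend, SIGTSTP));   /* 0 */
            return 0;
    }
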
921 | 922 | ||
922 | /* | 923 | /* |
923 | * Test if P wants to take SIG. After we've checked all threads with this, | 924 | * Test if P wants to take SIG. After we've checked all threads with this, |
924 | * it's equivalent to finding no threads not blocking SIG. Any threads not | 925 | * it's equivalent to finding no threads not blocking SIG. Any threads not |
925 | * blocking SIG were ruled out because they are not running and already | 926 | * blocking SIG were ruled out because they are not running and already |
926 | * have pending signals. Such threads will dequeue from the shared queue | 927 | * have pending signals. Such threads will dequeue from the shared queue |
927 | * as soon as they're available, so putting the signal on the shared queue | 928 | * as soon as they're available, so putting the signal on the shared queue |
928 | * will be equivalent to sending it to one such thread. | 929 | * will be equivalent to sending it to one such thread. |
929 | */ | 930 | */ |
930 | static inline int wants_signal(int sig, struct task_struct *p) | 931 | static inline int wants_signal(int sig, struct task_struct *p) |
931 | { | 932 | { |
932 | if (sigismember(&p->blocked, sig)) | 933 | if (sigismember(&p->blocked, sig)) |
933 | return 0; | 934 | return 0; |
934 | if (p->flags & PF_EXITING) | 935 | if (p->flags & PF_EXITING) |
935 | return 0; | 936 | return 0; |
936 | if (sig == SIGKILL) | 937 | if (sig == SIGKILL) |
937 | return 1; | 938 | return 1; |
938 | if (task_is_stopped_or_traced(p)) | 939 | if (task_is_stopped_or_traced(p)) |
939 | return 0; | 940 | return 0; |
940 | return task_curr(p) || !signal_pending(p); | 941 | return task_curr(p) || !signal_pending(p); |
941 | } | 942 | } |
942 | 943 | ||
943 | static void complete_signal(int sig, struct task_struct *p, int group) | 944 | static void complete_signal(int sig, struct task_struct *p, int group) |
944 | { | 945 | { |
945 | struct signal_struct *signal = p->signal; | 946 | struct signal_struct *signal = p->signal; |
946 | struct task_struct *t; | 947 | struct task_struct *t; |
947 | 948 | ||
948 | /* | 949 | /* |
949 | * Now find a thread we can wake up to take the signal off the queue. | 950 | * Now find a thread we can wake up to take the signal off the queue. |
950 | * | 951 | * |
951 | * If the main thread wants the signal, it gets first crack. | 952 | * If the main thread wants the signal, it gets first crack. |
952 | * Probably the least surprising to the average bear. | 953 | * Probably the least surprising to the average bear. |
953 | */ | 954 | */ |
954 | if (wants_signal(sig, p)) | 955 | if (wants_signal(sig, p)) |
955 | t = p; | 956 | t = p; |
956 | else if (!group || thread_group_empty(p)) | 957 | else if (!group || thread_group_empty(p)) |
957 | /* | 958 | /* |
958 | * There is just one thread and it does not need to be woken. | 959 | * There is just one thread and it does not need to be woken. |
959 | * It will dequeue unblocked signals before it runs again. | 960 | * It will dequeue unblocked signals before it runs again. |
960 | */ | 961 | */ |
961 | return; | 962 | return; |
962 | else { | 963 | else { |
963 | /* | 964 | /* |
964 | * Otherwise try to find a suitable thread. | 965 | * Otherwise try to find a suitable thread. |
965 | */ | 966 | */ |
966 | t = signal->curr_target; | 967 | t = signal->curr_target; |
967 | while (!wants_signal(sig, t)) { | 968 | while (!wants_signal(sig, t)) { |
968 | t = next_thread(t); | 969 | t = next_thread(t); |
969 | if (t == signal->curr_target) | 970 | if (t == signal->curr_target) |
970 | /* | 971 | /* |
971 | * No thread needs to be woken. | 972 | * No thread needs to be woken. |
972 | * Any eligible threads will see | 973 | * Any eligible threads will see |
973 | * the signal in the queue soon. | 974 | * the signal in the queue soon. |
974 | */ | 975 | */ |
975 | return; | 976 | return; |
976 | } | 977 | } |
977 | signal->curr_target = t; | 978 | signal->curr_target = t; |
978 | } | 979 | } |
979 | 980 | ||
980 | /* | 981 | /* |
981 | * Found a killable thread. If the signal will be fatal, | 982 | * Found a killable thread. If the signal will be fatal, |
982 | * then start taking the whole group down immediately. | 983 | * then start taking the whole group down immediately. |
983 | */ | 984 | */ |
984 | if (sig_fatal(p, sig) && | 985 | if (sig_fatal(p, sig) && |
985 | !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && | 986 | !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && |
986 | !sigismember(&t->real_blocked, sig) && | 987 | !sigismember(&t->real_blocked, sig) && |
987 | (sig == SIGKILL || !t->ptrace)) { | 988 | (sig == SIGKILL || !t->ptrace)) { |
988 | /* | 989 | /* |
989 | * This signal will be fatal to the whole group. | 990 | * This signal will be fatal to the whole group. |
990 | */ | 991 | */ |
991 | if (!sig_kernel_coredump(sig)) { | 992 | if (!sig_kernel_coredump(sig)) { |
992 | /* | 993 | /* |
993 | * Start a group exit and wake everybody up. | 994 | * Start a group exit and wake everybody up. |
994 | * This way we don't have other threads | 995 | * This way we don't have other threads |
995 | * running and doing things after a slower | 996 | * running and doing things after a slower |
996 | * thread has the fatal signal pending. | 997 | * thread has the fatal signal pending. |
997 | */ | 998 | */ |
998 | signal->flags = SIGNAL_GROUP_EXIT; | 999 | signal->flags = SIGNAL_GROUP_EXIT; |
999 | signal->group_exit_code = sig; | 1000 | signal->group_exit_code = sig; |
1000 | signal->group_stop_count = 0; | 1001 | signal->group_stop_count = 0; |
1001 | t = p; | 1002 | t = p; |
1002 | do { | 1003 | do { |
1003 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); | 1004 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); |
1004 | sigaddset(&t->pending.signal, SIGKILL); | 1005 | sigaddset(&t->pending.signal, SIGKILL); |
1005 | signal_wake_up(t, 1); | 1006 | signal_wake_up(t, 1); |
1006 | } while_each_thread(p, t); | 1007 | } while_each_thread(p, t); |
1007 | return; | 1008 | return; |
1008 | } | 1009 | } |
1009 | } | 1010 | } |
1010 | 1011 | ||
1011 | /* | 1012 | /* |
1012 | * The signal is already in the shared-pending queue. | 1013 | * The signal is already in the shared-pending queue. |
1013 | * Tell the chosen thread to wake up and dequeue it. | 1014 | * Tell the chosen thread to wake up and dequeue it. |
1014 | */ | 1015 | */ |
1015 | signal_wake_up(t, sig == SIGKILL); | 1016 | signal_wake_up(t, sig == SIGKILL); |
1016 | return; | 1017 | return; |
1017 | } | 1018 | } |
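
complete_signal()'s choice of target (the main thread first, then the curr_target rotation) is policy rather than contract, but the net effect is that a process-directed signal wakes some thread that has it unblocked. A hedged pthreads sketch: handler_tid and blocker() are illustrative names, the sleep() calls stand in for real synchronization, and it builds with -pthread:

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile pthread_t handler_tid;          /* demo-grade, not strictly atomic */

    static void handler(int sig)
    {
            (void)sig;
            handler_tid = pthread_self();
    }

    static void *blocker(void *arg)
    {
            sigset_t set;

            (void)arg;
            sigemptyset(&set);
            sigaddset(&set, SIGUSR1);
            pthread_sigmask(SIG_BLOCK, &set, NULL); /* this thread refuses SIGUSR1 */
            sleep(2);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            signal(SIGUSR1, handler);
            pthread_create(&t, NULL, blocker, NULL);
            sleep(1);

            kill(getpid(), SIGUSR1);                /* process-directed delivery */
            sleep(1);
            printf("delivered to main thread: %d\n",
                   pthread_equal(handler_tid, pthread_self()));     /* expect 1 */

            pthread_join(t, NULL);
            return 0;
    }
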
1018 | 1019 | ||
1019 | static inline int legacy_queue(struct sigpending *signals, int sig) | 1020 | static inline int legacy_queue(struct sigpending *signals, int sig) |
1020 | { | 1021 | { |
1021 | return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); | 1022 | return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); |
1022 | } | 1023 | } |
1023 | 1024 | ||
1024 | #ifdef CONFIG_USER_NS | 1025 | #ifdef CONFIG_USER_NS |
1025 | static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t) | 1026 | static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t) |
1026 | { | 1027 | { |
1027 | if (current_user_ns() == task_cred_xxx(t, user_ns)) | 1028 | if (current_user_ns() == task_cred_xxx(t, user_ns)) |
1028 | return; | 1029 | return; |
1029 | 1030 | ||
1030 | if (SI_FROMKERNEL(info)) | 1031 | if (SI_FROMKERNEL(info)) |
1031 | return; | 1032 | return; |
1032 | 1033 | ||
1033 | rcu_read_lock(); | 1034 | rcu_read_lock(); |
1034 | info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns), | 1035 | info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns), |
1035 | make_kuid(current_user_ns(), info->si_uid)); | 1036 | make_kuid(current_user_ns(), info->si_uid)); |
1036 | rcu_read_unlock(); | 1037 | rcu_read_unlock(); |
1037 | } | 1038 | } |
1038 | #else | 1039 | #else |
1039 | static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t) | 1040 | static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t) |
1040 | { | 1041 | { |
1041 | return; | 1042 | return; |
1042 | } | 1043 | } |
1043 | #endif | 1044 | #endif |
1044 | 1045 | ||
1045 | static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, | 1046 | static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, |
1046 | int group, int from_ancestor_ns) | 1047 | int group, int from_ancestor_ns) |
1047 | { | 1048 | { |
1048 | struct sigpending *pending; | 1049 | struct sigpending *pending; |
1049 | struct sigqueue *q; | 1050 | struct sigqueue *q; |
1050 | int override_rlimit; | 1051 | int override_rlimit; |
1051 | int ret = 0, result; | 1052 | int ret = 0, result; |
1052 | 1053 | ||
1053 | assert_spin_locked(&t->sighand->siglock); | 1054 | assert_spin_locked(&t->sighand->siglock); |
1054 | 1055 | ||
1055 | result = TRACE_SIGNAL_IGNORED; | 1056 | result = TRACE_SIGNAL_IGNORED; |
1056 | if (!prepare_signal(sig, t, | 1057 | if (!prepare_signal(sig, t, |
1057 | from_ancestor_ns || (info == SEND_SIG_FORCED))) | 1058 | from_ancestor_ns || (info == SEND_SIG_FORCED))) |
1058 | goto ret; | 1059 | goto ret; |
1059 | 1060 | ||
1060 | pending = group ? &t->signal->shared_pending : &t->pending; | 1061 | pending = group ? &t->signal->shared_pending : &t->pending; |
1061 | /* | 1062 | /* |
1062 | * Short-circuit ignored signals and support queuing | 1063 | * Short-circuit ignored signals and support queuing |
1063 | * exactly one non-rt signal, so that we can get more | 1064 | * exactly one non-rt signal, so that we can get more |
1064 | * detailed information about the cause of the signal. | 1065 | * detailed information about the cause of the signal. |
1065 | */ | 1066 | */ |
1066 | result = TRACE_SIGNAL_ALREADY_PENDING; | 1067 | result = TRACE_SIGNAL_ALREADY_PENDING; |
1067 | if (legacy_queue(pending, sig)) | 1068 | if (legacy_queue(pending, sig)) |
1068 | goto ret; | 1069 | goto ret; |
1069 | 1070 | ||
1070 | result = TRACE_SIGNAL_DELIVERED; | 1071 | result = TRACE_SIGNAL_DELIVERED; |
1071 | /* | 1072 | /* |
1072 | * fast-pathed signals for kernel-internal things like SIGSTOP | 1073 | * fast-pathed signals for kernel-internal things like SIGSTOP |
1073 | * or SIGKILL. | 1074 | * or SIGKILL. |
1074 | */ | 1075 | */ |
1075 | if (info == SEND_SIG_FORCED) | 1076 | if (info == SEND_SIG_FORCED) |
1076 | goto out_set; | 1077 | goto out_set; |
1077 | 1078 | ||
1078 | /* | 1079 | /* |
1079 | * Real-time signals must be queued if sent by sigqueue, or | 1080 | * Real-time signals must be queued if sent by sigqueue, or |
1080 | * some other real-time mechanism. It is implementation | 1081 | * some other real-time mechanism. It is implementation |
1081 | * defined whether kill() does so. We attempt to do so, on | 1082 | * defined whether kill() does so. We attempt to do so, on |
1082 | * the principle of least surprise, but since kill is not | 1083 | * the principle of least surprise, but since kill is not |
1083 | * allowed to fail with EAGAIN when low on memory we just | 1084 | * allowed to fail with EAGAIN when low on memory we just |
1084 | * make sure at least one signal gets delivered and don't | 1085 | * make sure at least one signal gets delivered and don't |
1085 | * pass on the info struct. | 1086 | * pass on the info struct. |
1086 | */ | 1087 | */ |
1087 | if (sig < SIGRTMIN) | 1088 | if (sig < SIGRTMIN) |
1088 | override_rlimit = (is_si_special(info) || info->si_code >= 0); | 1089 | override_rlimit = (is_si_special(info) || info->si_code >= 0); |
1089 | else | 1090 | else |
1090 | override_rlimit = 0; | 1091 | override_rlimit = 0; |
1091 | 1092 | ||
1092 | q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, | 1093 | q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, |
1093 | override_rlimit); | 1094 | override_rlimit); |
1094 | if (q) { | 1095 | if (q) { |
1095 | list_add_tail(&q->list, &pending->list); | 1096 | list_add_tail(&q->list, &pending->list); |
1096 | switch ((unsigned long) info) { | 1097 | switch ((unsigned long) info) { |
1097 | case (unsigned long) SEND_SIG_NOINFO: | 1098 | case (unsigned long) SEND_SIG_NOINFO: |
1098 | q->info.si_signo = sig; | 1099 | q->info.si_signo = sig; |
1099 | q->info.si_errno = 0; | 1100 | q->info.si_errno = 0; |
1100 | q->info.si_code = SI_USER; | 1101 | q->info.si_code = SI_USER; |
1101 | q->info.si_pid = task_tgid_nr_ns(current, | 1102 | q->info.si_pid = task_tgid_nr_ns(current, |
1102 | task_active_pid_ns(t)); | 1103 | task_active_pid_ns(t)); |
1103 | q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); | 1104 | q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
1104 | break; | 1105 | break; |
1105 | case (unsigned long) SEND_SIG_PRIV: | 1106 | case (unsigned long) SEND_SIG_PRIV: |
1106 | q->info.si_signo = sig; | 1107 | q->info.si_signo = sig; |
1107 | q->info.si_errno = 0; | 1108 | q->info.si_errno = 0; |
1108 | q->info.si_code = SI_KERNEL; | 1109 | q->info.si_code = SI_KERNEL; |
1109 | q->info.si_pid = 0; | 1110 | q->info.si_pid = 0; |
1110 | q->info.si_uid = 0; | 1111 | q->info.si_uid = 0; |
1111 | break; | 1112 | break; |
1112 | default: | 1113 | default: |
1113 | copy_siginfo(&q->info, info); | 1114 | copy_siginfo(&q->info, info); |
1114 | if (from_ancestor_ns) | 1115 | if (from_ancestor_ns) |
1115 | q->info.si_pid = 0; | 1116 | q->info.si_pid = 0; |
1116 | break; | 1117 | break; |
1117 | } | 1118 | } |
1118 | 1119 | ||
1119 | userns_fixup_signal_uid(&q->info, t); | 1120 | userns_fixup_signal_uid(&q->info, t); |
1120 | 1121 | ||
1121 | } else if (!is_si_special(info)) { | 1122 | } else if (!is_si_special(info)) { |
1122 | if (sig >= SIGRTMIN && info->si_code != SI_USER) { | 1123 | if (sig >= SIGRTMIN && info->si_code != SI_USER) { |
1123 | /* | 1124 | /* |
1124 | * Queue overflow, abort. We may abort if the | 1125 | * Queue overflow, abort. We may abort if the |
1125 | * signal was rt and sent by a user using something | 1126 | * signal was rt and sent by a user using something |
1126 | * other than kill(). | 1127 | * other than kill(). |
1127 | */ | 1128 | */ |
1128 | result = TRACE_SIGNAL_OVERFLOW_FAIL; | 1129 | result = TRACE_SIGNAL_OVERFLOW_FAIL; |
1129 | ret = -EAGAIN; | 1130 | ret = -EAGAIN; |
1130 | goto ret; | 1131 | goto ret; |
1131 | } else { | 1132 | } else { |
1132 | /* | 1133 | /* |
1133 | * This is a silent loss of information. We still | 1134 | * This is a silent loss of information. We still |
1134 | * send the signal, but the *info bits are lost. | 1135 | * send the signal, but the *info bits are lost. |
1135 | */ | 1136 | */ |
1136 | result = TRACE_SIGNAL_LOSE_INFO; | 1137 | result = TRACE_SIGNAL_LOSE_INFO; |
1137 | } | 1138 | } |
1138 | } | 1139 | } |
1139 | 1140 | ||
1140 | out_set: | 1141 | out_set: |
1141 | signalfd_notify(t, sig); | 1142 | signalfd_notify(t, sig); |
1142 | sigaddset(&pending->signal, sig); | 1143 | sigaddset(&pending->signal, sig); |
1143 | complete_signal(sig, t, group); | 1144 | complete_signal(sig, t, group); |
1144 | ret: | 1145 | ret: |
1145 | trace_signal_generate(sig, info, t, group, result); | 1146 | trace_signal_generate(sig, info, t, group, result); |
1146 | return ret; | 1147 | return ret; |
1147 | } | 1148 | } |
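
The legacy_queue() short-circuit above is directly visible from userspace: a classic signal sent repeatedly while blocked is delivered once, whereas an rt signal queues every instance (subject to RLIMIT_SIGPENDING). A rough demonstration with standard calls:

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static int drain(int sig)
    {
            struct timespec zero = { 0, 0 };
            sigset_t set;
            int n = 0;

            sigemptyset(&set);
            sigaddset(&set, sig);
            while (sigtimedwait(&set, NULL, &zero) == sig)
                    n++;
            return n;
    }

    int main(void)
    {
            union sigval v = { .sival_int = 0 };
            sigset_t set;
            int i;

            sigemptyset(&set);
            sigaddset(&set, SIGUSR1);
            sigaddset(&set, SIGRTMIN);
            sigprocmask(SIG_BLOCK, &set, NULL);

            for (i = 0; i < 3; i++) {
                    kill(getpid(), SIGUSR1);                /* legacy: coalesces */
                    sigqueue(getpid(), SIGRTMIN, v);        /* rt: queues */
            }

            printf("SIGUSR1 delivered %d time(s)\n", drain(SIGUSR1));       /* 1 */
            printf("SIGRTMIN delivered %d time(s)\n", drain(SIGRTMIN));     /* 3 */
            return 0;
    }
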
1148 | 1149 | ||
1149 | static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | 1150 | static int send_signal(int sig, struct siginfo *info, struct task_struct *t, |
1150 | int group) | 1151 | int group) |
1151 | { | 1152 | { |
1152 | int from_ancestor_ns = 0; | 1153 | int from_ancestor_ns = 0; |
1153 | 1154 | ||
1154 | #ifdef CONFIG_PID_NS | 1155 | #ifdef CONFIG_PID_NS |
1155 | from_ancestor_ns = si_fromuser(info) && | 1156 | from_ancestor_ns = si_fromuser(info) && |
1156 | !task_pid_nr_ns(current, task_active_pid_ns(t)); | 1157 | !task_pid_nr_ns(current, task_active_pid_ns(t)); |
1157 | #endif | 1158 | #endif |
1158 | 1159 | ||
1159 | return __send_signal(sig, info, t, group, from_ancestor_ns); | 1160 | return __send_signal(sig, info, t, group, from_ancestor_ns); |
1160 | } | 1161 | } |
1161 | 1162 | ||
1162 | static void print_fatal_signal(int signr) | 1163 | static void print_fatal_signal(int signr) |
1163 | { | 1164 | { |
1164 | struct pt_regs *regs = signal_pt_regs(); | 1165 | struct pt_regs *regs = signal_pt_regs(); |
1165 | printk("%s/%d: potentially unexpected fatal signal %d.\n", | 1166 | printk("%s/%d: potentially unexpected fatal signal %d.\n", |
1166 | current->comm, task_pid_nr(current), signr); | 1167 | current->comm, task_pid_nr(current), signr); |
1167 | 1168 | ||
1168 | #if defined(__i386__) && !defined(__arch_um__) | 1169 | #if defined(__i386__) && !defined(__arch_um__) |
1169 | printk("code at %08lx: ", regs->ip); | 1170 | printk("code at %08lx: ", regs->ip); |
1170 | { | 1171 | { |
1171 | int i; | 1172 | int i; |
1172 | for (i = 0; i < 16; i++) { | 1173 | for (i = 0; i < 16; i++) { |
1173 | unsigned char insn; | 1174 | unsigned char insn; |
1174 | 1175 | ||
1175 | if (get_user(insn, (unsigned char *)(regs->ip + i))) | 1176 | if (get_user(insn, (unsigned char *)(regs->ip + i))) |
1176 | break; | 1177 | break; |
1177 | printk("%02x ", insn); | 1178 | printk("%02x ", insn); |
1178 | } | 1179 | } |
1179 | } | 1180 | } |
1180 | #endif | 1181 | #endif |
1181 | printk("\n"); | 1182 | printk("\n"); |
1182 | preempt_disable(); | 1183 | preempt_disable(); |
1183 | show_regs(regs); | 1184 | show_regs(regs); |
1184 | preempt_enable(); | 1185 | preempt_enable(); |
1185 | } | 1186 | } |
1186 | 1187 | ||
1187 | static int __init setup_print_fatal_signals(char *str) | 1188 | static int __init setup_print_fatal_signals(char *str) |
1188 | { | 1189 | { |
1189 | get_option (&str, &print_fatal_signals); | 1190 | get_option (&str, &print_fatal_signals); |
1190 | 1191 | ||
1191 | return 1; | 1192 | return 1; |
1192 | } | 1193 | } |
1193 | 1194 | ||
1194 | __setup("print-fatal-signals=", setup_print_fatal_signals); | 1195 | __setup("print-fatal-signals=", setup_print_fatal_signals); |
1195 | 1196 | ||
1196 | int | 1197 | int |
1197 | __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1198 | __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1198 | { | 1199 | { |
1199 | return send_signal(sig, info, p, 1); | 1200 | return send_signal(sig, info, p, 1); |
1200 | } | 1201 | } |
1201 | 1202 | ||
1202 | static int | 1203 | static int |
1203 | specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) | 1204 | specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) |
1204 | { | 1205 | { |
1205 | return send_signal(sig, info, t, 0); | 1206 | return send_signal(sig, info, t, 0); |
1206 | } | 1207 | } |
1207 | 1208 | ||
1208 | int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, | 1209 | int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, |
1209 | bool group) | 1210 | bool group) |
1210 | { | 1211 | { |
1211 | unsigned long flags; | 1212 | unsigned long flags; |
1212 | int ret = -ESRCH; | 1213 | int ret = -ESRCH; |
1213 | 1214 | ||
1214 | if (lock_task_sighand(p, &flags)) { | 1215 | if (lock_task_sighand(p, &flags)) { |
1215 | ret = send_signal(sig, info, p, group); | 1216 | ret = send_signal(sig, info, p, group); |
1216 | unlock_task_sighand(p, &flags); | 1217 | unlock_task_sighand(p, &flags); |
1217 | } | 1218 | } |
1218 | 1219 | ||
1219 | return ret; | 1220 | return ret; |
1220 | } | 1221 | } |
1221 | 1222 | ||
1222 | /* | 1223 | /* |
1223 | * Force a signal that the process can't ignore: if necessary | 1224 | * Force a signal that the process can't ignore: if necessary |
1224 | * we unblock the signal and change any SIG_IGN to SIG_DFL. | 1225 | * we unblock the signal and change any SIG_IGN to SIG_DFL. |
1225 | * | 1226 | * |
1226 | * Note: If we unblock the signal, we always reset it to SIG_DFL, | 1227 | * Note: If we unblock the signal, we always reset it to SIG_DFL, |
1227 | * since we do not want to have a signal handler that was blocked | 1228 | * since we do not want to have a signal handler that was blocked |
1228 | * be invoked when user space had explicitly blocked it. | 1229 | * be invoked when user space had explicitly blocked it. |
1229 | * | 1230 | * |
1230 | * We don't want to have recursive SIGSEGV's etc, for example, | 1231 | * We don't want to have recursive SIGSEGV's etc, for example, |
1231 | * that is why we also clear SIGNAL_UNKILLABLE. | 1232 | * that is why we also clear SIGNAL_UNKILLABLE. |
1232 | */ | 1233 | */ |
1233 | int | 1234 | int |
1234 | force_sig_info(int sig, struct siginfo *info, struct task_struct *t) | 1235 | force_sig_info(int sig, struct siginfo *info, struct task_struct *t) |
1235 | { | 1236 | { |
1236 | unsigned long int flags; | 1237 | unsigned long int flags; |
1237 | int ret, blocked, ignored; | 1238 | int ret, blocked, ignored; |
1238 | struct k_sigaction *action; | 1239 | struct k_sigaction *action; |
1239 | 1240 | ||
1240 | spin_lock_irqsave(&t->sighand->siglock, flags); | 1241 | spin_lock_irqsave(&t->sighand->siglock, flags); |
1241 | action = &t->sighand->action[sig-1]; | 1242 | action = &t->sighand->action[sig-1]; |
1242 | ignored = action->sa.sa_handler == SIG_IGN; | 1243 | ignored = action->sa.sa_handler == SIG_IGN; |
1243 | blocked = sigismember(&t->blocked, sig); | 1244 | blocked = sigismember(&t->blocked, sig); |
1244 | if (blocked || ignored) { | 1245 | if (blocked || ignored) { |
1245 | action->sa.sa_handler = SIG_DFL; | 1246 | action->sa.sa_handler = SIG_DFL; |
1246 | if (blocked) { | 1247 | if (blocked) { |
1247 | sigdelset(&t->blocked, sig); | 1248 | sigdelset(&t->blocked, sig); |
1248 | recalc_sigpending_and_wake(t); | 1249 | recalc_sigpending_and_wake(t); |
1249 | } | 1250 | } |
1250 | } | 1251 | } |
1251 | if (action->sa.sa_handler == SIG_DFL) | 1252 | if (action->sa.sa_handler == SIG_DFL) |
1252 | t->signal->flags &= ~SIGNAL_UNKILLABLE; | 1253 | t->signal->flags &= ~SIGNAL_UNKILLABLE; |
1253 | ret = specific_send_sig_info(sig, info, t); | 1254 | ret = specific_send_sig_info(sig, info, t); |
1254 | spin_unlock_irqrestore(&t->sighand->siglock, flags); | 1255 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
1255 | 1256 | ||
1256 | return ret; | 1257 | return ret; |
1257 | } | 1258 | } |
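For context (a sketch, not code from this commit): the typical caller of force_sig_info() is an architecture fault handler that must make sure the task actually takes the signal even if it had blocked or ignored it. The function name below is hypothetical, but the shape follows the x86 fault path:

        static void send_fault_sig(struct task_struct *tsk, unsigned long address)
        {
                struct siginfo info;

                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code  = SEGV_MAPERR;            /* address not mapped to object */
                info.si_addr  = (void __user *)address;

                /* unblocks SIGSEGV and resets SIG_IGN to SIG_DFL if needed */
                force_sig_info(SIGSEGV, &info, tsk);
        }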
1258 | 1259 | ||
1259 | /* | 1260 | /* |
1260 | * Nuke all other threads in the group. | 1261 | * Nuke all other threads in the group. |
1261 | */ | 1262 | */ |
1262 | int zap_other_threads(struct task_struct *p) | 1263 | int zap_other_threads(struct task_struct *p) |
1263 | { | 1264 | { |
1264 | struct task_struct *t = p; | 1265 | struct task_struct *t = p; |
1265 | int count = 0; | 1266 | int count = 0; |
1266 | 1267 | ||
1267 | p->signal->group_stop_count = 0; | 1268 | p->signal->group_stop_count = 0; |
1268 | 1269 | ||
1269 | while_each_thread(p, t) { | 1270 | while_each_thread(p, t) { |
1270 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); | 1271 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); |
1271 | count++; | 1272 | count++; |
1272 | 1273 | ||
1273 | /* Don't bother with already dead threads */ | 1274 | /* Don't bother with already dead threads */ |
1274 | if (t->exit_state) | 1275 | if (t->exit_state) |
1275 | continue; | 1276 | continue; |
1276 | sigaddset(&t->pending.signal, SIGKILL); | 1277 | sigaddset(&t->pending.signal, SIGKILL); |
1277 | signal_wake_up(t, 1); | 1278 | signal_wake_up(t, 1); |
1278 | } | 1279 | } |
1279 | 1280 | ||
1280 | return count; | 1281 | return count; |
1281 | } | 1282 | } |
1282 | 1283 | ||
1283 | struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, | 1284 | struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, |
1284 | unsigned long *flags) | 1285 | unsigned long *flags) |
1285 | { | 1286 | { |
1286 | struct sighand_struct *sighand; | 1287 | struct sighand_struct *sighand; |
1287 | 1288 | ||
1288 | for (;;) { | 1289 | for (;;) { |
1289 | local_irq_save(*flags); | 1290 | local_irq_save(*flags); |
1290 | rcu_read_lock(); | 1291 | rcu_read_lock(); |
1291 | sighand = rcu_dereference(tsk->sighand); | 1292 | sighand = rcu_dereference(tsk->sighand); |
1292 | if (unlikely(sighand == NULL)) { | 1293 | if (unlikely(sighand == NULL)) { |
1293 | rcu_read_unlock(); | 1294 | rcu_read_unlock(); |
1294 | local_irq_restore(*flags); | 1295 | local_irq_restore(*flags); |
1295 | break; | 1296 | break; |
1296 | } | 1297 | } |
1297 | 1298 | ||
1298 | spin_lock(&sighand->siglock); | 1299 | spin_lock(&sighand->siglock); |
1299 | if (likely(sighand == tsk->sighand)) { | 1300 | if (likely(sighand == tsk->sighand)) { |
1300 | rcu_read_unlock(); | 1301 | rcu_read_unlock(); |
1301 | break; | 1302 | break; |
1302 | } | 1303 | } |
1303 | spin_unlock(&sighand->siglock); | 1304 | spin_unlock(&sighand->siglock); |
1304 | rcu_read_unlock(); | 1305 | rcu_read_unlock(); |
1305 | local_irq_restore(*flags); | 1306 | local_irq_restore(*flags); |
1306 | } | 1307 | } |
1307 | 1308 | ||
1308 | return sighand; | 1309 | return sighand; |
1309 | } | 1310 | } |
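The retry loop above exists because exec (de_thread()) can swap ->sighand under us: the pointer may only be trusted once we hold the lock and re-check that it is still the task's sighand. Callers go through the lock_task_sighand() wrapper; a sketch of the canonical pattern (do_send_sig_info() above is a real instance):

        unsigned long flags;

        if (lock_task_sighand(p, &flags)) {
                /* p->sighand is stable and ->siglock is held here */
                /* ... work on the task's signal state ... */
                unlock_task_sighand(p, &flags);
        }
        /* else: the task has already been released; nothing to lock */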
1310 | 1311 | ||
1311 | /* | 1312 | /* |
1312 | * send signal info to all the members of a group | 1313 | * send signal info to all the members of a group |
1313 | */ | 1314 | */ |
1314 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1315 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1315 | { | 1316 | { |
1316 | int ret; | 1317 | int ret; |
1317 | 1318 | ||
1318 | rcu_read_lock(); | 1319 | rcu_read_lock(); |
1319 | ret = check_kill_permission(sig, info, p); | 1320 | ret = check_kill_permission(sig, info, p); |
1320 | rcu_read_unlock(); | 1321 | rcu_read_unlock(); |
1321 | 1322 | ||
1322 | if (!ret && sig) | 1323 | if (!ret && sig) |
1323 | ret = do_send_sig_info(sig, info, p, true); | 1324 | ret = do_send_sig_info(sig, info, p, true); |
1324 | 1325 | ||
1325 | return ret; | 1326 | return ret; |
1326 | } | 1327 | } |
1327 | 1328 | ||
1328 | /* | 1329 | /* |
1329 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty | 1330 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty |
1330 | * control characters do (^C, ^Z etc) | 1331 | * control characters do (^C, ^Z etc) |
1331 | * - the caller must hold at least a readlock on tasklist_lock | 1332 | * - the caller must hold at least a readlock on tasklist_lock |
1332 | */ | 1333 | */ |
1333 | int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) | 1334 | int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) |
1334 | { | 1335 | { |
1335 | struct task_struct *p = NULL; | 1336 | struct task_struct *p = NULL; |
1336 | int retval, success; | 1337 | int retval, success; |
1337 | 1338 | ||
1338 | success = 0; | 1339 | success = 0; |
1339 | retval = -ESRCH; | 1340 | retval = -ESRCH; |
1340 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { | 1341 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { |
1341 | int err = group_send_sig_info(sig, info, p); | 1342 | int err = group_send_sig_info(sig, info, p); |
1342 | success |= !err; | 1343 | success |= !err; |
1343 | retval = err; | 1344 | retval = err; |
1344 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); | 1345 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); |
1345 | return success ? 0 : retval; | 1346 | return success ? 0 : retval; |
1346 | } | 1347 | } |
1347 | 1348 | ||
1348 | int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) | 1349 | int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) |
1349 | { | 1350 | { |
1350 | int error = -ESRCH; | 1351 | int error = -ESRCH; |
1351 | struct task_struct *p; | 1352 | struct task_struct *p; |
1352 | 1353 | ||
1353 | rcu_read_lock(); | 1354 | rcu_read_lock(); |
1354 | retry: | 1355 | retry: |
1355 | p = pid_task(pid, PIDTYPE_PID); | 1356 | p = pid_task(pid, PIDTYPE_PID); |
1356 | if (p) { | 1357 | if (p) { |
1357 | error = group_send_sig_info(sig, info, p); | 1358 | error = group_send_sig_info(sig, info, p); |
1358 | if (unlikely(error == -ESRCH)) | 1359 | if (unlikely(error == -ESRCH)) |
1359 | /* | 1360 | /* |
1360 | * The task was unhashed in between, try again. | 1361 | * The task was unhashed in between, try again. |
1361 | * If it is dead, pid_task() will return NULL, | 1362 | * If it is dead, pid_task() will return NULL, |
1362 | * if we race with de_thread() it will find the | 1363 | * if we race with de_thread() it will find the |
1363 | * new leader. | 1364 | * new leader. |
1364 | */ | 1365 | */ |
1365 | goto retry; | 1366 | goto retry; |
1366 | } | 1367 | } |
1367 | rcu_read_unlock(); | 1368 | rcu_read_unlock(); |
1368 | 1369 | ||
1369 | return error; | 1370 | return error; |
1370 | } | 1371 | } |
1371 | 1372 | ||
1372 | int kill_proc_info(int sig, struct siginfo *info, pid_t pid) | 1373 | int kill_proc_info(int sig, struct siginfo *info, pid_t pid) |
1373 | { | 1374 | { |
1374 | int error; | 1375 | int error; |
1375 | rcu_read_lock(); | 1376 | rcu_read_lock(); |
1376 | error = kill_pid_info(sig, info, find_vpid(pid)); | 1377 | error = kill_pid_info(sig, info, find_vpid(pid)); |
1377 | rcu_read_unlock(); | 1378 | rcu_read_unlock(); |
1378 | return error; | 1379 | return error; |
1379 | } | 1380 | } |
1380 | 1381 | ||
1381 | static int kill_as_cred_perm(const struct cred *cred, | 1382 | static int kill_as_cred_perm(const struct cred *cred, |
1382 | struct task_struct *target) | 1383 | struct task_struct *target) |
1383 | { | 1384 | { |
1384 | const struct cred *pcred = __task_cred(target); | 1385 | const struct cred *pcred = __task_cred(target); |
1385 | if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) && | 1386 | if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) && |
1386 | !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid)) | 1387 | !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid)) |
1387 | return 0; | 1388 | return 0; |
1388 | return 1; | 1389 | return 1; |
1389 | } | 1390 | } |
1390 | 1391 | ||
1391 | /* like kill_pid_info(), but doesn't use uid/euid of "current" */ | 1392 | /* like kill_pid_info(), but doesn't use uid/euid of "current" */ |
1392 | int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid, | 1393 | int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid, |
1393 | const struct cred *cred, u32 secid) | 1394 | const struct cred *cred, u32 secid) |
1394 | { | 1395 | { |
1395 | int ret = -EINVAL; | 1396 | int ret = -EINVAL; |
1396 | struct task_struct *p; | 1397 | struct task_struct *p; |
1397 | unsigned long flags; | 1398 | unsigned long flags; |
1398 | 1399 | ||
1399 | if (!valid_signal(sig)) | 1400 | if (!valid_signal(sig)) |
1400 | return ret; | 1401 | return ret; |
1401 | 1402 | ||
1402 | rcu_read_lock(); | 1403 | rcu_read_lock(); |
1403 | p = pid_task(pid, PIDTYPE_PID); | 1404 | p = pid_task(pid, PIDTYPE_PID); |
1404 | if (!p) { | 1405 | if (!p) { |
1405 | ret = -ESRCH; | 1406 | ret = -ESRCH; |
1406 | goto out_unlock; | 1407 | goto out_unlock; |
1407 | } | 1408 | } |
1408 | if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) { | 1409 | if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) { |
1409 | ret = -EPERM; | 1410 | ret = -EPERM; |
1410 | goto out_unlock; | 1411 | goto out_unlock; |
1411 | } | 1412 | } |
1412 | ret = security_task_kill(p, info, sig, secid); | 1413 | ret = security_task_kill(p, info, sig, secid); |
1413 | if (ret) | 1414 | if (ret) |
1414 | goto out_unlock; | 1415 | goto out_unlock; |
1415 | 1416 | ||
1416 | if (sig) { | 1417 | if (sig) { |
1417 | if (lock_task_sighand(p, &flags)) { | 1418 | if (lock_task_sighand(p, &flags)) { |
1418 | ret = __send_signal(sig, info, p, 1, 0); | 1419 | ret = __send_signal(sig, info, p, 1, 0); |
1419 | unlock_task_sighand(p, &flags); | 1420 | unlock_task_sighand(p, &flags); |
1420 | } else | 1421 | } else |
1421 | ret = -ESRCH; | 1422 | ret = -ESRCH; |
1422 | } | 1423 | } |
1423 | out_unlock: | 1424 | out_unlock: |
1424 | rcu_read_unlock(); | 1425 | rcu_read_unlock(); |
1425 | return ret; | 1426 | return ret; |
1426 | } | 1427 | } |
1427 | EXPORT_SYMBOL_GPL(kill_pid_info_as_cred); | 1428 | EXPORT_SYMBOL_GPL(kill_pid_info_as_cred); |
1428 | 1429 | ||
1429 | /* | 1430 | /* |
1430 | * kill_something_info() interprets pid in interesting ways just like kill(2). | 1431 | * kill_something_info() interprets pid in interesting ways just like kill(2). |
1431 | * | 1432 | * |
1432 | * POSIX specifies that kill(-1,sig) is unspecified, but what we have | 1433 | * POSIX specifies that kill(-1,sig) is unspecified, but what we have |
1433 | * is probably wrong. Should make it like BSD or SYSV. | 1434 | * is probably wrong. Should make it like BSD or SYSV. |
1434 | */ | 1435 | */ |
1435 | 1436 | ||
1436 | static int kill_something_info(int sig, struct siginfo *info, pid_t pid) | 1437 | static int kill_something_info(int sig, struct siginfo *info, pid_t pid) |
1437 | { | 1438 | { |
1438 | int ret; | 1439 | int ret; |
1439 | 1440 | ||
1440 | if (pid > 0) { | 1441 | if (pid > 0) { |
1441 | rcu_read_lock(); | 1442 | rcu_read_lock(); |
1442 | ret = kill_pid_info(sig, info, find_vpid(pid)); | 1443 | ret = kill_pid_info(sig, info, find_vpid(pid)); |
1443 | rcu_read_unlock(); | 1444 | rcu_read_unlock(); |
1444 | return ret; | 1445 | return ret; |
1445 | } | 1446 | } |
1446 | 1447 | ||
1447 | read_lock(&tasklist_lock); | 1448 | read_lock(&tasklist_lock); |
1448 | if (pid != -1) { | 1449 | if (pid != -1) { |
1449 | ret = __kill_pgrp_info(sig, info, | 1450 | ret = __kill_pgrp_info(sig, info, |
1450 | pid ? find_vpid(-pid) : task_pgrp(current)); | 1451 | pid ? find_vpid(-pid) : task_pgrp(current)); |
1451 | } else { | 1452 | } else { |
1452 | int retval = 0, count = 0; | 1453 | int retval = 0, count = 0; |
1453 | struct task_struct * p; | 1454 | struct task_struct * p; |
1454 | 1455 | ||
1455 | for_each_process(p) { | 1456 | for_each_process(p) { |
1456 | if (task_pid_vnr(p) > 1 && | 1457 | if (task_pid_vnr(p) > 1 && |
1457 | !same_thread_group(p, current)) { | 1458 | !same_thread_group(p, current)) { |
1458 | int err = group_send_sig_info(sig, info, p); | 1459 | int err = group_send_sig_info(sig, info, p); |
1459 | ++count; | 1460 | ++count; |
1460 | if (err != -EPERM) | 1461 | if (err != -EPERM) |
1461 | retval = err; | 1462 | retval = err; |
1462 | } | 1463 | } |
1463 | } | 1464 | } |
1464 | ret = count ? retval : -ESRCH; | 1465 | ret = count ? retval : -ESRCH; |
1465 | } | 1466 | } |
1466 | read_unlock(&tasklist_lock); | 1467 | read_unlock(&tasklist_lock); |
1467 | 1468 | ||
1468 | return ret; | 1469 | return ret; |
1469 | } | 1470 | } |
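kill_something_info() implements the four pid cases of kill(2); seen from userspace they are (a minimal sketch of the man-page semantics, not code from this file):

        #include <signal.h>

        void kill_cases(void)
        {
                kill(1234, SIGTERM);    /* pid > 0:  exactly one process */
                kill(0, SIGTERM);       /* pid == 0: the caller's own process group */
                kill(-5678, SIGTERM);   /* pid < -1: process group 5678 */
                kill(-1, SIGTERM);      /* pid == -1: every process we may signal,
                                         * excluding init and our own thread group */
        }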
1470 | 1471 | ||
1471 | /* | 1472 | /* |
1472 | * These are for backward compatibility with the rest of the kernel source. | 1473 | * These are for backward compatibility with the rest of the kernel source. |
1473 | */ | 1474 | */ |
1474 | 1475 | ||
1475 | int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1476 | int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1476 | { | 1477 | { |
1477 | /* | 1478 | /* |
1478 | * Make sure legacy kernel users don't send in bad values | 1479 | * Make sure legacy kernel users don't send in bad values |
1479 | * (normal paths check this in check_kill_permission). | 1480 | * (normal paths check this in check_kill_permission). |
1480 | */ | 1481 | */ |
1481 | if (!valid_signal(sig)) | 1482 | if (!valid_signal(sig)) |
1482 | return -EINVAL; | 1483 | return -EINVAL; |
1483 | 1484 | ||
1484 | return do_send_sig_info(sig, info, p, false); | 1485 | return do_send_sig_info(sig, info, p, false); |
1485 | } | 1486 | } |
1486 | 1487 | ||
1487 | #define __si_special(priv) \ | 1488 | #define __si_special(priv) \ |
1488 | ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) | 1489 | ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) |
1489 | 1490 | ||
1490 | int | 1491 | int |
1491 | send_sig(int sig, struct task_struct *p, int priv) | 1492 | send_sig(int sig, struct task_struct *p, int priv) |
1492 | { | 1493 | { |
1493 | return send_sig_info(sig, __si_special(priv), p); | 1494 | return send_sig_info(sig, __si_special(priv), p); |
1494 | } | 1495 | } |
1495 | 1496 | ||
1496 | void | 1497 | void |
1497 | force_sig(int sig, struct task_struct *p) | 1498 | force_sig(int sig, struct task_struct *p) |
1498 | { | 1499 | { |
1499 | force_sig_info(sig, SEND_SIG_PRIV, p); | 1500 | force_sig_info(sig, SEND_SIG_PRIV, p); |
1500 | } | 1501 | } |
1501 | 1502 | ||
1502 | /* | 1503 | /* |
1503 | * When things go south during signal handling, we | 1504 | * When things go south during signal handling, we |
1504 | * will force a SIGSEGV. And if the signal that caused | 1505 | * will force a SIGSEGV. And if the signal that caused |
1505 | * the problem was already a SIGSEGV, we'll want to | 1506 | * the problem was already a SIGSEGV, we'll want to |
1506 | * make sure we don't even try to deliver the signal.. | 1507 | * make sure we don't even try to deliver the signal.. |
1507 | */ | 1508 | */ |
1508 | int | 1509 | int |
1509 | force_sigsegv(int sig, struct task_struct *p) | 1510 | force_sigsegv(int sig, struct task_struct *p) |
1510 | { | 1511 | { |
1511 | if (sig == SIGSEGV) { | 1512 | if (sig == SIGSEGV) { |
1512 | unsigned long flags; | 1513 | unsigned long flags; |
1513 | spin_lock_irqsave(&p->sighand->siglock, flags); | 1514 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1514 | p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; | 1515 | p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; |
1515 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | 1516 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1516 | } | 1517 | } |
1517 | force_sig(SIGSEGV, p); | 1518 | force_sig(SIGSEGV, p); |
1518 | return 0; | 1519 | return 0; |
1519 | } | 1520 | } |
1520 | 1521 | ||
1521 | int kill_pgrp(struct pid *pid, int sig, int priv) | 1522 | int kill_pgrp(struct pid *pid, int sig, int priv) |
1522 | { | 1523 | { |
1523 | int ret; | 1524 | int ret; |
1524 | 1525 | ||
1525 | read_lock(&tasklist_lock); | 1526 | read_lock(&tasklist_lock); |
1526 | ret = __kill_pgrp_info(sig, __si_special(priv), pid); | 1527 | ret = __kill_pgrp_info(sig, __si_special(priv), pid); |
1527 | read_unlock(&tasklist_lock); | 1528 | read_unlock(&tasklist_lock); |
1528 | 1529 | ||
1529 | return ret; | 1530 | return ret; |
1530 | } | 1531 | } |
1531 | EXPORT_SYMBOL(kill_pgrp); | 1532 | EXPORT_SYMBOL(kill_pgrp); |
1532 | 1533 | ||
1533 | int kill_pid(struct pid *pid, int sig, int priv) | 1534 | int kill_pid(struct pid *pid, int sig, int priv) |
1534 | { | 1535 | { |
1535 | return kill_pid_info(sig, __si_special(priv), pid); | 1536 | return kill_pid_info(sig, __si_special(priv), pid); |
1536 | } | 1537 | } |
1537 | EXPORT_SYMBOL(kill_pid); | 1538 | EXPORT_SYMBOL(kill_pid); |
1538 | 1539 | ||
1539 | /* | 1540 | /* |
1540 | * These functions support sending signals using preallocated sigqueue | 1541 | * These functions support sending signals using preallocated sigqueue |
1541 | * structures. This is needed "because realtime applications cannot | 1542 | * structures. This is needed "because realtime applications cannot |
1542 | * afford to lose notifications of asynchronous events, like timer | 1543 | * afford to lose notifications of asynchronous events, like timer |
1543 | * expirations or I/O completions". In the case of POSIX Timers | 1544 | * expirations or I/O completions". In the case of POSIX Timers |
1544 | * we allocate the sigqueue structure from the timer_create. If this | 1545 | * we allocate the sigqueue structure from the timer_create. If this |
1545 | * allocation fails we are able to report the failure to the application | 1546 | * allocation fails we are able to report the failure to the application |
1546 | * with an EAGAIN error. | 1547 | * with an EAGAIN error. |
1547 | */ | 1548 | */ |
1548 | struct sigqueue *sigqueue_alloc(void) | 1549 | struct sigqueue *sigqueue_alloc(void) |
1549 | { | 1550 | { |
1550 | struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); | 1551 | struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); |
1551 | 1552 | ||
1552 | if (q) | 1553 | if (q) |
1553 | q->flags |= SIGQUEUE_PREALLOC; | 1554 | q->flags |= SIGQUEUE_PREALLOC; |
1554 | 1555 | ||
1555 | return q; | 1556 | return q; |
1556 | } | 1557 | } |
1557 | 1558 | ||
1558 | void sigqueue_free(struct sigqueue *q) | 1559 | void sigqueue_free(struct sigqueue *q) |
1559 | { | 1560 | { |
1560 | unsigned long flags; | 1561 | unsigned long flags; |
1561 | spinlock_t *lock = ¤t->sighand->siglock; | 1562 | spinlock_t *lock = ¤t->sighand->siglock; |
1562 | 1563 | ||
1563 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); | 1564 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1564 | /* | 1565 | /* |
1565 | * We must hold ->siglock while testing q->list | 1566 | * We must hold ->siglock while testing q->list |
1566 | * to serialize with collect_signal() or with | 1567 | * to serialize with collect_signal() or with |
1567 | * __exit_signal()->flush_sigqueue(). | 1568 | * __exit_signal()->flush_sigqueue(). |
1568 | */ | 1569 | */ |
1569 | spin_lock_irqsave(lock, flags); | 1570 | spin_lock_irqsave(lock, flags); |
1570 | q->flags &= ~SIGQUEUE_PREALLOC; | 1571 | q->flags &= ~SIGQUEUE_PREALLOC; |
1571 | /* | 1572 | /* |
1572 | * If it is queued it will be freed when dequeued, | 1573 | * If it is queued it will be freed when dequeued, |
1573 | * like the "regular" sigqueue. | 1574 | * like the "regular" sigqueue. |
1574 | */ | 1575 | */ |
1575 | if (!list_empty(&q->list)) | 1576 | if (!list_empty(&q->list)) |
1576 | q = NULL; | 1577 | q = NULL; |
1577 | spin_unlock_irqrestore(lock, flags); | 1578 | spin_unlock_irqrestore(lock, flags); |
1578 | 1579 | ||
1579 | if (q) | 1580 | if (q) |
1580 | __sigqueue_free(q); | 1581 | __sigqueue_free(q); |
1581 | } | 1582 | } |
1582 | 1583 | ||
1583 | int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) | 1584 | int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) |
1584 | { | 1585 | { |
1585 | int sig = q->info.si_signo; | 1586 | int sig = q->info.si_signo; |
1586 | struct sigpending *pending; | 1587 | struct sigpending *pending; |
1587 | unsigned long flags; | 1588 | unsigned long flags; |
1588 | int ret, result; | 1589 | int ret, result; |
1589 | 1590 | ||
1590 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); | 1591 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1591 | 1592 | ||
1592 | ret = -1; | 1593 | ret = -1; |
1593 | if (!likely(lock_task_sighand(t, &flags))) | 1594 | if (!likely(lock_task_sighand(t, &flags))) |
1594 | goto ret; | 1595 | goto ret; |
1595 | 1596 | ||
1596 | ret = 1; /* the signal is ignored */ | 1597 | ret = 1; /* the signal is ignored */ |
1597 | result = TRACE_SIGNAL_IGNORED; | 1598 | result = TRACE_SIGNAL_IGNORED; |
1598 | if (!prepare_signal(sig, t, false)) | 1599 | if (!prepare_signal(sig, t, false)) |
1599 | goto out; | 1600 | goto out; |
1600 | 1601 | ||
1601 | ret = 0; | 1602 | ret = 0; |
1602 | if (unlikely(!list_empty(&q->list))) { | 1603 | if (unlikely(!list_empty(&q->list))) { |
1603 | /* | 1604 | /* |
1604 | * If an SI_TIMER entry is already queued, just increment | 1605 | * If an SI_TIMER entry is already queued, just increment |
1605 | * the overrun count. | 1606 | * the overrun count. |
1606 | */ | 1607 | */ |
1607 | BUG_ON(q->info.si_code != SI_TIMER); | 1608 | BUG_ON(q->info.si_code != SI_TIMER); |
1608 | q->info.si_overrun++; | 1609 | q->info.si_overrun++; |
1609 | result = TRACE_SIGNAL_ALREADY_PENDING; | 1610 | result = TRACE_SIGNAL_ALREADY_PENDING; |
1610 | goto out; | 1611 | goto out; |
1611 | } | 1612 | } |
1612 | q->info.si_overrun = 0; | 1613 | q->info.si_overrun = 0; |
1613 | 1614 | ||
1614 | signalfd_notify(t, sig); | 1615 | signalfd_notify(t, sig); |
1615 | pending = group ? &t->signal->shared_pending : &t->pending; | 1616 | pending = group ? &t->signal->shared_pending : &t->pending; |
1616 | list_add_tail(&q->list, &pending->list); | 1617 | list_add_tail(&q->list, &pending->list); |
1617 | sigaddset(&pending->signal, sig); | 1618 | sigaddset(&pending->signal, sig); |
1618 | complete_signal(sig, t, group); | 1619 | complete_signal(sig, t, group); |
1619 | result = TRACE_SIGNAL_DELIVERED; | 1620 | result = TRACE_SIGNAL_DELIVERED; |
1620 | out: | 1621 | out: |
1621 | trace_signal_generate(sig, &q->info, t, group, result); | 1622 | trace_signal_generate(sig, &q->info, t, group, result); |
1622 | unlock_task_sighand(t, &flags); | 1623 | unlock_task_sighand(t, &flags); |
1623 | ret: | 1624 | ret: |
1624 | return ret; | 1625 | return ret; |
1625 | } | 1626 | } |
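Putting the three entry points together, the preallocated-sigqueue lifecycle used by the POSIX-timer code looks roughly like this (a condensed sketch; error handling, locking and the exact field assignments are illustrative):

        struct sigqueue *q;

        q = sigqueue_alloc();                   /* at timer_create() time */
        if (!q)
                return -EAGAIN;                 /* the only point that can fail */

        /* on each timer expiry: */
        q->info.si_signo = SIGALRM;
        q->info.si_code  = SI_TIMER;
        send_sigqueue(q, task, 0);              /* queues, or bumps si_overrun if pending */

        /* at timer_delete() time: */
        sigqueue_free(q);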
1626 | 1627 | ||
1627 | /* | 1628 | /* |
1628 | * Let a parent know about the death of a child. | 1629 | * Let a parent know about the death of a child. |
1629 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. | 1630 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. |
1630 | * | 1631 | * |
1631 | * Returns true if our parent ignored us and so we've switched to | 1632 | * Returns true if our parent ignored us and so we've switched to |
1632 | * self-reaping. | 1633 | * self-reaping. |
1633 | */ | 1634 | */ |
1634 | bool do_notify_parent(struct task_struct *tsk, int sig) | 1635 | bool do_notify_parent(struct task_struct *tsk, int sig) |
1635 | { | 1636 | { |
1636 | struct siginfo info; | 1637 | struct siginfo info; |
1637 | unsigned long flags; | 1638 | unsigned long flags; |
1638 | struct sighand_struct *psig; | 1639 | struct sighand_struct *psig; |
1639 | bool autoreap = false; | 1640 | bool autoreap = false; |
1640 | 1641 | ||
1641 | BUG_ON(sig == -1); | 1642 | BUG_ON(sig == -1); |
1642 | 1643 | ||
1643 | /* do_notify_parent_cldstop should have been called instead. */ | 1644 | /* do_notify_parent_cldstop should have been called instead. */ |
1644 | BUG_ON(task_is_stopped_or_traced(tsk)); | 1645 | BUG_ON(task_is_stopped_or_traced(tsk)); |
1645 | 1646 | ||
1646 | BUG_ON(!tsk->ptrace && | 1647 | BUG_ON(!tsk->ptrace && |
1647 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); | 1648 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); |
1648 | 1649 | ||
1649 | if (sig != SIGCHLD) { | 1650 | if (sig != SIGCHLD) { |
1650 | /* | 1651 | /* |
1651 | * This is only possible if parent == real_parent. | 1652 | * This is only possible if parent == real_parent. |
1652 | * Check if it has changed security domain. | 1653 | * Check if it has changed security domain. |
1653 | */ | 1654 | */ |
1654 | if (tsk->parent_exec_id != tsk->parent->self_exec_id) | 1655 | if (tsk->parent_exec_id != tsk->parent->self_exec_id) |
1655 | sig = SIGCHLD; | 1656 | sig = SIGCHLD; |
1656 | } | 1657 | } |
1657 | 1658 | ||
1658 | info.si_signo = sig; | 1659 | info.si_signo = sig; |
1659 | info.si_errno = 0; | 1660 | info.si_errno = 0; |
1660 | /* | 1661 | /* |
1661 | * We are under tasklist_lock here so our parent is tied to | 1662 | * We are under tasklist_lock here so our parent is tied to |
1662 | * us and cannot change. | 1663 | * us and cannot change. |
1663 | * | 1664 | * |
1664 | * task_active_pid_ns will always return the same pid namespace | 1665 | * task_active_pid_ns will always return the same pid namespace |
1665 | * until a task passes through release_task. | 1666 | * until a task passes through release_task. |
1666 | * | 1667 | * |
1667 | * write_lock() currently calls preempt_disable() which is the | 1668 | * write_lock() currently calls preempt_disable() which is the |
1668 | * same as rcu_read_lock(), but according to Oleg, it is not | 1669 | * same as rcu_read_lock(), but according to Oleg, it is not |
1669 | * correct to rely on this. | 1670 | * correct to rely on this. |
1670 | */ | 1671 | */ |
1671 | rcu_read_lock(); | 1672 | rcu_read_lock(); |
1672 | info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); | 1673 | info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); |
1673 | info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), | 1674 | info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), |
1674 | task_uid(tsk)); | 1675 | task_uid(tsk)); |
1675 | rcu_read_unlock(); | 1676 | rcu_read_unlock(); |
1676 | 1677 | ||
1677 | info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime); | 1678 | info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime); |
1678 | info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime); | 1679 | info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime); |
1679 | 1680 | ||
1680 | info.si_status = tsk->exit_code & 0x7f; | 1681 | info.si_status = tsk->exit_code & 0x7f; |
1681 | if (tsk->exit_code & 0x80) | 1682 | if (tsk->exit_code & 0x80) |
1682 | info.si_code = CLD_DUMPED; | 1683 | info.si_code = CLD_DUMPED; |
1683 | else if (tsk->exit_code & 0x7f) | 1684 | else if (tsk->exit_code & 0x7f) |
1684 | info.si_code = CLD_KILLED; | 1685 | info.si_code = CLD_KILLED; |
1685 | else { | 1686 | else { |
1686 | info.si_code = CLD_EXITED; | 1687 | info.si_code = CLD_EXITED; |
1687 | info.si_status = tsk->exit_code >> 8; | 1688 | info.si_status = tsk->exit_code >> 8; |
1688 | } | 1689 | } |
1689 | 1690 | ||
1690 | psig = tsk->parent->sighand; | 1691 | psig = tsk->parent->sighand; |
1691 | spin_lock_irqsave(&psig->siglock, flags); | 1692 | spin_lock_irqsave(&psig->siglock, flags); |
1692 | if (!tsk->ptrace && sig == SIGCHLD && | 1693 | if (!tsk->ptrace && sig == SIGCHLD && |
1693 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || | 1694 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || |
1694 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { | 1695 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { |
1695 | /* | 1696 | /* |
1696 | * We are exiting and our parent doesn't care. POSIX.1 | 1697 | * We are exiting and our parent doesn't care. POSIX.1 |
1697 | * defines special semantics for setting SIGCHLD to SIG_IGN | 1698 | * defines special semantics for setting SIGCHLD to SIG_IGN |
1698 | * or setting the SA_NOCLDWAIT flag: we should be reaped | 1699 | * or setting the SA_NOCLDWAIT flag: we should be reaped |
1699 | * automatically and not left for our parent's wait4 call. | 1700 | * automatically and not left for our parent's wait4 call. |
1700 | * Rather than having the parent do it as a magic kind of | 1701 | * Rather than having the parent do it as a magic kind of |
1701 | * signal handler, we just set this to tell do_exit that we | 1702 | * signal handler, we just set this to tell do_exit that we |
1702 | * can be cleaned up without becoming a zombie. Note that | 1703 | * can be cleaned up without becoming a zombie. Note that |
1703 | * we still call __wake_up_parent in this case, because a | 1704 | * we still call __wake_up_parent in this case, because a |
1704 | * blocked sys_wait4 might now return -ECHILD. | 1705 | * blocked sys_wait4 might now return -ECHILD. |
1705 | * | 1706 | * |
1706 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT | 1707 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT |
1707 | * is implementation-defined: we do (if you don't want | 1708 | * is implementation-defined: we do (if you don't want |
1708 | * it, just use SIG_IGN instead). | 1709 | * it, just use SIG_IGN instead). |
1709 | */ | 1710 | */ |
1710 | autoreap = true; | 1711 | autoreap = true; |
1711 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) | 1712 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) |
1712 | sig = 0; | 1713 | sig = 0; |
1713 | } | 1714 | } |
1714 | if (valid_signal(sig) && sig) | 1715 | if (valid_signal(sig) && sig) |
1715 | __group_send_sig_info(sig, &info, tsk->parent); | 1716 | __group_send_sig_info(sig, &info, tsk->parent); |
1716 | __wake_up_parent(tsk, tsk->parent); | 1717 | __wake_up_parent(tsk, tsk->parent); |
1717 | spin_unlock_irqrestore(&psig->siglock, flags); | 1718 | spin_unlock_irqrestore(&psig->siglock, flags); |
1718 | 1719 | ||
1719 | return autoreap; | 1720 | return autoreap; |
1720 | } | 1721 | } |
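The autoreap branch above is the kernel side of the POSIX rule the long comment describes; userspace opts in like this (a minimal sketch of the portable pattern, not code from this file):

        #include <signal.h>
        #include <string.h>

        void reap_children_automatically(void)
        {
                struct sigaction sa;

                memset(&sa, 0, sizeof(sa));
                sa.sa_handler = SIG_IGN;        /* or SIG_DFL together with SA_NOCLDWAIT */
                sa.sa_flags = SA_NOCLDWAIT;
                sigaction(SIGCHLD, &sa, NULL);  /* children never become zombies;
                                                 * wait4() will return -ECHILD */
        }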
1721 | 1722 | ||
1722 | /** | 1723 | /** |
1723 | * do_notify_parent_cldstop - notify parent of stopped/continued state change | 1724 | * do_notify_parent_cldstop - notify parent of stopped/continued state change |
1724 | * @tsk: task reporting the state change | 1725 | * @tsk: task reporting the state change |
1725 | * @for_ptracer: the notification is for ptracer | 1726 | * @for_ptracer: the notification is for ptracer |
1726 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report | 1727 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report |
1727 | * | 1728 | * |
1728 | * Notify @tsk's parent that the stopped/continued state has changed. If | 1729 | * Notify @tsk's parent that the stopped/continued state has changed. If |
1729 | * @for_ptracer is %false, @tsk's group leader notifies its real parent. | 1730 | * @for_ptracer is %false, @tsk's group leader notifies its real parent. |
1730 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. | 1731 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. |
1731 | * | 1732 | * |
1732 | * CONTEXT: | 1733 | * CONTEXT: |
1733 | * Must be called with tasklist_lock at least read locked. | 1734 | * Must be called with tasklist_lock at least read locked. |
1734 | */ | 1735 | */ |
1735 | static void do_notify_parent_cldstop(struct task_struct *tsk, | 1736 | static void do_notify_parent_cldstop(struct task_struct *tsk, |
1736 | bool for_ptracer, int why) | 1737 | bool for_ptracer, int why) |
1737 | { | 1738 | { |
1738 | struct siginfo info; | 1739 | struct siginfo info; |
1739 | unsigned long flags; | 1740 | unsigned long flags; |
1740 | struct task_struct *parent; | 1741 | struct task_struct *parent; |
1741 | struct sighand_struct *sighand; | 1742 | struct sighand_struct *sighand; |
1742 | 1743 | ||
1743 | if (for_ptracer) { | 1744 | if (for_ptracer) { |
1744 | parent = tsk->parent; | 1745 | parent = tsk->parent; |
1745 | } else { | 1746 | } else { |
1746 | tsk = tsk->group_leader; | 1747 | tsk = tsk->group_leader; |
1747 | parent = tsk->real_parent; | 1748 | parent = tsk->real_parent; |
1748 | } | 1749 | } |
1749 | 1750 | ||
1750 | info.si_signo = SIGCHLD; | 1751 | info.si_signo = SIGCHLD; |
1751 | info.si_errno = 0; | 1752 | info.si_errno = 0; |
1752 | /* | 1753 | /* |
1753 | * see comment in do_notify_parent() about the following 4 lines | 1754 | * see comment in do_notify_parent() about the following 4 lines |
1754 | */ | 1755 | */ |
1755 | rcu_read_lock(); | 1756 | rcu_read_lock(); |
1756 | info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); | 1757 | info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); |
1757 | info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); | 1758 | info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); |
1758 | rcu_read_unlock(); | 1759 | rcu_read_unlock(); |
1759 | 1760 | ||
1760 | info.si_utime = cputime_to_clock_t(tsk->utime); | 1761 | info.si_utime = cputime_to_clock_t(tsk->utime); |
1761 | info.si_stime = cputime_to_clock_t(tsk->stime); | 1762 | info.si_stime = cputime_to_clock_t(tsk->stime); |
1762 | 1763 | ||
1763 | info.si_code = why; | 1764 | info.si_code = why; |
1764 | switch (why) { | 1765 | switch (why) { |
1765 | case CLD_CONTINUED: | 1766 | case CLD_CONTINUED: |
1766 | info.si_status = SIGCONT; | 1767 | info.si_status = SIGCONT; |
1767 | break; | 1768 | break; |
1768 | case CLD_STOPPED: | 1769 | case CLD_STOPPED: |
1769 | info.si_status = tsk->signal->group_exit_code & 0x7f; | 1770 | info.si_status = tsk->signal->group_exit_code & 0x7f; |
1770 | break; | 1771 | break; |
1771 | case CLD_TRAPPED: | 1772 | case CLD_TRAPPED: |
1772 | info.si_status = tsk->exit_code & 0x7f; | 1773 | info.si_status = tsk->exit_code & 0x7f; |
1773 | break; | 1774 | break; |
1774 | default: | 1775 | default: |
1775 | BUG(); | 1776 | BUG(); |
1776 | } | 1777 | } |
1777 | 1778 | ||
1778 | sighand = parent->sighand; | 1779 | sighand = parent->sighand; |
1779 | spin_lock_irqsave(&sighand->siglock, flags); | 1780 | spin_lock_irqsave(&sighand->siglock, flags); |
1780 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && | 1781 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && |
1781 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) | 1782 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) |
1782 | __group_send_sig_info(SIGCHLD, &info, parent); | 1783 | __group_send_sig_info(SIGCHLD, &info, parent); |
1783 | /* | 1784 | /* |
1784 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. | 1785 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. |
1785 | */ | 1786 | */ |
1786 | __wake_up_parent(tsk, parent); | 1787 | __wake_up_parent(tsk, parent); |
1787 | spin_unlock_irqrestore(&sighand->siglock, flags); | 1788 | spin_unlock_irqrestore(&sighand->siglock, flags); |
1788 | } | 1789 | } |
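On the parent's side, these CLD_STOPPED/CLD_CONTINUED notifications surface through waitpid() when the corresponding flags are passed; a hedged userspace sketch:

        #include <sys/types.h>
        #include <sys/wait.h>

        void observe_child(pid_t pid)
        {
                int status;

                if (waitpid(pid, &status, WUNTRACED | WCONTINUED) < 0)
                        return;
                if (WIFSTOPPED(status)) {
                        /* CLD_STOPPED: WSTOPSIG(status) is the stopping signal */
                } else if (WIFCONTINUED(status)) {
                        /* CLD_CONTINUED: the child was resumed by SIGCONT */
                }
        }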
1789 | 1790 | ||
1790 | static inline int may_ptrace_stop(void) | 1791 | static inline int may_ptrace_stop(void) |
1791 | { | 1792 | { |
1792 | if (!likely(current->ptrace)) | 1793 | if (!likely(current->ptrace)) |
1793 | return 0; | 1794 | return 0; |
1794 | /* | 1795 | /* |
1795 | * Are we in the middle of do_coredump? | 1796 | * Are we in the middle of do_coredump? |
1796 | * If so, and our tracer is also part of the coredump, stopping | 1797 | * If so, and our tracer is also part of the coredump, stopping |
1797 | * is a deadlock situation, and pointless because our tracer | 1798 | * is a deadlock situation, and pointless because our tracer |
1798 | * is dead, so don't allow us to stop. | 1799 | * is dead, so don't allow us to stop. |
1799 | * If SIGKILL was already sent before the caller unlocked | 1800 | * If SIGKILL was already sent before the caller unlocked |
1800 | * ->siglock we must see ->core_state != NULL. Otherwise it | 1801 | * ->siglock we must see ->core_state != NULL. Otherwise it |
1801 | * is safe to enter schedule(). | 1802 | * is safe to enter schedule(). |
1802 | */ | 1803 | */ |
1803 | if (unlikely(current->mm->core_state) && | 1804 | if (unlikely(current->mm->core_state) && |
1804 | unlikely(current->mm == current->parent->mm)) | 1805 | unlikely(current->mm == current->parent->mm)) |
1805 | return 0; | 1806 | return 0; |
1806 | 1807 | ||
1807 | return 1; | 1808 | return 1; |
1808 | } | 1809 | } |
1809 | 1810 | ||
1810 | /* | 1811 | /* |
1811 | * Return non-zero if there is a SIGKILL that should be waking us up. | 1812 | * Return non-zero if there is a SIGKILL that should be waking us up. |
1812 | * Called with the siglock held. | 1813 | * Called with the siglock held. |
1813 | */ | 1814 | */ |
1814 | static int sigkill_pending(struct task_struct *tsk) | 1815 | static int sigkill_pending(struct task_struct *tsk) |
1815 | { | 1816 | { |
1816 | return sigismember(&tsk->pending.signal, SIGKILL) || | 1817 | return sigismember(&tsk->pending.signal, SIGKILL) || |
1817 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); | 1818 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); |
1818 | } | 1819 | } |
1819 | 1820 | ||
1820 | /* | 1821 | /* |
1821 | * This must be called with current->sighand->siglock held. | 1822 | * This must be called with current->sighand->siglock held. |
1822 | * | 1823 | * |
1823 | * This should be the path for all ptrace stops. | 1824 | * This should be the path for all ptrace stops. |
1824 | * We always set current->last_siginfo while stopped here. | 1825 | * We always set current->last_siginfo while stopped here. |
1825 | * That makes it a way to test a stopped process for | 1826 | * That makes it a way to test a stopped process for |
1826 | * being ptrace-stopped vs being job-control-stopped. | 1827 | * being ptrace-stopped vs being job-control-stopped. |
1827 | * | 1828 | * |
1828 | * If we actually decide not to stop at all because the tracer | 1829 | * If we actually decide not to stop at all because the tracer |
1829 | * is gone, we keep current->exit_code unless clear_code. | 1830 | * is gone, we keep current->exit_code unless clear_code. |
1830 | */ | 1831 | */ |
1831 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) | 1832 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) |
1832 | __releases(¤t->sighand->siglock) | 1833 | __releases(¤t->sighand->siglock) |
1833 | __acquires(¤t->sighand->siglock) | 1834 | __acquires(¤t->sighand->siglock) |
1834 | { | 1835 | { |
1835 | bool gstop_done = false; | 1836 | bool gstop_done = false; |
1836 | 1837 | ||
1837 | if (arch_ptrace_stop_needed(exit_code, info)) { | 1838 | if (arch_ptrace_stop_needed(exit_code, info)) { |
1838 | /* | 1839 | /* |
1839 | * The arch code has something special to do before a | 1840 | * The arch code has something special to do before a |
1840 | * ptrace stop. This is allowed to block, e.g. for faults | 1841 | * ptrace stop. This is allowed to block, e.g. for faults |
1841 | * on user stack pages. We can't keep the siglock while | 1842 | * on user stack pages. We can't keep the siglock while |
1842 | * calling arch_ptrace_stop, so we must release it now. | 1843 | * calling arch_ptrace_stop, so we must release it now. |
1843 | * To preserve proper semantics, we must do this before | 1844 | * To preserve proper semantics, we must do this before |
1844 | * any signal bookkeeping like checking group_stop_count. | 1845 | * any signal bookkeeping like checking group_stop_count. |
1845 | * Meanwhile, a SIGKILL could come in before we retake the | 1846 | * Meanwhile, a SIGKILL could come in before we retake the |
1846 | * siglock. That must prevent us from sleeping in TASK_TRACED. | 1847 | * siglock. That must prevent us from sleeping in TASK_TRACED. |
1847 | * So after regaining the lock, we must check for SIGKILL. | 1848 | * So after regaining the lock, we must check for SIGKILL. |
1848 | */ | 1849 | */ |
1849 | spin_unlock_irq(¤t->sighand->siglock); | 1850 | spin_unlock_irq(¤t->sighand->siglock); |
1850 | arch_ptrace_stop(exit_code, info); | 1851 | arch_ptrace_stop(exit_code, info); |
1851 | spin_lock_irq(¤t->sighand->siglock); | 1852 | spin_lock_irq(¤t->sighand->siglock); |
1852 | if (sigkill_pending(current)) | 1853 | if (sigkill_pending(current)) |
1853 | return; | 1854 | return; |
1854 | } | 1855 | } |
1855 | 1856 | ||
1856 | /* | 1857 | /* |
1857 | * We're committing to trapping. TRACED should be visible before | 1858 | * We're committing to trapping. TRACED should be visible before |
1858 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). | 1859 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). |
1859 | * Also, transition to TRACED and updates to ->jobctl should be | 1860 | * Also, transition to TRACED and updates to ->jobctl should be |
1860 | * atomic with respect to siglock and should be done after the arch | 1861 | * atomic with respect to siglock and should be done after the arch |
1861 | * hook as siglock is released and regrabbed across it. | 1862 | * hook as siglock is released and regrabbed across it. |
1862 | */ | 1863 | */ |
1863 | set_current_state(TASK_TRACED); | 1864 | set_current_state(TASK_TRACED); |
1864 | 1865 | ||
1865 | current->last_siginfo = info; | 1866 | current->last_siginfo = info; |
1866 | current->exit_code = exit_code; | 1867 | current->exit_code = exit_code; |
1867 | 1868 | ||
1868 | /* | 1869 | /* |
1869 | * If @why is CLD_STOPPED, we're trapping to participate in a group | 1870 | * If @why is CLD_STOPPED, we're trapping to participate in a group |
1870 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered | 1871 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered |
1871 | * across siglock relocks since INTERRUPT was scheduled, PENDING | 1872 | * across siglock relocks since INTERRUPT was scheduled, PENDING |
1872 | * could be clear now. We act as if SIGCONT is received after | 1873 | * could be clear now. We act as if SIGCONT is received after |
1873 | * TASK_TRACED is entered - ignore it. | 1874 | * TASK_TRACED is entered - ignore it. |
1874 | */ | 1875 | */ |
1875 | if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) | 1876 | if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) |
1876 | gstop_done = task_participate_group_stop(current); | 1877 | gstop_done = task_participate_group_stop(current); |
1877 | 1878 | ||
1878 | /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ | 1879 | /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ |
1879 | task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); | 1880 | task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); |
1880 | if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) | 1881 | if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) |
1881 | task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); | 1882 | task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); |
1882 | 1883 | ||
1883 | /* entering a trap, clear TRAPPING */ | 1884 | /* entering a trap, clear TRAPPING */ |
1884 | task_clear_jobctl_trapping(current); | 1885 | task_clear_jobctl_trapping(current); |
1885 | 1886 | ||
1886 | spin_unlock_irq(¤t->sighand->siglock); | 1887 | spin_unlock_irq(¤t->sighand->siglock); |
1887 | read_lock(&tasklist_lock); | 1888 | read_lock(&tasklist_lock); |
1888 | if (may_ptrace_stop()) { | 1889 | if (may_ptrace_stop()) { |
1889 | /* | 1890 | /* |
1890 | * Notify parents of the stop. | 1891 | * Notify parents of the stop. |
1891 | * | 1892 | * |
1892 | * While ptraced, there are two parents - the ptracer and | 1893 | * While ptraced, there are two parents - the ptracer and |
1893 | * the real_parent of the group_leader. The ptracer should | 1894 | * the real_parent of the group_leader. The ptracer should |
1894 | * know about every stop while the real parent is only | 1895 | * know about every stop while the real parent is only |
1895 | * interested in the completion of group stop. The states | 1896 | * interested in the completion of group stop. The states |
1896 | * for the two don't interact with each other. Notify | 1897 | * for the two don't interact with each other. Notify |
1897 | * separately unless they're going to be duplicates. | 1898 | * separately unless they're going to be duplicates. |
1898 | */ | 1899 | */ |
1899 | do_notify_parent_cldstop(current, true, why); | 1900 | do_notify_parent_cldstop(current, true, why); |
1900 | if (gstop_done && ptrace_reparented(current)) | 1901 | if (gstop_done && ptrace_reparented(current)) |
1901 | do_notify_parent_cldstop(current, false, why); | 1902 | do_notify_parent_cldstop(current, false, why); |
1902 | 1903 | ||
1903 | /* | 1904 | /* |
1904 | * Don't want to allow preemption here, because | 1905 | * Don't want to allow preemption here, because |
1905 | * sys_ptrace() needs this task to be inactive. | 1906 | * sys_ptrace() needs this task to be inactive. |
1906 | * | 1907 | * |
1907 | * XXX: implement read_unlock_no_resched(). | 1908 | * XXX: implement read_unlock_no_resched(). |
1908 | */ | 1909 | */ |
1909 | preempt_disable(); | 1910 | preempt_disable(); |
1910 | read_unlock(&tasklist_lock); | 1911 | read_unlock(&tasklist_lock); |
1911 | preempt_enable_no_resched(); | 1912 | preempt_enable_no_resched(); |
1912 | schedule(); | 1913 | schedule(); |
1913 | } else { | 1914 | } else { |
1914 | /* | 1915 | /* |
1915 | * By the time we got the lock, our tracer went away. | 1916 | * By the time we got the lock, our tracer went away. |
1916 | * Don't drop the lock yet, another tracer may come. | 1917 | * Don't drop the lock yet, another tracer may come. |
1917 | * | 1918 | * |
1918 | * If @gstop_done, the ptracer went away between group stop | 1919 | * If @gstop_done, the ptracer went away between group stop |
1919 | * completion and here. During detach, it would have set | 1920 | * completion and here. During detach, it would have set |
1920 | * JOBCTL_STOP_PENDING on us and we'll re-enter | 1921 | * JOBCTL_STOP_PENDING on us and we'll re-enter |
1921 | * TASK_STOPPED in do_signal_stop() on return, so notifying | 1922 | * TASK_STOPPED in do_signal_stop() on return, so notifying |
1922 | * the real parent of the group stop completion is enough. | 1923 | * the real parent of the group stop completion is enough. |
1923 | */ | 1924 | */ |
1924 | if (gstop_done) | 1925 | if (gstop_done) |
1925 | do_notify_parent_cldstop(current, false, why); | 1926 | do_notify_parent_cldstop(current, false, why); |
1926 | 1927 | ||
1927 | __set_current_state(TASK_RUNNING); | 1928 | __set_current_state(TASK_RUNNING); |
1928 | if (clear_code) | 1929 | if (clear_code) |
1929 | current->exit_code = 0; | 1930 | current->exit_code = 0; |
1930 | read_unlock(&tasklist_lock); | 1931 | read_unlock(&tasklist_lock); |
1931 | } | 1932 | } |
1932 | 1933 | ||
1933 | /* | 1934 | /* |
1934 | * While in TASK_TRACED, we were considered "frozen enough". | 1935 | * While in TASK_TRACED, we were considered "frozen enough". |
1935 | * Now that we woke up, if we're supposed to be frozen it's | 1936 | * Now that we woke up, if we're supposed to be frozen it's |
1936 | * crucial that we freeze now, before running anything substantial. | 1937 | * crucial that we freeze now, before running anything substantial. |
1937 | */ | 1938 | */ |
1938 | try_to_freeze(); | 1939 | try_to_freeze(); |
1939 | 1940 | ||
1940 | /* | 1941 | /* |
1941 | * We are back. Now reacquire the siglock before touching | 1942 | * We are back. Now reacquire the siglock before touching |
1942 | * last_siginfo, so that we are sure to have synchronized with | 1943 | * last_siginfo, so that we are sure to have synchronized with |
1943 | * any signal-sending on another CPU that wants to examine it. | 1944 | * any signal-sending on another CPU that wants to examine it. |
1944 | */ | 1945 | */ |
1945 | spin_lock_irq(¤t->sighand->siglock); | 1946 | spin_lock_irq(¤t->sighand->siglock); |
1946 | current->last_siginfo = NULL; | 1947 | current->last_siginfo = NULL; |
1947 | 1948 | ||
1948 | /* LISTENING can be set only during STOP traps, clear it */ | 1949 | /* LISTENING can be set only during STOP traps, clear it */ |
1949 | current->jobctl &= ~JOBCTL_LISTENING; | 1950 | current->jobctl &= ~JOBCTL_LISTENING; |
1950 | 1951 | ||
1951 | /* | 1952 | /* |
1952 | * Queued signals ignored us while we were stopped for tracing. | 1953 | * Queued signals ignored us while we were stopped for tracing. |
1953 | * So check for any that we should take before resuming user mode. | 1954 | * So check for any that we should take before resuming user mode. |
1954 | * This sets TIF_SIGPENDING, but never clears it. | 1955 | * This sets TIF_SIGPENDING, but never clears it. |
1955 | */ | 1956 | */ |
1956 | recalc_sigpending_tsk(current); | 1957 | recalc_sigpending_tsk(current); |
1957 | } | 1958 | } |
1958 | 1959 | ||
1959 | static void ptrace_do_notify(int signr, int exit_code, int why) | 1960 | static void ptrace_do_notify(int signr, int exit_code, int why) |
1960 | { | 1961 | { |
1961 | siginfo_t info; | 1962 | siginfo_t info; |
1962 | 1963 | ||
1963 | memset(&info, 0, sizeof info); | 1964 | memset(&info, 0, sizeof info); |
1964 | info.si_signo = signr; | 1965 | info.si_signo = signr; |
1965 | info.si_code = exit_code; | 1966 | info.si_code = exit_code; |
1966 | info.si_pid = task_pid_vnr(current); | 1967 | info.si_pid = task_pid_vnr(current); |
1967 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); | 1968 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
1968 | 1969 | ||
1969 | /* Let the debugger run. */ | 1970 | /* Let the debugger run. */ |
1970 | ptrace_stop(exit_code, why, 1, &info); | 1971 | ptrace_stop(exit_code, why, 1, &info); |
1971 | } | 1972 | } |
1972 | 1973 | ||
1973 | void ptrace_notify(int exit_code) | 1974 | void ptrace_notify(int exit_code) |
1974 | { | 1975 | { |
1975 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); | 1976 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); |
1976 | if (unlikely(current->task_works)) | 1977 | if (unlikely(current->task_works)) |
1977 | task_work_run(); | 1978 | task_work_run(); |
1978 | 1979 | ||
1979 | spin_lock_irq(¤t->sighand->siglock); | 1980 | spin_lock_irq(¤t->sighand->siglock); |
1980 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); | 1981 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); |
1981 | spin_unlock_irq(¤t->sighand->siglock); | 1982 | spin_unlock_irq(¤t->sighand->siglock); |
1982 | } | 1983 | } |
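The BUG_ON in ptrace_notify() pins down the calling convention: the low seven bits of exit_code must be SIGTRAP and nothing above bit 15 may be set, which leaves bits 8-15 for a PTRACE_EVENT_* code. Callers build exit_code accordingly; ptrace_event() in the ptrace headers does essentially this:

        /* report a fork to the tracer: event code in bits 8-15, SIGTRAP below */
        ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);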
1983 | 1984 | ||
1984 | /** | 1985 | /** |
1985 | * do_signal_stop - handle group stop for SIGSTOP and other stop signals | 1986 | * do_signal_stop - handle group stop for SIGSTOP and other stop signals |
1986 | * @signr: signr causing group stop if initiating | 1987 | * @signr: signr causing group stop if initiating |
1987 | * | 1988 | * |
1988 | * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr | 1989 | * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr |
1989 | * and participate in it. If already set, participate in the existing | 1990 | * and participate in it. If already set, participate in the existing |
1990 | * group stop. If participated in a group stop (and thus slept), %true is | 1991 | * group stop. If participated in a group stop (and thus slept), %true is |
1991 | * returned with siglock released. | 1992 | * returned with siglock released. |
1992 | * | 1993 | * |
1993 | * If ptraced, this function doesn't handle stop itself. Instead, | 1994 | * If ptraced, this function doesn't handle stop itself. Instead, |
1994 | * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock | 1995 | * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock |
1995 | * untouched. The caller must ensure that INTERRUPT trap handling takes | 1996 | * untouched. The caller must ensure that INTERRUPT trap handling takes |
1996 | * places afterwards. | 1997 | * places afterwards. |
1997 | * | 1998 | * |
1998 | * CONTEXT: | 1999 | * CONTEXT: |
1999 | * Must be called with @current->sighand->siglock held, which is released | 2000 | * Must be called with @current->sighand->siglock held, which is released |
2000 | * on %true return. | 2001 | * on %true return. |
2001 | * | 2002 | * |
2002 | * RETURNS: | 2003 | * RETURNS: |
2003 | * %false if group stop is already cancelled or ptrace trap is scheduled. | 2004 | * %false if group stop is already cancelled or ptrace trap is scheduled. |
2004 | * %true if participated in group stop. | 2005 | * %true if participated in group stop. |
2005 | */ | 2006 | */ |
2006 | static bool do_signal_stop(int signr) | 2007 | static bool do_signal_stop(int signr) |
2007 | __releases(¤t->sighand->siglock) | 2008 | __releases(¤t->sighand->siglock) |
2008 | { | 2009 | { |
2009 | struct signal_struct *sig = current->signal; | 2010 | struct signal_struct *sig = current->signal; |
2010 | 2011 | ||
2011 | if (!(current->jobctl & JOBCTL_STOP_PENDING)) { | 2012 | if (!(current->jobctl & JOBCTL_STOP_PENDING)) { |
2012 | unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; | 2013 | unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; |
2013 | struct task_struct *t; | 2014 | struct task_struct *t; |
2014 | 2015 | ||
2015 | /* signr will be recorded in task->jobctl for retries */ | 2016 | /* signr will be recorded in task->jobctl for retries */ |
2016 | WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); | 2017 | WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); |
2017 | 2018 | ||
2018 | if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || | 2019 | if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || |
2019 | unlikely(signal_group_exit(sig))) | 2020 | unlikely(signal_group_exit(sig))) |
2020 | return false; | 2021 | return false; |
2021 | /* | 2022 | /* |
2022 | * There is no group stop already in progress. We must | 2023 | * There is no group stop already in progress. We must |
2023 | * initiate one now. | 2024 | * initiate one now. |
2024 | * | 2025 | * |
2025 | * While ptraced, a task may be resumed while group stop is | 2026 | * While ptraced, a task may be resumed while group stop is |
2026 | * still in effect and then receive a stop signal and | 2027 | * still in effect and then receive a stop signal and |
2027 | * initiate another group stop. This deviates from the | 2028 | * initiate another group stop. This deviates from the |
2028 | * usual behavior as two consecutive stop signals can't | 2029 | * usual behavior as two consecutive stop signals can't |
2029 | * cause two group stops when !ptraced. That is why we | 2030 | * cause two group stops when !ptraced. That is why we |
2030 | * also check !task_is_stopped(t) below. | 2031 | * also check !task_is_stopped(t) below. |
2031 | * | 2032 | * |
2032 | * The condition can be distinguished by testing whether | 2033 | * The condition can be distinguished by testing whether |
2033 | * SIGNAL_STOP_STOPPED is already set. Don't generate | 2034 | * SIGNAL_STOP_STOPPED is already set. Don't generate |
2034 | * group_exit_code in such case. | 2035 | * group_exit_code in such case. |
2035 | * | 2036 | * |
2036 | * This is not necessary for SIGNAL_STOP_CONTINUED because | 2037 | * This is not necessary for SIGNAL_STOP_CONTINUED because |
2037 | * an intervening stop signal is required to cause two | 2038 | * an intervening stop signal is required to cause two |
2038 | * continued events regardless of ptrace. | 2039 | * continued events regardless of ptrace. |
2039 | */ | 2040 | */ |
2040 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) | 2041 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) |
2041 | sig->group_exit_code = signr; | 2042 | sig->group_exit_code = signr; |
2042 | 2043 | ||
2043 | sig->group_stop_count = 0; | 2044 | sig->group_stop_count = 0; |
2044 | 2045 | ||
2045 | if (task_set_jobctl_pending(current, signr | gstop)) | 2046 | if (task_set_jobctl_pending(current, signr | gstop)) |
2046 | sig->group_stop_count++; | 2047 | sig->group_stop_count++; |
2047 | 2048 | ||
2048 | for (t = next_thread(current); t != current; | 2049 | for (t = next_thread(current); t != current; |
2049 | t = next_thread(t)) { | 2050 | t = next_thread(t)) { |
2050 | /* | 2051 | /* |
2051 | * Setting state to TASK_STOPPED for a group | 2052 | * Setting state to TASK_STOPPED for a group |
2052 | * stop is always done with the siglock held, | 2053 | * stop is always done with the siglock held, |
2053 | * so this check has no races. | 2054 | * so this check has no races. |
2054 | */ | 2055 | */ |
2055 | if (!task_is_stopped(t) && | 2056 | if (!task_is_stopped(t) && |
2056 | task_set_jobctl_pending(t, signr | gstop)) { | 2057 | task_set_jobctl_pending(t, signr | gstop)) { |
2057 | sig->group_stop_count++; | 2058 | sig->group_stop_count++; |
2058 | if (likely(!(t->ptrace & PT_SEIZED))) | 2059 | if (likely(!(t->ptrace & PT_SEIZED))) |
2059 | signal_wake_up(t, 0); | 2060 | signal_wake_up(t, 0); |
2060 | else | 2061 | else |
2061 | ptrace_trap_notify(t); | 2062 | ptrace_trap_notify(t); |
2062 | } | 2063 | } |
2063 | } | 2064 | } |
2064 | } | 2065 | } |
2065 | 2066 | ||
2066 | if (likely(!current->ptrace)) { | 2067 | if (likely(!current->ptrace)) { |
2067 | int notify = 0; | 2068 | int notify = 0; |
2068 | 2069 | ||
2069 | /* | 2070 | /* |
2070 | * If there are no other threads in the group, or if there | 2071 | * If there are no other threads in the group, or if there |
2071 | * is a group stop in progress and we are the last to stop, | 2072 | * is a group stop in progress and we are the last to stop, |
2072 | * report to the parent. | 2073 | * report to the parent. |
2073 | */ | 2074 | */ |
2074 | if (task_participate_group_stop(current)) | 2075 | if (task_participate_group_stop(current)) |
2075 | notify = CLD_STOPPED; | 2076 | notify = CLD_STOPPED; |
2076 | 2077 | ||
2077 | __set_current_state(TASK_STOPPED); | 2078 | __set_current_state(TASK_STOPPED); |
2078 | spin_unlock_irq(¤t->sighand->siglock); | 2079 | spin_unlock_irq(¤t->sighand->siglock); |
2079 | 2080 | ||
2080 | /* | 2081 | /* |
2081 | * Notify the parent of the group stop completion. Because | 2082 | * Notify the parent of the group stop completion. Because |
2082 | * we're not holding either the siglock or tasklist_lock | 2083 | * we're not holding either the siglock or tasklist_lock |
2083 | * here, ptracer may attach inbetween; however, this is for | 2084 | * here, ptracer may attach inbetween; however, this is for |
2084 | * group stop and should always be delivered to the real | 2085 | * group stop and should always be delivered to the real |
2085 | * parent of the group leader. The new ptracer will get | 2086 | * parent of the group leader. The new ptracer will get |
2086 | * its notification when this task transitions into | 2087 | * its notification when this task transitions into |
2087 | * TASK_TRACED. | 2088 | * TASK_TRACED. |
2088 | */ | 2089 | */ |
2089 | if (notify) { | 2090 | if (notify) { |
2090 | read_lock(&tasklist_lock); | 2091 | read_lock(&tasklist_lock); |
2091 | do_notify_parent_cldstop(current, false, notify); | 2092 | do_notify_parent_cldstop(current, false, notify); |
2092 | read_unlock(&tasklist_lock); | 2093 | read_unlock(&tasklist_lock); |
2093 | } | 2094 | } |
2094 | 2095 | ||
2095 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | 2096 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ |
2096 | schedule(); | 2097 | schedule(); |
2097 | return true; | 2098 | return true; |
2098 | } else { | 2099 | } else { |
2099 | /* | 2100 | /* |
2100 | * While ptraced, group stop is handled by STOP trap. | 2101 | * While ptraced, group stop is handled by STOP trap. |
2101 | * Schedule it and let the caller deal with it. | 2102 | * Schedule it and let the caller deal with it. |
2102 | */ | 2103 | */ |
2103 | task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); | 2104 | task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); |
2104 | return false; | 2105 | return false; |
2105 | } | 2106 | } |
2106 | } | 2107 | } |
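
/*
 * A hedged illustration of the group-stop protocol above, from the
 * userspace side (standalone program, not part of this file): once every
 * thread has participated in the stop, the real parent is notified with
 * CLD_STOPPED, which waitpid(2) reports via WUNTRACED/WIFSTOPPED.
 */
#if 0   /* illustrative userspace sketch only */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int status;
        pid_t pid = fork();

        if (pid == 0)                   /* child: idle until signalled */
                for (;;)
                        pause();

        kill(pid, SIGTSTP);             /* initiates a group stop */
        waitpid(pid, &status, WUNTRACED);
        if (WIFSTOPPED(status))         /* parent saw CLD_STOPPED */
                printf("child stopped by signal %d\n", WSTOPSIG(status));

        kill(pid, SIGCONT);             /* wake it, then clean up */
        kill(pid, SIGKILL);
        waitpid(pid, &status, 0);
        return 0;
}
#endif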

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
        struct signal_struct *signal = current->signal;
        int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

        if (current->ptrace & PT_SEIZED) {
                if (!signal->group_stop_count &&
                    !(signal->flags & SIGNAL_STOP_STOPPED))
                        signr = SIGTRAP;
                WARN_ON_ONCE(!signr);
                ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
                                 CLD_STOPPED);
        } else {
                WARN_ON_ONCE(!signr);
                ptrace_stop(signr, CLD_STOPPED, 0, NULL);
                current->exit_code = 0;
        }
}
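
/*
 * A hedged sketch of how a PT_SEIZED tracer distinguishes the two traps
 * generated above (standalone userspace code, not part of this file).
 * The status layout follows ptrace(2): group-stop reports
 * signr | (PTRACE_EVENT_STOP << 8) in exit_code.
 */
#if 0   /* illustrative userspace sketch only */
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_EVENT_STOP
#define PTRACE_EVENT_STOP 128   /* from <linux/ptrace.h> on older glibc */
#endif

/* "status" came from waitpid() on a tracee attached with PTRACE_SEIZE. */
static void classify_stop(pid_t pid, int status)
{
        if (!WIFSTOPPED(status))
                return;
        if (status >> 8 == (SIGTRAP | (PTRACE_EVENT_STOP << 8)))
                printf("%d: SEIZE/INTERRUPT trap\n", pid);
        else if (status >> 16 == PTRACE_EVENT_STOP)
                printf("%d: group-stop, stop signal %d\n",
                       pid, WSTOPSIG(status));
        else
                printf("%d: signal-delivery stop, signal %d\n",
                       pid, WSTOPSIG(status));
}
#endif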

static int ptrace_signal(int signr, siginfo_t *info)
{
        ptrace_signal_deliver();
        /*
         * We do not check sig_kernel_stop(signr) but set this marker
         * unconditionally because we do not know whether debugger will
         * change signr. This flag has no meaning unless we are going
         * to stop after return from ptrace_stop(). In this case it will
         * be checked in do_signal_stop(), we should only stop if it was
         * not cleared by SIGCONT while we were sleeping. See also the
         * comment in dequeue_signal().
         */
        current->jobctl |= JOBCTL_STOP_DEQUEUED;
        ptrace_stop(signr, CLD_TRAPPED, 0, info);

        /* We're back. Did the debugger cancel the sig? */
        signr = current->exit_code;
        if (signr == 0)
                return signr;

        current->exit_code = 0;

        /*
         * Update the siginfo structure if the signal has changed. If the
         * debugger wanted something specific in the siginfo structure
         * then it should have updated *info via PTRACE_SETSIGINFO.
         */
        if (signr != info->si_signo) {
                info->si_signo = signr;
                info->si_errno = 0;
                info->si_code = SI_USER;
                rcu_read_lock();
                info->si_pid = task_pid_vnr(current->parent);
                info->si_uid = from_kuid_munged(current_user_ns(),
                                                task_uid(current->parent));
                rcu_read_unlock();
        }

        /* If the (new) signal is now blocked, requeue it. */
        if (sigismember(&current->blocked, signr)) {
                specific_send_sig_info(signr, info, current);
                signr = 0;
        }

        return signr;
}
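
/*
 * The exit_code consumed above is whatever the tracer passed as the
 * "data" argument when resuming the tracee. A hedged userspace sketch
 * (not part of this file): data == 0 cancels the signal, a nonzero value
 * delivers that signal instead, possibly a different one. SIGWINCH here
 * is an arbitrary example of a signal the tracer chooses to suppress.
 */
#if 0   /* illustrative userspace sketch only */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Tracee "pid" is stopped at a signal-delivery stop for "sig". */
static void resume_tracee(pid_t pid, int sig)
{
        if (sig == SIGWINCH)
                ptrace(PTRACE_CONT, pid, 0, 0);         /* cancel it */
        else
                ptrace(PTRACE_CONT, pid, 0, sig);       /* deliver it */
}
#endif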

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
                          struct pt_regs *regs, void *cookie)
{
        struct sighand_struct *sighand = current->sighand;
        struct signal_struct *signal = current->signal;
        int signr;

        if (unlikely(current->task_works))
                task_work_run();

        if (unlikely(uprobe_deny_signal()))
                return 0;

relock:
        /*
         * We'll jump back here after any time we were stopped in TASK_STOPPED.
         * While in TASK_STOPPED, we were considered "frozen enough".
         * Now that we woke up, it's crucial if we're supposed to be
         * frozen that we freeze now before running anything substantial.
         */
        try_to_freeze();

        spin_lock_irq(&sighand->siglock);
        /*
         * Every stopped thread goes here after wakeup. Check to see if
         * we should notify the parent, prepare_signal(SIGCONT) encodes
         * the CLD_ si_code into SIGNAL_CLD_MASK bits.
         */
        if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
                int why;

                if (signal->flags & SIGNAL_CLD_CONTINUED)
                        why = CLD_CONTINUED;
                else
                        why = CLD_STOPPED;

                signal->flags &= ~SIGNAL_CLD_MASK;

                spin_unlock_irq(&sighand->siglock);

                /*
                 * Notify the parent that we're continuing. This event is
                 * always per-process and doesn't make a whole lot of sense
                 * for ptracers, who shouldn't consume the state via
                 * wait(2) either, but, for backward compatibility, notify
                 * the ptracer of the group leader too unless it's gonna be
                 * a duplicate.
                 */
                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(current, false, why);

                if (ptrace_reparented(current->group_leader))
                        do_notify_parent_cldstop(current->group_leader,
                                                 true, why);
                read_unlock(&tasklist_lock);

                goto relock;
        }

        for (;;) {
                struct k_sigaction *ka;

                if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
                    do_signal_stop(0))
                        goto relock;

                if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
                        do_jobctl_trap();
                        spin_unlock_irq(&sighand->siglock);
                        goto relock;
                }

                signr = dequeue_signal(current, &current->blocked, info);

                if (!signr)
                        break; /* will return 0 */

                if (unlikely(current->ptrace) && signr != SIGKILL) {
                        signr = ptrace_signal(signr, info);
                        if (!signr)
                                continue;
                }

                ka = &sighand->action[signr-1];

                /* Trace actually delivered signals. */
                trace_signal_deliver(signr, info, ka);

                if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
                        continue;
                if (ka->sa.sa_handler != SIG_DFL) {
                        /* Run the handler. */
                        *return_ka = *ka;

                        if (ka->sa.sa_flags & SA_ONESHOT)
                                ka->sa.sa_handler = SIG_DFL;

                        break; /* will return non-zero "signr" value */
                }

                /*
                 * Now we are doing the default action for this signal.
                 */
                if (sig_kernel_ignore(signr)) /* Default is nothing. */
                        continue;

                /*
                 * Global init gets no signals it doesn't want.
                 * Container-init gets no signals it doesn't want from same
                 * container.
                 *
                 * Note that if global/container-init sees a sig_kernel_only()
                 * signal here, the signal must have been generated internally
                 * or must have come from an ancestor namespace. In either
                 * case, the signal cannot be dropped.
                 */
                if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
                    !sig_kernel_only(signr))
                        continue;

                if (sig_kernel_stop(signr)) {
                        /*
                         * The default action is to stop all threads in
                         * the thread group. The job control signals
                         * do nothing in an orphaned pgrp, but SIGSTOP
                         * always works. Note that siglock needs to be
                         * dropped during the call to is_orphaned_pgrp()
                         * because of lock ordering with tasklist_lock.
                         * This allows an intervening SIGCONT to be posted.
                         * We need to check for that and bail out if necessary.
                         */
                        if (signr != SIGSTOP) {
                                spin_unlock_irq(&sighand->siglock);

                                /* signals can be posted during this window */

                                if (is_current_pgrp_orphaned())
                                        goto relock;

                                spin_lock_irq(&sighand->siglock);
                        }

                        if (likely(do_signal_stop(info->si_signo))) {
                                /* It released the siglock. */
                                goto relock;
                        }

                        /*
                         * We didn't actually stop, due to a race
                         * with SIGCONT or something like that.
                         */
                        continue;
                }

                spin_unlock_irq(&sighand->siglock);

                /*
                 * Anything else is fatal, maybe with a core dump.
                 */
                current->flags |= PF_SIGNALED;

                if (sig_kernel_coredump(signr)) {
                        if (print_fatal_signals)
                                print_fatal_signal(info->si_signo);
                        /*
                         * If it was able to dump core, this kills all
                         * other threads in the group and synchronizes with
                         * their demise. If we lost the race with another
                         * thread getting here, it set group_exit_code
                         * first and our do_group_exit call below will use
                         * that value and ignore the one we pass it.
                         */
                        do_coredump(info);
                }

                /*
                 * Death signals, no core dump.
                 */
                do_group_exit(info->si_signo);
                /* NOTREACHED */
        }
        spin_unlock_irq(&sighand->siglock);
        return signr;
}
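
/*
 * The SA_ONESHOT branch above resets the handler to SIG_DFL before
 * delivery; userspace knows the same flag as SA_RESETHAND. A hedged
 * standalone sketch (not part of this file):
 */
#if 0   /* illustrative userspace sketch only */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void once(int sig)
{
        write(1, "handled once\n", 13);
}

int main(void)
{
        struct sigaction sa;

        sa.sa_handler = once;
        sa.sa_flags = SA_RESETHAND;     /* aka SA_ONESHOT */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);                 /* runs once(), handler -> SIG_DFL */
        raise(SIGUSR1);                 /* default action: terminates us */
        printf("not reached\n");
        return 0;
}
#endif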

/**
 * signal_delivered - called after a signal has been delivered
 * @sig: number of signal being delivered
 * @info: siginfo_t of signal being delivered
 * @ka: sigaction setting that chose the handler
 * @regs: user register state
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ka->sa.sa_flags). Tracing is notified.
 */
void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
                      struct pt_regs *regs, int stepping)
{
        sigset_t blocked;

        /*
         * A signal was successfully delivered, and the saved sigmask was
         * stored on the signal frame and will be restored by sigreturn.
         * So we can simply clear the restore sigmask flag.
         */
        clear_restore_sigmask();

        sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
                sigaddset(&blocked, sig);
        set_current_blocked(&blocked);
        tracehook_signal_handler(sig, info, ka, regs, stepping);
}
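
/*
 * A hedged sketch of the mask computation above as seen from userspace
 * (standalone program, not part of this file): without SA_NODEFER the
 * delivered signal is added to the blocked set for the duration of its
 * own handler; with SA_NODEFER the sigaddset() above is skipped.
 */
#if 0   /* illustrative userspace sketch only */
#include <signal.h>
#include <stdio.h>

static void handler(int sig)
{
        sigset_t cur;

        /* Read back the mask signal_delivered() installed for us. */
        sigprocmask(SIG_BLOCK, NULL, &cur);
        printf("SIGUSR1 %sblocked inside its own handler\n",
               sigismember(&cur, SIGUSR1) ? "" : "not ");
}

int main(void)
{
        struct sigaction sa;

        sa.sa_handler = handler;
        sigemptyset(&sa.sa_mask);

        sa.sa_flags = 0;                /* signal deferred while handled */
        sigaction(SIGUSR1, &sa, NULL);
        raise(SIGUSR1);

        sa.sa_flags = SA_NODEFER;       /* leave the signal unblocked */
        sigaction(SIGUSR1, &sa, NULL);
        raise(SIGUSR1);
        return 0;
}
#endif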

/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
        sigset_t retarget;
        struct task_struct *t;

        sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
        if (sigisemptyset(&retarget))
                return;

        t = tsk;
        while_each_thread(tsk, t) {
                if (t->flags & PF_EXITING)
                        continue;

                if (!has_pending_signals(&retarget, &t->blocked))
                        continue;
                /* Remove the signals this thread can handle. */
                sigandsets(&retarget, &retarget, &t->blocked);

                if (!signal_pending(t))
                        signal_wake_up(t, 0);

                if (sigisemptyset(&retarget))
                        break;
        }
}

void exit_signals(struct task_struct *tsk)
{
        int group_stop = 0;
        sigset_t unblocked;

        /*
         * @tsk is about to have PF_EXITING set - lock out users which
         * expect stable threadgroup.
         */
        threadgroup_change_begin(tsk);

        if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
                tsk->flags |= PF_EXITING;
                threadgroup_change_end(tsk);
                return;
        }

        spin_lock_irq(&tsk->sighand->siglock);
        /*
         * From now this task is not visible for group-wide signals,
         * see wants_signal(), do_signal_stop().
         */
        tsk->flags |= PF_EXITING;

        threadgroup_change_end(tsk);

        if (!signal_pending(tsk))
                goto out;

        unblocked = tsk->blocked;
        signotset(&unblocked);
        retarget_shared_pending(tsk, &unblocked);

        if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
            task_participate_group_stop(tsk))
                group_stop = CLD_STOPPED;
out:
        spin_unlock_irq(&tsk->sighand->siglock);

        /*
         * If group stop has completed, deliver the notification. This
         * should always go to the real parent of the group leader.
         */
        if (unlikely(group_stop)) {
                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(tsk, false, group_stop);
                read_unlock(&tasklist_lock);
        }
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

/**
 * sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
        struct restart_block *restart = &current_thread_info()->restart_block;
        return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
        return -EINTR;
}

static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
        if (signal_pending(tsk) && !thread_group_empty(tsk)) {
                sigset_t newblocked;
                /* A set of now blocked but previously unblocked signals. */
                sigandnsets(&newblocked, newset, &current->blocked);
                retarget_shared_pending(tsk, &newblocked);
        }
        tsk->blocked = *newset;
        recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
        struct task_struct *tsk = current;
        sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
        spin_lock_irq(&tsk->sighand->siglock);
        __set_task_blocked(tsk, newset);
        spin_unlock_irq(&tsk->sighand->siglock);
}

void __set_current_blocked(const sigset_t *newset)
{
        struct task_struct *tsk = current;

        spin_lock_irq(&tsk->sighand->siglock);
        __set_task_blocked(tsk, newset);
        spin_unlock_irq(&tsk->sighand->siglock);
}

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
        struct task_struct *tsk = current;
        sigset_t newset;

        /* Lockless, only current can change ->blocked, never from irq */
        if (oldset)
                *oldset = tsk->blocked;

        switch (how) {
        case SIG_BLOCK:
                sigorsets(&newset, &tsk->blocked, set);
                break;
        case SIG_UNBLOCK:
                sigandnsets(&newset, &tsk->blocked, set);
                break;
        case SIG_SETMASK:
                newset = *set;
                break;
        default:
                return -EINVAL;
        }

        __set_current_blocked(&newset);
        return 0;
}

/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: new set of signals to apply, or NULL to leave the mask unchanged
 * @oset: previous value of signal mask if non-null
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
                sigset_t __user *, oset, size_t, sigsetsize)
{
        sigset_t old_set, new_set;
        int error;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        old_set = current->blocked;

        if (nset) {
                if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
                        return -EFAULT;
                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

                error = sigprocmask(how, &new_set, NULL);
                if (error)
                        return error;
        }

        if (oset) {
                if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
                        return -EFAULT;
        }

        return 0;
}

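/*
 * A hedged userspace sketch of the three "how" modes handled above
 * (standalone program, not part of this file); the comments name the
 * kernel helpers each mode maps to.
 */
#if 0   /* illustrative userspace sketch only */
#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t set, old;

        sigemptyset(&set);
        sigaddset(&set, SIGINT);

        sigprocmask(SIG_BLOCK, &set, &old);     /* blocked |= set  (sigorsets) */
        sigprocmask(SIG_UNBLOCK, &set, NULL);   /* blocked &= ~set (sigandnsets) */
        sigprocmask(SIG_SETMASK, &old, NULL);   /* blocked = set */

        /* SIGKILL/SIGSTOP are silently dropped by sigdelsetmask() above. */
        sigaddset(&set, SIGKILL);
        if (sigprocmask(SIG_BLOCK, &set, NULL) == 0)
                printf("call succeeds, but SIGKILL stays unblockable\n");
        return 0;
}
#endif
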
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
        long error = -EINVAL;
        sigset_t pending;

        if (sigsetsize > sizeof(sigset_t))
                goto out;

        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&pending, &current->pending.signal,
                  &current->signal->shared_pending.signal);
        spin_unlock_irq(&current->sighand->siglock);

        /* Outside the lock because only this thread touches it. */
        sigandsets(&pending, &current->blocked, &pending);

        error = -EFAULT;
        if (!copy_to_user(set, &pending, sigsetsize))
                error = 0;

out:
        return error;
}

/**
 * sys_rt_sigpending - examine a pending signal that has been raised
 *                     while blocked
 * @set: stores pending signals
 * @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
        return do_sigpending(set, sigsetsize);
}

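/*
 * A hedged sketch of what do_sigpending() reports to userspace
 * (standalone program, not part of this file): a blocked signal that has
 * been raised shows up in the pending set instead of being delivered.
 */
#if 0   /* illustrative userspace sketch only */
#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t set, pending;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);

        raise(SIGUSR1);                 /* queued, not delivered */

        sigpending(&pending);           /* ends up in do_sigpending() */
        if (sigismember(&pending, SIGUSR1))
                printf("SIGUSR1 is pending while blocked\n");
        return 0;
}
#endif
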
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
        int err;

        if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
                return -EFAULT;
        if (from->si_code < 0)
                return __copy_to_user(to, from, sizeof(siginfo_t))
                        ? -EFAULT : 0;
        /*
         * If you change siginfo_t structure, please be sure
         * this code is fixed accordingly.
         * Please remember to update the signalfd_copyinfo() function
         * inside fs/signalfd.c too, in case siginfo_t changes.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
         */
        err = __put_user(from->si_signo, &to->si_signo);
        err |= __put_user(from->si_errno, &to->si_errno);
        err |= __put_user((short)from->si_code, &to->si_code);
        switch (from->si_code & __SI_MASK) {
        case __SI_KILL:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        case __SI_TIMER:
                err |= __put_user(from->si_tid, &to->si_tid);
                err |= __put_user(from->si_overrun, &to->si_overrun);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
        case __SI_POLL:
                err |= __put_user(from->si_band, &to->si_band);
                err |= __put_user(from->si_fd, &to->si_fd);
                break;
        case __SI_FAULT:
                err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
                err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
                /*
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
                if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
                break;
        case __SI_CHLD:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_status, &to->si_status);
                err |= __put_user(from->si_utime, &to->si_utime);
                err |= __put_user(from->si_stime, &to->si_stime);
                break;
        case __SI_RT: /* This is not generated by the kernel as of now. */
        case __SI_MESGQ: /* But this is */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
#ifdef __ARCH_SIGSYS
        case __SI_SYS:
                err |= __put_user(from->si_call_addr, &to->si_call_addr);
                err |= __put_user(from->si_syscall, &to->si_syscall);
                err |= __put_user(from->si_arch, &to->si_arch);
                break;
#endif
        default: /* this is just in case for now ... */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        }
        return err;
}

#endif

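/*
 * A hedged sketch of the consumer side of copy_siginfo_to_user()
 * (standalone program, not part of this file): for a kill(2)-style
 * signal the relevant union member is si_pid/si_uid, matching the
 * __SI_KILL case above.
 */
#if 0   /* illustrative userspace sketch only */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void info_handler(int sig, siginfo_t *si, void *uctx)
{
        if (si->si_code == SI_USER)
                printf("sig %d from pid %d uid %d\n",
                       sig, (int)si->si_pid, (int)si->si_uid);
}

int main(void)
{
        struct sigaction sa;

        sa.sa_sigaction = info_handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        kill(getpid(), SIGUSR1);
        return 0;
}
#endif
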
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
                    const struct timespec *ts)
{
        struct task_struct *tsk = current;
        long timeout = MAX_SCHEDULE_TIMEOUT;
        sigset_t mask = *which;
        int sig;

        if (ts) {
                if (!timespec_valid(ts))
                        return -EINVAL;
                timeout = timespec_to_jiffies(ts);
                /*
                 * We can be close to the next tick, add another one
                 * to ensure we will wait at least the time asked for.
                 */
                if (ts->tv_sec || ts->tv_nsec)
                        timeout++;
        }

        /*
         * Invert the set of allowed signals to get those we want to block.
         */
        sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
        signotset(&mask);

        spin_lock_irq(&tsk->sighand->siglock);
        sig = dequeue_signal(tsk, &mask, info);
        if (!sig && timeout) {
                /*
                 * None ready; temporarily unblock the signals we're
                 * interested in while we are sleeping, so that we'll be
                 * awakened when they arrive. Unblocking is always fine,
                 * we can avoid set_current_blocked().
                 */
                tsk->real_blocked = tsk->blocked;
                sigandsets(&tsk->blocked, &tsk->blocked, &mask);
                recalc_sigpending();
                spin_unlock_irq(&tsk->sighand->siglock);

                timeout = schedule_timeout_interruptible(timeout);

                spin_lock_irq(&tsk->sighand->siglock);
                __set_task_blocked(tsk, &tsk->real_blocked);
                siginitset(&tsk->real_blocked, 0);
                sig = dequeue_signal(tsk, &mask, info);
        }
        spin_unlock_irq(&tsk->sighand->siglock);

        if (sig)
                return sig;
        return timeout ? -EINTR : -EAGAIN;
}

/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *                       in @uthese
 * @uthese: queued signals to wait for
 * @uinfo: if non-null, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
                siginfo_t __user *, uinfo, const struct timespec __user *, uts,
                size_t, sigsetsize)
{
        sigset_t these;
        struct timespec ts;
        siginfo_t info;
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&these, uthese, sizeof(these)))
                return -EFAULT;

        if (uts) {
                if (copy_from_user(&ts, uts, sizeof(ts)))
                        return -EFAULT;
        }

        ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

        if (ret > 0 && uinfo) {
                if (copy_siginfo_to_user(uinfo, &info))
                        ret = -EFAULT;
        }

        return ret;
}

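/*
 * A hedged sketch of driving the syscall above from userspace
 * (standalone program, not part of this file). The waited-for signals
 * must be blocked first, or they may be delivered to a handler instead
 * of waking the waiter; on timeout do_sigtimedwait() yields -EAGAIN.
 */
#if 0   /* illustrative userspace sketch only */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);

        sig = sigtimedwait(&set, &info, &ts);
        if (sig == SIGUSR1)
                printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
        else
                perror("sigtimedwait");  /* EAGAIN on timeout */
        return 0;
}
#endif
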
/**
 * sys_kill - send a signal to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
        struct siginfo info;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_USER;
        info.si_pid = task_tgid_vnr(current);
        info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

        return kill_something_info(sig, &info, pid);
}

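/*
 * A hedged sketch of the pid conventions kill_something_info() accepts,
 * per kill(2) (standalone program, not part of this file): pid > 0 names
 * one process, pid == 0 the caller's process group, pid == -1 every
 * process the caller may signal, and pid < -1 the group -pid. sig == 0
 * is the existence/permission probe mentioned in do_send_specific()
 * below.
 */
#if 0   /* illustrative userspace sketch only */
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();

        if (child == 0)
                for (;;)
                        pause();

        kill(child, 0);                 /* probe only, nothing delivered */
        kill(0, 0);                     /* probe our own process group */
        kill(child, SIGTERM);           /* pid > 0: exactly one process */
        waitpid(child, NULL, 0);
        return 0;
}
#endif
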
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
        struct task_struct *p;
        int error = -ESRCH;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
                error = check_kill_permission(sig, info, p);
                /*
                 * The null signal is a permissions and process existence
                 * probe. No signal is actually delivered.
                 */
                if (!error && sig) {
                        error = do_send_sig_info(sig, info, p, false);
                        /*
                         * If lock_task_sighand() failed we pretend the task
                         * dies after receiving the signal. The window is tiny,
                         * and the signal is private anyway.
                         */
                        if (unlikely(error == -ESRCH))
                                error = 0;
                }
        }
        rcu_read_unlock();

        return error;
}

2889 | static int do_tkill(pid_t tgid, pid_t pid, int sig) | 2890 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
2890 | { | 2891 | { |
2891 | struct siginfo info; | 2892 | struct siginfo info; |
2892 | 2893 | ||
2893 | info.si_signo = sig; | 2894 | info.si_signo = sig; |
2894 | info.si_errno = 0; | 2895 | info.si_errno = 0; |
2895 | info.si_code = SI_TKILL; | 2896 | info.si_code = SI_TKILL; |
2896 | info.si_pid = task_tgid_vnr(current); | 2897 | info.si_pid = task_tgid_vnr(current); |
2897 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); | 2898 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
2898 | 2899 | ||
2899 | return do_send_specific(tgid, pid, sig, &info); | 2900 | return do_send_specific(tgid, pid, sig, &info); |
2900 | } | 2901 | } |
2901 | 2902 | ||
2902 | /** | 2903 | /** |
2903 | * sys_tgkill - send signal to one specific thread | 2904 | * sys_tgkill - send signal to one specific thread |
2904 | * @tgid: the thread group ID of the thread | 2905 | * @tgid: the thread group ID of the thread |
2905 | * @pid: the PID of the thread | 2906 | * @pid: the PID of the thread |
2906 | * @sig: signal to be sent | 2907 | * @sig: signal to be sent |
2907 | * | 2908 | * |
2908 | * This syscall also checks the @tgid and returns -ESRCH even if the PID | 2909 | * This syscall also checks the @tgid and returns -ESRCH even if the PID |
2909 | * exists but no longer belongs to the target process. This | 2910 | * exists but no longer belongs to the target process. This |
2910 | * method solves the problem of threads exiting and PIDs getting reused. | 2911 | * method solves the problem of threads exiting and PIDs getting reused. |
2911 | */ | 2912 | */ |
2912 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) | 2913 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
2913 | { | 2914 | { |
2914 | /* This is only valid for single tasks */ | 2915 | /* This is only valid for single tasks */ |
2915 | if (pid <= 0 || tgid <= 0) | 2916 | if (pid <= 0 || tgid <= 0) |
2916 | return -EINVAL; | 2917 | return -EINVAL; |
2917 | 2918 | ||
2918 | return do_tkill(tgid, pid, sig); | 2919 | return do_tkill(tgid, pid, sig); |
2919 | } | 2920 | } |
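
glibc of this era ships no tgkill() wrapper, so callers go through syscall(2). A minimal sketch (not part of this commit; assumes Linux headers): passing the thread group id alongside the tid is what lets the kernel reject a recycled tid, per the comment above.

        #define _GNU_SOURCE
        #include <sys/syscall.h>
        #include <unistd.h>

        static int my_tgkill(pid_t tgid, pid_t tid, int sig)
        {
                /* tgid guards against tid reuse by an unrelated process. */
                return syscall(SYS_tgkill, tgid, tid, sig);
        }

        int main(void)
        {
                /* Null-signal probe aimed at this very thread. */
                return my_tgkill(getpid(), (pid_t)syscall(SYS_gettid), 0);
        }
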
2920 | 2921 | ||
2921 | /** | 2922 | /** |
2922 | * sys_tkill - send signal to one specific task | 2923 | * sys_tkill - send signal to one specific task |
2923 | * @pid: the PID of the task | 2924 | * @pid: the PID of the task |
2924 | * @sig: signal to be sent | 2925 | * @sig: signal to be sent |
2925 | * | 2926 | * |
2926 | * Send a signal to only one task, even if it's a CLONE_THREAD task. | 2927 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
2927 | */ | 2928 | */ |
2928 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) | 2929 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
2929 | { | 2930 | { |
2930 | /* This is only valid for single tasks */ | 2931 | /* This is only valid for single tasks */ |
2931 | if (pid <= 0) | 2932 | if (pid <= 0) |
2932 | return -EINVAL; | 2933 | return -EINVAL; |
2933 | 2934 | ||
2934 | return do_tkill(0, pid, sig); | 2935 | return do_tkill(0, pid, sig); |
2935 | } | 2936 | } |
2936 | 2937 | ||
2937 | /** | 2938 | /** |
2939 | * sys_rt_sigqueueinfo - queue a signal and data to a process | 2940 | * sys_rt_sigqueueinfo - queue a signal and data to a process |
2940 | * @pid: the PID of the process | 2941 | * @pid: the PID of the process |
2940 | * @sig: signal to be sent | 2941 | * @sig: signal to be sent |
2941 | * @uinfo: signal info to be sent | 2942 | * @uinfo: signal info to be sent |
2942 | */ | 2943 | */ |
2943 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, | 2944 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
2944 | siginfo_t __user *, uinfo) | 2945 | siginfo_t __user *, uinfo) |
2945 | { | 2946 | { |
2946 | siginfo_t info; | 2947 | siginfo_t info; |
2947 | 2948 | ||
2948 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) | 2949 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
2949 | return -EFAULT; | 2950 | return -EFAULT; |
2950 | 2951 | ||
2951 | /* Not even root can pretend to send signals from the kernel. | 2952 | /* Not even root can pretend to send signals from the kernel. |
2952 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 2953 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
2953 | */ | 2954 | */ |
2954 | if (info.si_code >= 0 || info.si_code == SI_TKILL) { | 2955 | if (info.si_code >= 0 || info.si_code == SI_TKILL) { |
2955 | /* We used to allow any < 0 si_code */ | 2956 | /* We used to allow any < 0 si_code */ |
2956 | WARN_ON_ONCE(info.si_code < 0); | 2957 | WARN_ON_ONCE(info.si_code < 0); |
2957 | return -EPERM; | 2958 | return -EPERM; |
2958 | } | 2959 | } |
2959 | info.si_signo = sig; | 2960 | info.si_signo = sig; |
2960 | 2961 | ||
2961 | /* POSIX.1b doesn't mention process groups. */ | 2962 | /* POSIX.1b doesn't mention process groups. */ |
2962 | return kill_proc_info(sig, &info, pid); | 2963 | return kill_proc_info(sig, &info, pid); |
2963 | } | 2964 | } |
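
The si_code check above is why userspace normally reaches this syscall through sigqueue(3): glibc fills in si_code = SI_QUEUE, a negative value, so the EPERM branch is not taken, while a caller forging si_code >= 0 or SI_TKILL is rejected. The per-thread variant below, rt_tgsigqueueinfo, backs pthread_sigqueue(3) the same way. A sketch of the usual usage (not part of this commit; printf in a handler is not async-signal-safe, but fine for a demo):

        #include <signal.h>
        #include <stdio.h>
        #include <unistd.h>

        static void handler(int sig, siginfo_t *info, void *ctx)
        {
                (void)ctx;
                printf("sig %d, payload %d, si_code %d\n",
                       sig, info->si_value.sival_int, info->si_code);
        }

        int main(void)
        {
                struct sigaction sa = { 0 };
                union sigval v = { .sival_int = 42 };

                sa.sa_sigaction = handler;
                sa.sa_flags = SA_SIGINFO;
                sigemptyset(&sa.sa_mask);
                sigaction(SIGUSR1, &sa, NULL);

                /* Unblocked and aimed at ourselves: delivered (and
                 * handled) before sigqueue() returns. */
                sigqueue(getpid(), SIGUSR1, v);
                return 0;
        }
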
2964 | 2965 | ||
2965 | long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) | 2966 | long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) |
2966 | { | 2967 | { |
2967 | /* This is only valid for single tasks */ | 2968 | /* This is only valid for single tasks */ |
2968 | if (pid <= 0 || tgid <= 0) | 2969 | if (pid <= 0 || tgid <= 0) |
2969 | return -EINVAL; | 2970 | return -EINVAL; |
2970 | 2971 | ||
2971 | /* Not even root can pretend to send signals from the kernel. | 2972 | /* Not even root can pretend to send signals from the kernel. |
2972 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 2973 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
2973 | */ | 2974 | */ |
2974 | if (info->si_code >= 0 || info->si_code == SI_TKILL) { | 2975 | if (info->si_code >= 0 || info->si_code == SI_TKILL) { |
2975 | /* We used to allow any < 0 si_code */ | 2976 | /* We used to allow any < 0 si_code */ |
2976 | WARN_ON_ONCE(info->si_code < 0); | 2977 | WARN_ON_ONCE(info->si_code < 0); |
2977 | return -EPERM; | 2978 | return -EPERM; |
2978 | } | 2979 | } |
2979 | info->si_signo = sig; | 2980 | info->si_signo = sig; |
2980 | 2981 | ||
2981 | return do_send_specific(tgid, pid, sig, info); | 2982 | return do_send_specific(tgid, pid, sig, info); |
2982 | } | 2983 | } |
2983 | 2984 | ||
2984 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, | 2985 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, |
2985 | siginfo_t __user *, uinfo) | 2986 | siginfo_t __user *, uinfo) |
2986 | { | 2987 | { |
2987 | siginfo_t info; | 2988 | siginfo_t info; |
2988 | 2989 | ||
2989 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) | 2990 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
2990 | return -EFAULT; | 2991 | return -EFAULT; |
2991 | 2992 | ||
2992 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); | 2993 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
2993 | } | 2994 | } |
2994 | 2995 | ||
2995 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | 2996 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
2996 | { | 2997 | { |
2997 | struct task_struct *t = current; | 2998 | struct task_struct *t = current; |
2998 | struct k_sigaction *k; | 2999 | struct k_sigaction *k; |
2999 | sigset_t mask; | 3000 | sigset_t mask; |
3000 | 3001 | ||
3001 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) | 3002 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
3002 | return -EINVAL; | 3003 | return -EINVAL; |
3003 | 3004 | ||
3004 | k = &t->sighand->action[sig-1]; | 3005 | k = &t->sighand->action[sig-1]; |
3005 | 3006 | ||
3006 | spin_lock_irq(¤t->sighand->siglock); | 3007 | spin_lock_irq(¤t->sighand->siglock); |
3007 | if (oact) | 3008 | if (oact) |
3008 | *oact = *k; | 3009 | *oact = *k; |
3009 | 3010 | ||
3010 | if (act) { | 3011 | if (act) { |
3011 | sigdelsetmask(&act->sa.sa_mask, | 3012 | sigdelsetmask(&act->sa.sa_mask, |
3012 | sigmask(SIGKILL) | sigmask(SIGSTOP)); | 3013 | sigmask(SIGKILL) | sigmask(SIGSTOP)); |
3013 | *k = *act; | 3014 | *k = *act; |
3014 | /* | 3015 | /* |
3015 | * POSIX 3.3.1.3: | 3016 | * POSIX 3.3.1.3: |
3016 | * "Setting a signal action to SIG_IGN for a signal that is | 3017 | * "Setting a signal action to SIG_IGN for a signal that is |
3017 | * pending shall cause the pending signal to be discarded, | 3018 | * pending shall cause the pending signal to be discarded, |
3018 | * whether or not it is blocked." | 3019 | * whether or not it is blocked." |
3019 | * | 3020 | * |
3020 | * "Setting a signal action to SIG_DFL for a signal that is | 3021 | * "Setting a signal action to SIG_DFL for a signal that is |
3021 | * pending and whose default action is to ignore the signal | 3022 | * pending and whose default action is to ignore the signal |
3022 | * (for example, SIGCHLD), shall cause the pending signal to | 3023 | * (for example, SIGCHLD), shall cause the pending signal to |
3023 | * be discarded, whether or not it is blocked" | 3024 | * be discarded, whether or not it is blocked" |
3024 | */ | 3025 | */ |
3025 | if (sig_handler_ignored(sig_handler(t, sig), sig)) { | 3026 | if (sig_handler_ignored(sig_handler(t, sig), sig)) { |
3026 | sigemptyset(&mask); | 3027 | sigemptyset(&mask); |
3027 | sigaddset(&mask, sig); | 3028 | sigaddset(&mask, sig); |
3028 | rm_from_queue_full(&mask, &t->signal->shared_pending); | 3029 | rm_from_queue_full(&mask, &t->signal->shared_pending); |
3029 | do { | 3030 | do { |
3030 | rm_from_queue_full(&mask, &t->pending); | 3031 | rm_from_queue_full(&mask, &t->pending); |
3031 | t = next_thread(t); | 3032 | t = next_thread(t); |
3032 | } while (t != current); | 3033 | } while (t != current); |
3033 | } | 3034 | } |
3034 | } | 3035 | } |
3035 | 3036 | ||
3036 | spin_unlock_irq(¤t->sighand->siglock); | 3037 | spin_unlock_irq(¤t->sighand->siglock); |
3037 | return 0; | 3038 | return 0; |
3038 | } | 3039 | } |
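
The POSIX discard rule quoted above is observable from userspace. A sketch (not part of this commit): a signal raised while blocked stays pending until its disposition becomes ignore, at which point it vanishes from the pending set.

        #include <assert.h>
        #include <signal.h>

        int main(void)
        {
                sigset_t set, pend;

                sigemptyset(&set);
                sigaddset(&set, SIGUSR1);
                sigprocmask(SIG_BLOCK, &set, NULL);

                raise(SIGUSR1);                 /* pending, since it is blocked */
                sigpending(&pend);
                assert(sigismember(&pend, SIGUSR1));

                signal(SIGUSR1, SIG_IGN);       /* discards the pending instance */
                sigpending(&pend);
                assert(!sigismember(&pend, SIGUSR1));
                return 0;
        }
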
3039 | 3040 | ||
3040 | int | 3041 | int |
3041 | do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) | 3042 | do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) |
3042 | { | 3043 | { |
3043 | stack_t oss; | 3044 | stack_t oss; |
3044 | int error; | 3045 | int error; |
3045 | 3046 | ||
3046 | oss.ss_sp = (void __user *) current->sas_ss_sp; | 3047 | oss.ss_sp = (void __user *) current->sas_ss_sp; |
3047 | oss.ss_size = current->sas_ss_size; | 3048 | oss.ss_size = current->sas_ss_size; |
3048 | oss.ss_flags = sas_ss_flags(sp); | 3049 | oss.ss_flags = sas_ss_flags(sp); |
3049 | 3050 | ||
3050 | if (uss) { | 3051 | if (uss) { |
3051 | void __user *ss_sp; | 3052 | void __user *ss_sp; |
3052 | size_t ss_size; | 3053 | size_t ss_size; |
3053 | int ss_flags; | 3054 | int ss_flags; |
3054 | 3055 | ||
3055 | error = -EFAULT; | 3056 | error = -EFAULT; |
3056 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) | 3057 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) |
3057 | goto out; | 3058 | goto out; |
3058 | error = __get_user(ss_sp, &uss->ss_sp) | | 3059 | error = __get_user(ss_sp, &uss->ss_sp) | |
3059 | __get_user(ss_flags, &uss->ss_flags) | | 3060 | __get_user(ss_flags, &uss->ss_flags) | |
3060 | __get_user(ss_size, &uss->ss_size); | 3061 | __get_user(ss_size, &uss->ss_size); |
3061 | if (error) | 3062 | if (error) |
3062 | goto out; | 3063 | goto out; |
3063 | 3064 | ||
3064 | error = -EPERM; | 3065 | error = -EPERM; |
3065 | if (on_sig_stack(sp)) | 3066 | if (on_sig_stack(sp)) |
3066 | goto out; | 3067 | goto out; |
3067 | 3068 | ||
3068 | error = -EINVAL; | 3069 | error = -EINVAL; |
3069 | /* | 3070 | /* |
3070 | * Note - this code used to test ss_flags incorrectly: | 3071 | * Note - this code used to test ss_flags incorrectly: |
3071 | * old code may have been written using ss_flags==0 | 3072 | * old code may have been written using ss_flags==0 |
3072 | * to mean ss_flags==SS_ONSTACK (as this was the only | 3073 | * to mean ss_flags==SS_ONSTACK (as this was the only |
3073 | * way that worked) - this fix preserves that older | 3074 | * way that worked) - this fix preserves that older |
3074 | * mechanism. | 3075 | * mechanism. |
3075 | */ | 3076 | */ |
3076 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) | 3077 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) |
3077 | goto out; | 3078 | goto out; |
3078 | 3079 | ||
3079 | if (ss_flags == SS_DISABLE) { | 3080 | if (ss_flags == SS_DISABLE) { |
3080 | ss_size = 0; | 3081 | ss_size = 0; |
3081 | ss_sp = NULL; | 3082 | ss_sp = NULL; |
3082 | } else { | 3083 | } else { |
3083 | error = -ENOMEM; | 3084 | error = -ENOMEM; |
3084 | if (ss_size < MINSIGSTKSZ) | 3085 | if (ss_size < MINSIGSTKSZ) |
3085 | goto out; | 3086 | goto out; |
3086 | } | 3087 | } |
3087 | 3088 | ||
3088 | current->sas_ss_sp = (unsigned long) ss_sp; | 3089 | current->sas_ss_sp = (unsigned long) ss_sp; |
3089 | current->sas_ss_size = ss_size; | 3090 | current->sas_ss_size = ss_size; |
3090 | } | 3091 | } |
3091 | 3092 | ||
3092 | error = 0; | 3093 | error = 0; |
3093 | if (uoss) { | 3094 | if (uoss) { |
3094 | error = -EFAULT; | 3095 | error = -EFAULT; |
3095 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) | 3096 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) |
3096 | goto out; | 3097 | goto out; |
3097 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | | 3098 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | |
3098 | __put_user(oss.ss_size, &uoss->ss_size) | | 3099 | __put_user(oss.ss_size, &uoss->ss_size) | |
3099 | __put_user(oss.ss_flags, &uoss->ss_flags); | 3100 | __put_user(oss.ss_flags, &uoss->ss_flags); |
3100 | } | 3101 | } |
3101 | 3102 | ||
3102 | out: | 3103 | out: |
3103 | return error; | 3104 | return error; |
3104 | } | 3105 | } |
3105 | #ifdef CONFIG_GENERIC_SIGALTSTACK | 3106 | #ifdef CONFIG_GENERIC_SIGALTSTACK |
3106 | SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) | 3107 | SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) |
3107 | { | 3108 | { |
3108 | return do_sigaltstack(uss, uoss, current_user_stack_pointer()); | 3109 | return do_sigaltstack(uss, uoss, current_user_stack_pointer()); |
3109 | } | 3110 | } |
3110 | #endif | 3111 | #endif |
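
From userspace, the usual reason to call sigaltstack(2) is to keep a SIGSEGV handler runnable after the main stack overflows; the EPERM branch above is why the stack cannot be changed while code is already executing on it. A minimal sketch (not part of this commit; fprintf in a handler is not async-signal-safe, but fine for a demo):

        #include <signal.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <unistd.h>

        static void on_segv(int sig)
        {
                fprintf(stderr, "caught %d on the alternate stack\n", sig);
                _exit(1);
        }

        int main(void)
        {
                stack_t ss = { 0 };
                struct sigaction sa = { 0 };

                ss.ss_sp = malloc(SIGSTKSZ);
                ss.ss_size = SIGSTKSZ;          /* below MINSIGSTKSZ => ENOMEM */
                if (sigaltstack(&ss, NULL) < 0)
                        return 1;

                sa.sa_handler = on_segv;
                sa.sa_flags = SA_ONSTACK;       /* deliver on the stack we just set */
                sigemptyset(&sa.sa_mask);
                sigaction(SIGSEGV, &sa, NULL);

                raise(SIGSEGV);
                return 0;
        }
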
3111 | 3112 | ||
3112 | int restore_altstack(const stack_t __user *uss) | 3113 | int restore_altstack(const stack_t __user *uss) |
3113 | { | 3114 | { |
3114 | int err = do_sigaltstack(uss, NULL, current_user_stack_pointer()); | 3115 | int err = do_sigaltstack(uss, NULL, current_user_stack_pointer()); |
3115 | /* squash all but EFAULT for now */ | 3116 | /* squash all but EFAULT for now */ |
3116 | return err == -EFAULT ? err : 0; | 3117 | return err == -EFAULT ? err : 0; |
3117 | } | 3118 | } |
3119 | |||
3120 | #ifdef CONFIG_COMPAT | ||
3121 | #ifdef CONFIG_GENERIC_SIGALTSTACK | ||
3122 | asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, | ||
3123 | compat_stack_t __user *uoss_ptr) | ||
3124 | { | ||
3125 | stack_t uss, uoss; | ||
3126 | int ret; | ||
3127 | mm_segment_t seg; | ||
3128 | |||
3129 | if (uss_ptr) { | ||
3130 | compat_stack_t uss32; | ||
3131 | |||
3132 | memset(&uss, 0, sizeof(stack_t)); | ||
3133 | if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) | ||
3134 | return -EFAULT; | ||
3135 | uss.ss_sp = compat_ptr(uss32.ss_sp); | ||
3136 | uss.ss_flags = uss32.ss_flags; | ||
3137 | uss.ss_size = uss32.ss_size; | ||
3138 | } | ||
3139 | seg = get_fs(); | ||
3140 | set_fs(KERNEL_DS); | ||
3141 | ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL), | ||
3142 | (stack_t __force __user *) &uoss, | ||
3143 | compat_user_stack_pointer()); | ||
3144 | set_fs(seg); | ||
3145 | if (ret >= 0 && uoss_ptr) { | ||
3146 | if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) || | ||
3147 | __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || | ||
3148 | __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) || | ||
3149 | __put_user(uoss.ss_size, &uoss_ptr->ss_size)) | ||
3150 | ret = -EFAULT; | ||
3151 | } | ||
3152 | return ret; | ||
3153 | } | ||
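
The get_fs()/set_fs(KERNEL_DS) pair around the do_sigaltstack() call is what keeps this wrapper generic: do_sigaltstack() takes __user pointers and does its own copy_from_user()/copy_to_user(), so the wrapper first translates the 32-bit compat_stack_t into a native stack_t on the kernel stack, temporarily widens the address limit so those user-copy helpers accept kernel addresses, then restores the limit and converts the result back with ptr_to_compat().
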
3154 | |||
3155 | int compat_restore_altstack(const compat_stack_t __user *uss) | ||
3156 | { | ||
3157 | int err = compat_sys_sigaltstack(uss, NULL); | ||
3158 | /* squash all but -EFAULT for now */ | ||
3159 | return err == -EFAULT ? err : 0; | ||
3160 | } | ||
3161 | #endif | ||
3162 | #endif | ||
3118 | 3163 | ||
3119 | #ifdef __ARCH_WANT_SYS_SIGPENDING | 3164 | #ifdef __ARCH_WANT_SYS_SIGPENDING |
3120 | 3165 | ||
3121 | /** | 3166 | /** |
3122 | * sys_sigpending - examine pending signals | 3167 | * sys_sigpending - examine pending signals |
3123 | * @set: where the mask of pending signals is returned | 3168 | * @set: where the mask of pending signals is returned |
3124 | */ | 3169 | */ |
3125 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) | 3170 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) |
3126 | { | 3171 | { |
3127 | return do_sigpending(set, sizeof(*set)); | 3172 | return do_sigpending(set, sizeof(*set)); |
3128 | } | 3173 | } |
3129 | 3174 | ||
3130 | #endif | 3175 | #endif |
3131 | 3176 | ||
3132 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK | 3177 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK |
3133 | /** | 3178 | /** |
3134 | * sys_sigprocmask - examine and change blocked signals | 3179 | * sys_sigprocmask - examine and change blocked signals |
3135 | * @how: whether to add, remove, or set signals | 3180 | * @how: whether to add, remove, or set signals |
3136 | * @nset: signals to add or remove (if non-null) | 3181 | * @nset: signals to add or remove (if non-null) |
3137 | * @oset: previous value of signal mask if non-null | 3182 | * @oset: previous value of signal mask if non-null |
3138 | * | 3183 | * |
3139 | * Some platforms have their own version with special arguments; | 3184 | * Some platforms have their own version with special arguments; |
3140 | * others support only sys_rt_sigprocmask. | 3185 | * others support only sys_rt_sigprocmask. |
3141 | */ | 3186 | */ |
3142 | 3187 | ||
3143 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, | 3188 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, |
3144 | old_sigset_t __user *, oset) | 3189 | old_sigset_t __user *, oset) |
3145 | { | 3190 | { |
3146 | old_sigset_t old_set, new_set; | 3191 | old_sigset_t old_set, new_set; |
3147 | sigset_t new_blocked; | 3192 | sigset_t new_blocked; |
3148 | 3193 | ||
3149 | old_set = current->blocked.sig[0]; | 3194 | old_set = current->blocked.sig[0]; |
3150 | 3195 | ||
3151 | if (nset) { | 3196 | if (nset) { |
3152 | if (copy_from_user(&new_set, nset, sizeof(*nset))) | 3197 | if (copy_from_user(&new_set, nset, sizeof(*nset))) |
3153 | return -EFAULT; | 3198 | return -EFAULT; |
3154 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); | 3199 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); |
3155 | 3200 | ||
3156 | new_blocked = current->blocked; | 3201 | new_blocked = current->blocked; |
3157 | 3202 | ||
3158 | switch (how) { | 3203 | switch (how) { |
3159 | case SIG_BLOCK: | 3204 | case SIG_BLOCK: |
3160 | sigaddsetmask(&new_blocked, new_set); | 3205 | sigaddsetmask(&new_blocked, new_set); |
3161 | break; | 3206 | break; |
3162 | case SIG_UNBLOCK: | 3207 | case SIG_UNBLOCK: |
3163 | sigdelsetmask(&new_blocked, new_set); | 3208 | sigdelsetmask(&new_blocked, new_set); |
3164 | break; | 3209 | break; |
3165 | case SIG_SETMASK: | 3210 | case SIG_SETMASK: |
3166 | new_blocked.sig[0] = new_set; | 3211 | new_blocked.sig[0] = new_set; |
3167 | break; | 3212 | break; |
3168 | default: | 3213 | default: |
3169 | return -EINVAL; | 3214 | return -EINVAL; |
3170 | } | 3215 | } |
3171 | 3216 | ||
3172 | __set_current_blocked(&new_blocked); | 3217 | __set_current_blocked(&new_blocked); |
3173 | } | 3218 | } |
3174 | 3219 | ||
3175 | if (oset) { | 3220 | if (oset) { |
3176 | if (copy_to_user(oset, &old_set, sizeof(*oset))) | 3221 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
3177 | return -EFAULT; | 3222 | return -EFAULT; |
3178 | } | 3223 | } |
3179 | 3224 | ||
3180 | return 0; | 3225 | return 0; |
3181 | } | 3226 | } |
3182 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ | 3227 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
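
This legacy entry point manipulates only the first word of the blocked set (old_sigset_t), but the calling pattern is the same as for rt_sigprocmask; any @how other than the three cases above yields -EINVAL. A sketch of the usual save-and-restore idiom (not part of this commit):

        #include <signal.h>

        static void do_work(void) { /* ... */ }

        int main(void)
        {
                sigset_t block, old;

                sigemptyset(&block);
                sigaddset(&block, SIGINT);

                sigprocmask(SIG_BLOCK, &block, &old);   /* add to the mask */
                do_work();                              /* SIGINT held off here */
                sigprocmask(SIG_SETMASK, &old, NULL);   /* restore, not unblock */
                return 0;
        }

Restoring with SIG_SETMASK rather than SIG_UNBLOCK matters: if SIGINT was already blocked on entry, unblocking would clobber the caller's mask.
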
3183 | 3228 | ||
3184 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION | 3229 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION |
3185 | /** | 3230 | /** |
3186 | * sys_rt_sigaction - alter an action taken by a process | 3231 | * sys_rt_sigaction - alter an action taken by a process |
3187 | * @sig: signal to be sent | 3232 | * @sig: signal to be sent |
3188 | * @act: new sigaction | 3233 | * @act: new sigaction |
3189 | * @oact: used to save the previous sigaction | 3234 | * @oact: used to save the previous sigaction |
3190 | * @sigsetsize: size of sigset_t type | 3235 | * @sigsetsize: size of sigset_t type |
3191 | */ | 3236 | */ |
3192 | SYSCALL_DEFINE4(rt_sigaction, int, sig, | 3237 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
3193 | const struct sigaction __user *, act, | 3238 | const struct sigaction __user *, act, |
3194 | struct sigaction __user *, oact, | 3239 | struct sigaction __user *, oact, |
3195 | size_t, sigsetsize) | 3240 | size_t, sigsetsize) |
3196 | { | 3241 | { |
3197 | struct k_sigaction new_sa, old_sa; | 3242 | struct k_sigaction new_sa, old_sa; |
3198 | int ret = -EINVAL; | 3243 | int ret = -EINVAL; |
3199 | 3244 | ||
3200 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 3245 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
3201 | if (sigsetsize != sizeof(sigset_t)) | 3246 | if (sigsetsize != sizeof(sigset_t)) |
3202 | goto out; | 3247 | goto out; |
3203 | 3248 | ||
3204 | if (act) { | 3249 | if (act) { |
3205 | if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) | 3250 | if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) |
3206 | return -EFAULT; | 3251 | return -EFAULT; |
3207 | } | 3252 | } |
3208 | 3253 | ||
3209 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); | 3254 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); |
3210 | 3255 | ||
3211 | if (!ret && oact) { | 3256 | if (!ret && oact) { |
3212 | if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) | 3257 | if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) |
3213 | return -EFAULT; | 3258 | return -EFAULT; |
3214 | } | 3259 | } |
3215 | out: | 3260 | out: |
3216 | return ret; | 3261 | return ret; |
3217 | } | 3262 | } |
3218 | #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ | 3263 | #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ |
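
glibc's sigaction(2) wrapper lands here, passing sigsetsize = _NSIG / 8 so the size check above succeeds. Either pointer may be NULL; passing act == NULL makes this a pure query, as in this sketch (not part of this commit):

        #include <signal.h>
        #include <stdio.h>

        int main(void)
        {
                struct sigaction old;

                /* act == NULL: read the current disposition, change nothing. */
                if (sigaction(SIGINT, NULL, &old) == 0)
                        printf("SIGINT handler %s the default\n",
                               old.sa_handler == SIG_DFL ? "is" : "is not");
                return 0;
        }
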
3219 | 3264 | ||
3220 | #ifdef __ARCH_WANT_SYS_SGETMASK | 3265 | #ifdef __ARCH_WANT_SYS_SGETMASK |
3221 | 3266 | ||
3222 | /* | 3267 | /* |
3223 | * For backwards compatibility. Functionality superseded by sigprocmask. | 3268 | * For backwards compatibility. Functionality superseded by sigprocmask. |
3224 | */ | 3269 | */ |
3225 | SYSCALL_DEFINE0(sgetmask) | 3270 | SYSCALL_DEFINE0(sgetmask) |
3226 | { | 3271 | { |
3227 | /* SMP safe */ | 3272 | /* SMP safe */ |
3228 | return current->blocked.sig[0]; | 3273 | return current->blocked.sig[0]; |
3229 | } | 3274 | } |
3230 | 3275 | ||
3231 | SYSCALL_DEFINE1(ssetmask, int, newmask) | 3276 | SYSCALL_DEFINE1(ssetmask, int, newmask) |
3232 | { | 3277 | { |
3233 | int old = current->blocked.sig[0]; | 3278 | int old = current->blocked.sig[0]; |
3234 | sigset_t newset; | 3279 | sigset_t newset; |
3235 | siginitset(&newset, newmask); | 3280 | siginitset(&newset, newmask); |
3236 | set_current_blocked(&newset); | 3281 | set_current_blocked(&newset); |
3237 | 3282 | ||
3238 | return old; | 3283 | return old; |
3239 | } | 3284 | } |
3240 | #endif /* __ARCH_WANT_SYS_SGETMASK */ | 3285 | #endif /* __ARCH_WANT_SYS_SGETMASK */ |
3241 | 3286 | ||
3242 | #ifdef __ARCH_WANT_SYS_SIGNAL | 3287 | #ifdef __ARCH_WANT_SYS_SIGNAL |
3243 | /* | 3288 | /* |
3244 | * For backwards compatibility. Functionality superseded by sigaction. | 3289 | * For backwards compatibility. Functionality superseded by sigaction. |
3245 | */ | 3290 | */ |
3246 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) | 3291 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) |
3247 | { | 3292 | { |
3248 | struct k_sigaction new_sa, old_sa; | 3293 | struct k_sigaction new_sa, old_sa; |
3249 | int ret; | 3294 | int ret; |
3250 | 3295 | ||
3251 | new_sa.sa.sa_handler = handler; | 3296 | new_sa.sa.sa_handler = handler; |
3252 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; | 3297 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; |
3253 | sigemptyset(&new_sa.sa.sa_mask); | 3298 | sigemptyset(&new_sa.sa.sa_mask); |
3254 | 3299 | ||
3255 | ret = do_sigaction(sig, &new_sa, &old_sa); | 3300 | ret = do_sigaction(sig, &new_sa, &old_sa); |
3256 | 3301 | ||
3257 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; | 3302 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; |
3258 | } | 3303 | } |
3259 | #endif /* __ARCH_WANT_SYS_SIGNAL */ | 3304 | #endif /* __ARCH_WANT_SYS_SIGNAL */ |
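
SA_ONESHOT and SA_NOMASK are the historical spellings of SA_RESETHAND and SA_NODEFER, so this raw syscall gives System V semantics: the disposition resets to SIG_DFL on delivery and the signal is not blocked while the handler runs. (Modern glibc maps signal(3) onto sigaction() with BSD semantics instead.) Code written against the raw behavior re-arms itself, as in this sketch (not part of this commit):

        #include <signal.h>

        static void handler(int sig)
        {
                /* System V: disposition already reset to SIG_DFL, so re-arm. */
                signal(sig, handler);
        }

        int main(void)
        {
                signal(SIGUSR1, handler);
                raise(SIGUSR1);
                return 0;
        }
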
3260 | 3305 | ||
3261 | #ifdef __ARCH_WANT_SYS_PAUSE | 3306 | #ifdef __ARCH_WANT_SYS_PAUSE |
3262 | 3307 | ||
3263 | SYSCALL_DEFINE0(pause) | 3308 | SYSCALL_DEFINE0(pause) |
3264 | { | 3309 | { |
3265 | while (!signal_pending(current)) { | 3310 | while (!signal_pending(current)) { |
3266 | current->state = TASK_INTERRUPTIBLE; | 3311 | current->state = TASK_INTERRUPTIBLE; |
3267 | schedule(); | 3312 | schedule(); |
3268 | } | 3313 | } |
3269 | return -ERESTARTNOHAND; | 3314 | return -ERESTARTNOHAND; |
3270 | } | 3315 | } |
3271 | 3316 | ||
3272 | #endif | 3317 | #endif |
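
The -ERESTARTNOHAND return is the point of this loop: after a handler runs, pause() must not be transparently restarted, so the restart is suppressed whenever a handler was invoked and userspace sees the usual -1 with errno set to EINTR.
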
3273 | 3318 | ||
3274 | int sigsuspend(sigset_t *set) | 3319 | int sigsuspend(sigset_t *set) |
3275 | { | 3320 | { |
3276 | current->saved_sigmask = current->blocked; | 3321 | current->saved_sigmask = current->blocked; |
3277 | set_current_blocked(set); | 3322 | set_current_blocked(set); |
3278 | 3323 | ||
3279 | current->state = TASK_INTERRUPTIBLE; | 3324 | current->state = TASK_INTERRUPTIBLE; |
3280 | schedule(); | 3325 | schedule(); |
3281 | set_restore_sigmask(); | 3326 | set_restore_sigmask(); |
3282 | return -ERESTARTNOHAND; | 3327 | return -ERESTARTNOHAND; |
3283 | } | 3328 | } |
3284 | 3329 | ||
3285 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND | 3330 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND |
3286 | /** | 3331 | /** |
3287 | * sys_rt_sigsuspend - replace the signal mask with the @unewset | 3332 | * sys_rt_sigsuspend - replace the signal mask with the @unewset |
3288 | * value until a signal is received | 3333 | * value until a signal is received |
3289 | * @unewset: new signal mask value | 3334 | * @unewset: new signal mask value |
3290 | * @sigsetsize: size of sigset_t type | 3335 | * @sigsetsize: size of sigset_t type |
3291 | */ | 3336 | */ |
3292 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) | 3337 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) |
3293 | { | 3338 | { |
3294 | sigset_t newset; | 3339 | sigset_t newset; |
3295 | 3340 | ||
3296 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 3341 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
3297 | if (sigsetsize != sizeof(sigset_t)) | 3342 | if (sigsetsize != sizeof(sigset_t)) |
3298 | return -EINVAL; | 3343 | return -EINVAL; |
3299 | 3344 | ||
3300 | if (copy_from_user(&newset, unewset, sizeof(newset))) | 3345 | if (copy_from_user(&newset, unewset, sizeof(newset))) |
3301 | return -EFAULT; | 3346 | return -EFAULT; |
3302 | return sigsuspend(&newset); | 3347 | return sigsuspend(&newset); |
3303 | } | 3348 | } |
3304 | #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ | 3349 | #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ |
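
The saved_sigmask/restore dance in sigsuspend() above is what makes the classic race-free wait idiom work: the mask swap and the sleep happen atomically with respect to signal delivery. A userspace sketch (not part of this commit):

        #include <signal.h>

        static volatile sig_atomic_t got_usr1;

        static void handler(int sig) { (void)sig; got_usr1 = 1; }

        int main(void)
        {
                sigset_t block, old;
                struct sigaction sa = { 0 };

                sa.sa_handler = handler;
                sigemptyset(&sa.sa_mask);
                sigaction(SIGUSR1, &sa, NULL);

                sigemptyset(&block);
                sigaddset(&block, SIGUSR1);
                sigprocmask(SIG_BLOCK, &block, &old);

                raise(SIGUSR1);         /* arrives while blocked: stays pending */

                while (!got_usr1)
                        sigsuspend(&old);   /* unblock + sleep, atomically */

                sigprocmask(SIG_SETMASK, &old, NULL);
                return 0;
        }
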
3305 | 3350 | ||
3306 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) | 3351 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) |
3307 | { | 3352 | { |
3308 | return NULL; | 3353 | return NULL; |
3309 | } | 3354 | } |
3310 | 3355 | ||
3311 | void __init signals_init(void) | 3356 | void __init signals_init(void) |
3312 | { | 3357 | { |
3313 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); | 3358 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); |
3314 | } | 3359 | } |
3315 | 3360 | ||
3316 | #ifdef CONFIG_KGDB_KDB | 3361 | #ifdef CONFIG_KGDB_KDB |
3317 | #include <linux/kdb.h> | 3362 | #include <linux/kdb.h> |
3318 | /* | 3363 | /* |
3319 | * kdb_send_sig_info - Allows kdb to send signals without exposing | 3364 | * kdb_send_sig_info - Allows kdb to send signals without exposing |
3320 | * signal internals. This function checks if the required locks are | 3365 | * signal internals. This function checks if the required locks are |
3321 | * available before calling the main signal code, to avoid kdb | 3366 | * available before calling the main signal code, to avoid kdb |
3322 | * deadlocks. | 3367 | * deadlocks. |
3323 | */ | 3368 | */ |
3324 | void | 3369 | void |
3325 | kdb_send_sig_info(struct task_struct *t, struct siginfo *info) | 3370 | kdb_send_sig_info(struct task_struct *t, struct siginfo *info) |
3326 | { | 3371 | { |
3327 | static struct task_struct *kdb_prev_t; | 3372 | static struct task_struct *kdb_prev_t; |
3328 | int sig, new_t; | 3373 | int sig, new_t; |
3329 | if (!spin_trylock(&t->sighand->siglock)) { | 3374 | if (!spin_trylock(&t->sighand->siglock)) { |
3330 | kdb_printf("Can't do kill command now.\n" | 3375 | kdb_printf("Can't do kill command now.\n" |
3331 | "The sigmask lock is held somewhere else in " | 3376 | "The sigmask lock is held somewhere else in " |
3332 | "kernel, try again later\n"); | 3377 | "kernel, try again later\n"); |
3333 | return; | 3378 | return; |
3334 | } | 3379 | } |
3335 | spin_unlock(&t->sighand->siglock); | 3380 | spin_unlock(&t->sighand->siglock); |
3336 | new_t = kdb_prev_t != t; | 3381 | new_t = kdb_prev_t != t; |
3337 | kdb_prev_t = t; | 3382 | kdb_prev_t = t; |
3338 | if (t->state != TASK_RUNNING && new_t) { | 3383 | if (t->state != TASK_RUNNING && new_t) { |
3339 | kdb_printf("Process is not RUNNING, sending a signal from " | 3384 | kdb_printf("Process is not RUNNING, sending a signal from " |
3340 | "kdb risks deadlock\n" | 3385 | "kdb risks deadlock\n" |
3341 | "on the run queue locks. " | 3386 | "on the run queue locks. " |
3342 | "The signal has _not_ been sent.\n" | 3387 | "The signal has _not_ been sent.\n" |
3343 | "Reissue the kill command if you want to risk " | 3388 | "Reissue the kill command if you want to risk " |
3344 | "the deadlock.\n"); | 3389 | "the deadlock.\n"); |
3345 | return; | 3390 | return; |
3346 | } | 3391 | } |
3347 | sig = info->si_signo; | 3392 | sig = info->si_signo; |
3348 | if (send_sig_info(sig, info, t)) | 3393 | if (send_sig_info(sig, info, t)) |
3349 | kdb_printf("Fail to deliver Signal %d to process %d.\n", | 3394 | kdb_printf("Fail to deliver Signal %d to process %d.\n", |
3350 | sig, t->pid); | 3395 | sig, t->pid); |
3351 | else | 3396 | else |
3352 | kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); | 3397 | kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); |
3353 | } | 3398 | } |
3354 | #endif /* CONFIG_KGDB_KDB */ | 3399 | #endif /* CONFIG_KGDB_KDB */ |
3355 | 3400 |