Commit 5168ce2c647f02756803bef7b74906f485491a1c

Authored by Heiko Carstens
Committed by Martin Schwidefsky
1 parent cbdc229245

[S390] cputime: initialize per thread timer values on fork

Initialize per thread timer values instead of just copying them from
the parent. That way it is easy to tell how much time a thread has
spent in user/system context.
This doesn't fix a bug; it is just for debugging purposes.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
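
For readers who want the gist before the diff: the change zeroes the child's per-thread user/system timer fields in copy_thread() instead of letting the child inherit the parent's accumulated values along with the copied thread_info. Below is a minimal standalone sketch of why that matters, with illustrative types and names only (not the kernel's own structures):

/* Hypothetical illustration of per-thread CPU time bookkeeping at fork. */
struct thread_times {
	unsigned long long user_timer;   /* time accumulated in user context */
	unsigned long long system_timer; /* time accumulated in kernel context */
};

/*
 * If the child merely copied the parent's counters, reading the child's
 * fields later would yield "parent's total at fork + child's own time",
 * which is hard to interpret. Resetting them at fork makes each field
 * the child's own running total.
 */
static void init_child_times(struct thread_times *child)
{
	child->user_timer = 0;
	child->system_timer = 0;
}

With the counters starting at zero, the per-thread values can be read directly while debugging; there is no parent baseline to remember and subtract.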

Showing 1 changed file with 5 additions and 0 deletions

arch/s390/kernel/process.c
/*
 * This file handles the architecture dependent parts of process handling.
 *
 * Copyright IBM Corp. 1999,2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Hartmut Penner <hp@de.ibm.com>,
 *	      Denis Joseph Barrow,
 */

#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/utsname.h>
#include <linux/tick.h>
#include <linux/elfcore.h>
#include <linux/kernel_stat.h>
#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/timer.h>
#include "entry.h"

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

/*
 * Return saved PC of a blocked thread. used in kernel/sched.
 * resume in entry.S does not create a new stack frame, it
 * just stores the registers %r6-%r15 to the frame given by
 * schedule. We want to return the address of the caller of
 * schedule, so we have to walk the backchain one time to
 * find the frame schedule() store its return address.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct stack_frame *sf, *low, *high;

	if (!tsk || !task_stack_page(tsk))
		return 0;
	low = task_stack_page(tsk);
	high = (struct stack_frame *) task_pt_regs(tsk);
	sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	return sf->gprs[8];
}

extern void s390_handle_mcck(void);
/*
 * The idle loop on a S390...
 */
static void default_idle(void)
{
	/* CPU is going idle. */
	local_irq_disable();
	if (need_resched()) {
		local_irq_enable();
		return;
	}
#ifdef CONFIG_HOTPLUG_CPU
	if (cpu_is_offline(smp_processor_id())) {
		preempt_enable_no_resched();
		cpu_die();
	}
#endif
	local_mcck_disable();
	if (test_thread_flag(TIF_MCCK_PENDING)) {
		local_mcck_enable();
		local_irq_enable();
		s390_handle_mcck();
		return;
	}
	trace_hardirqs_on();
	/* Don't trace preempt off for idle. */
	stop_critical_timings();
	/* Stop virtual timer and halt the cpu. */
	vtime_stop_cpu();
	/* Reenable preemption tracer. */
	start_critical_timings();
}

void cpu_idle(void)
{
	for (;;) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched())
			default_idle();
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

extern void kernel_thread_starter(void);

asm(
	".align 4\n"
	"kernel_thread_starter:\n"
	"    la    2,0(10)\n"
	"    basr  14,9\n"
	"    la    2,0\n"
	"    br    11\n");

int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
	regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
	regs.gprs[9] = (unsigned long) fn;
	regs.gprs[10] = (unsigned long) arg;
	regs.gprs[11] = (unsigned long) do_exit;
	regs.orig_gpr2 = -1;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       0, &regs, 0, NULL, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
}

void flush_thread(void)
{
	clear_used_math();
	clear_tsk_thread_flag(current, TIF_USEDFPU);
}

void release_thread(struct task_struct *dead_task)
{
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
+	struct thread_info *ti;
	struct fake_frame
	{
		struct stack_frame sf;
		struct pt_regs childregs;
	} *frame;

	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
	p->thread.ksp = (unsigned long) frame;
	/* Store access registers to kernel stack of new process. */
	frame->childregs = *regs;
	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
	frame->childregs.gprs[15] = new_stackp;
	frame->sf.back_chain = 0;

	/* new return point is ret_from_fork */
	frame->sf.gprs[8] = (unsigned long) ret_from_fork;

	/* fake return stack for resume(), don't go back to schedule */
	frame->sf.gprs[9] = (unsigned long) frame;

	/* Save access registers to new thread structure. */
	save_access_regs(&p->thread.acrs[0]);

#ifndef CONFIG_64BIT
	/*
	 * save fprs to current->thread.fp_regs to merge them with
	 * the emulated registers and then copy the result to the child.
	 */
	save_fp_regs(&current->thread.fp_regs);
	memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
	       sizeof(s390_fp_regs));
	/* Set a new TLS ? */
	if (clone_flags & CLONE_SETTLS)
		p->thread.acrs[0] = regs->gprs[6];
#else /* CONFIG_64BIT */
	/* Save the fpu registers to new thread structure. */
	save_fp_regs(&p->thread.fp_regs);
	/* Set a new TLS ? */
	if (clone_flags & CLONE_SETTLS) {
		if (test_thread_flag(TIF_31BIT)) {
			p->thread.acrs[0] = (unsigned int) regs->gprs[6];
		} else {
			p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
			p->thread.acrs[1] = (unsigned int) regs->gprs[6];
		}
	}
#endif /* CONFIG_64BIT */
	/* start new process with ar4 pointing to the correct address space */
	p->thread.mm_segment = get_fs();
	/* Don't copy debug registers */
	memset(&p->thread.per_info, 0, sizeof(p->thread.per_info));
+	/* Initialize per thread user and system timer values */
+	ti = task_thread_info(p);
+	ti->user_timer = 0;
+	ti->system_timer = 0;
	return 0;
}

SYSCALL_DEFINE0(fork)
{
	struct pt_regs *regs = task_pt_regs(current);
	return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
}

SYSCALL_DEFINE0(clone)
{
	struct pt_regs *regs = task_pt_regs(current);
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs->gprs[3];
	newsp = regs->orig_gpr2;
	parent_tidptr = (int __user *) regs->gprs[4];
	child_tidptr = (int __user *) regs->gprs[5];
	if (!newsp)
		newsp = regs->gprs[15];
	return do_fork(clone_flags, newsp, regs, 0,
		       parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
SYSCALL_DEFINE0(vfork)
{
	struct pt_regs *regs = task_pt_regs(current);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
		       regs->gprs[15], regs, 0, NULL, NULL);
}

asmlinkage void execve_tail(void)
{
	task_lock(current);
	current->ptrace &= ~PT_DTRACE;
	task_unlock(current);
	current->thread.fp_regs.fpc = 0;
	if (MACHINE_HAS_IEEE)
		asm volatile("sfpc %0,%0" : : "d" (0));
}

/*
 * sys_execve() executes a new program.
 */
SYSCALL_DEFINE0(execve)
{
	struct pt_regs *regs = task_pt_regs(current);
	char *filename;
	unsigned long result;
	int rc;

	filename = getname((char __user *) regs->orig_gpr2);
	if (IS_ERR(filename)) {
		result = PTR_ERR(filename);
		goto out;
	}
	rc = do_execve(filename, (char __user * __user *) regs->gprs[3],
		       (char __user * __user *) regs->gprs[4], regs);
	if (rc) {
		result = rc;
		goto out_putname;
	}
	execve_tail();
	result = regs->gprs[2];
out_putname:
	putname(filename);
out:
	return result;
}

/*
 * fill in the FPU structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
#ifndef CONFIG_64BIT
	/*
	 * save fprs to current->thread.fp_regs to merge them with
	 * the emulated registers and then copy the result to the dump.
	 */
	save_fp_regs(&current->thread.fp_regs);
	memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
#else /* CONFIG_64BIT */
	save_fp_regs(fpregs);
#endif /* CONFIG_64BIT */
	return 1;
}

unsigned long get_wchan(struct task_struct *p)
{
	struct stack_frame *sf, *low, *high;
	unsigned long return_address;
	int count;

	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
		return 0;
	low = task_stack_page(p);
	high = (struct stack_frame *) task_pt_regs(p);
	sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	for (count = 0; count < 16; count++) {
		sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
		if (sf <= low || sf > high)
			return 0;
		return_address = sf->gprs[8] & PSW_ADDR_INSN;
		if (!in_sched_functions(return_address))
			return return_address;
	}
	return 0;
}