Commit 27f6b416626a240e1b46f646d2e0c5266f4eac95

Authored by Martin Schwidefsky
1 parent 921486b92b

s390/vtimer: rework virtual timer interface

The current virtual timer interface is inherently per-cpu and hard to
use. The sole user of the interface is appldata, which uses it to execute
a function after a specific amount of cputime has been used across all cpus.

Rework the virtual timer interface to hook into the cputime accounting.
This makes the interface independent of the CPU timer interrupts and
makes the virtual timers global instead of per-cpu.
Overall the code is greatly simplified. The downside is that the accuracy
is not as good as with the original implementation, but it is still good
enough for appldata.

Reviewed-by: Jan Glauber <jang@linux.vnet.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
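
For illustration only (not part of the commit): a minimal sketch of how a caller
uses the reworked interface, modeled on the appldata changes below. The example_*
names are hypothetical, and the millisecond-to-CPU-timer-unit conversion with
TOD_MICRO simply follows __appldata_vtimer_setup().

	#include <asm/vtimer.h>

	/* Hypothetical example, not taken from the patch. */
	static void example_timer_fn(unsigned long data)
	{
		/*
		 * Called from the cputime accounting path once the requested
		 * amount of cputime has been consumed system-wide; keep it
		 * short or hand the real work off to a workqueue, as
		 * appldata does.
		 */
	}

	static struct vtimer_list example_timer;

	static void example_start(unsigned int interval_msec)
	{
		example_timer.function = example_timer_fn;
		example_timer.data = 0;
		/* milliseconds -> CPU timer units, as in __appldata_vtimer_setup() */
		example_timer.expires = (u64) interval_msec * 1000 * TOD_MICRO;
		/* one global timer, no per-cpu setup or smp_call_function_single() */
		add_virt_timer_periodic(&example_timer);
	}

	static void example_stop(void)
	{
		del_virt_timer(&example_timer);
	}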

Showing 13 changed files with 222 additions and 472 deletions

arch/s390/appldata/appldata_base.c
... ... @@ -27,7 +27,7 @@
27 27 #include <linux/suspend.h>
28 28 #include <linux/platform_device.h>
29 29 #include <asm/appldata.h>
30   -#include <asm/timer.h>
  30 +#include <asm/vtimer.h>
31 31 #include <asm/uaccess.h>
32 32 #include <asm/io.h>
33 33 #include <asm/smp.h>
... ... @@ -82,8 +82,7 @@
82 82 /*
83 83 * Timer
84 84 */
85   -static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
86   -static atomic_t appldata_expire_count = ATOMIC_INIT(0);
  85 +static struct vtimer_list appldata_timer;
87 86  
88 87 static DEFINE_SPINLOCK(appldata_timer_lock);
89 88 static int appldata_interval = APPLDATA_CPU_INTERVAL;
... ... @@ -113,10 +112,7 @@
113 112 */
114 113 static void appldata_timer_function(unsigned long data)
115 114 {
116   - if (atomic_dec_and_test(&appldata_expire_count)) {
117   - atomic_set(&appldata_expire_count, num_online_cpus());
118   - queue_work(appldata_wq, (struct work_struct *) data);
119   - }
  115 + queue_work(appldata_wq, (struct work_struct *) data);
120 116 }
121 117  
122 118 /*
... ... @@ -129,7 +125,6 @@
129 125 struct list_head *lh;
130 126 struct appldata_ops *ops;
131 127  
132   - get_online_cpus();
133 128 mutex_lock(&appldata_ops_mutex);
134 129 list_for_each(lh, &appldata_ops_list) {
135 130 ops = list_entry(lh, struct appldata_ops, list);
... ... @@ -138,7 +133,6 @@
138 133 }
139 134 }
140 135 mutex_unlock(&appldata_ops_mutex);
141   - put_online_cpus();
142 136 }
143 137  
144 138 /*
... ... @@ -166,20 +160,6 @@
166 160  
167 161 /****************************** /proc stuff **********************************/
168 162  
169   -/*
170   - * appldata_mod_vtimer_wrap()
171   - *
172   - * wrapper function for mod_virt_timer(), because smp_call_function_single()
173   - * accepts only one parameter.
174   - */
175   -static void __appldata_mod_vtimer_wrap(void *p) {
176   - struct {
177   - struct vtimer_list *timer;
178   - u64 expires;
179   - } *args = p;
180   - mod_virt_timer_periodic(args->timer, args->expires);
181   -}
182   -
183 163 #define APPLDATA_ADD_TIMER 0
184 164 #define APPLDATA_DEL_TIMER 1
185 165 #define APPLDATA_MOD_TIMER 2
... ... @@ -190,49 +170,28 @@
190 170 * Add, delete or modify virtual timers on all online cpus.
191 171 * The caller needs to get the appldata_timer_lock spinlock.
192 172 */
193   -static void
194   -__appldata_vtimer_setup(int cmd)
  173 +static void __appldata_vtimer_setup(int cmd)
195 174 {
196   - u64 per_cpu_interval;
197   - int i;
  175 + u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO;
198 176  
199 177 switch (cmd) {
200 178 case APPLDATA_ADD_TIMER:
201 179 if (appldata_timer_active)
202 180 break;
203   - per_cpu_interval = (u64) (appldata_interval*1000 /
204   - num_online_cpus()) * TOD_MICRO;
205   - for_each_online_cpu(i) {
206   - per_cpu(appldata_timer, i).expires = per_cpu_interval;
207   - smp_call_function_single(i, add_virt_timer_periodic,
208   - &per_cpu(appldata_timer, i),
209   - 1);
210   - }
  181 + appldata_timer.expires = timer_interval;
  182 + add_virt_timer_periodic(&appldata_timer);
211 183 appldata_timer_active = 1;
212 184 break;
213 185 case APPLDATA_DEL_TIMER:
214   - for_each_online_cpu(i)
215   - del_virt_timer(&per_cpu(appldata_timer, i));
  186 + del_virt_timer(&appldata_timer);
216 187 if (!appldata_timer_active)
217 188 break;
218 189 appldata_timer_active = 0;
219   - atomic_set(&appldata_expire_count, num_online_cpus());
220 190 break;
221 191 case APPLDATA_MOD_TIMER:
222   - per_cpu_interval = (u64) (appldata_interval*1000 /
223   - num_online_cpus()) * TOD_MICRO;
224 192 if (!appldata_timer_active)
225 193 break;
226   - for_each_online_cpu(i) {
227   - struct {
228   - struct vtimer_list *timer;
229   - u64 expires;
230   - } args;
231   - args.timer = &per_cpu(appldata_timer, i);
232   - args.expires = per_cpu_interval;
233   - smp_call_function_single(i, __appldata_mod_vtimer_wrap,
234   - &args, 1);
235   - }
  194 + mod_virt_timer_periodic(&appldata_timer, timer_interval);
236 195 }
... ... @@ -263,14 +222,12 @@
263 222 len = *lenp;
264 223 if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
265 224 return -EFAULT;
266   - get_online_cpus();
267 225 spin_lock(&appldata_timer_lock);
268 226 if (buf[0] == '1')
269 227 __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
270 228 else if (buf[0] == '0')
271 229 __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
272 230 spin_unlock(&appldata_timer_lock);
273   - put_online_cpus();
274 231 out:
275 232 *lenp = len;
276 233 *ppos += len;
... ... @@ -303,20 +260,17 @@
303 260 goto out;
304 261 }
305 262 len = *lenp;
306   - if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
  263 + if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
307 264 return -EFAULT;
308   - }
309 265 interval = 0;
310 266 sscanf(buf, "%i", &interval);
311 267 if (interval <= 0)
312 268 return -EINVAL;
313 269  
314   - get_online_cpus();
315 270 spin_lock(&appldata_timer_lock);
316 271 appldata_interval = interval;
317 272 __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
318 273 spin_unlock(&appldata_timer_lock);
319   - put_online_cpus();
320 274 out:
321 275 *lenp = len;
322 276 *ppos += len;
323 277  
... ... @@ -483,14 +437,12 @@
483 437 int rc;
484 438 struct list_head *lh;
485 439  
486   - get_online_cpus();
487 440 spin_lock(&appldata_timer_lock);
488 441 if (appldata_timer_active) {
489 442 __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
490 443 appldata_timer_suspended = 1;
491 444 }
492 445 spin_unlock(&appldata_timer_lock);
493   - put_online_cpus();
494 446  
495 447 mutex_lock(&appldata_ops_mutex);
496 448 list_for_each(lh, &appldata_ops_list) {
497 449  
... ... @@ -514,14 +466,12 @@
514 466 int rc;
515 467 struct list_head *lh;
516 468  
517   - get_online_cpus();
518 469 spin_lock(&appldata_timer_lock);
519 470 if (appldata_timer_suspended) {
520 471 __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
521 472 appldata_timer_suspended = 0;
522 473 }
523 474 spin_unlock(&appldata_timer_lock);
524   - put_online_cpus();
525 475  
526 476 mutex_lock(&appldata_ops_mutex);
527 477 list_for_each(lh, &appldata_ops_list) {
... ... @@ -565,53 +515,6 @@
565 515  
566 516 /******************************* init / exit *********************************/
567 517  
568   -static void __cpuinit appldata_online_cpu(int cpu)
569   -{
570   - init_virt_timer(&per_cpu(appldata_timer, cpu));
571   - per_cpu(appldata_timer, cpu).function = appldata_timer_function;
572   - per_cpu(appldata_timer, cpu).data = (unsigned long)
573   - &appldata_work;
574   - atomic_inc(&appldata_expire_count);
575   - spin_lock(&appldata_timer_lock);
576   - __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
577   - spin_unlock(&appldata_timer_lock);
578   -}
579   -
580   -static void __cpuinit appldata_offline_cpu(int cpu)
581   -{
582   - del_virt_timer(&per_cpu(appldata_timer, cpu));
583   - if (atomic_dec_and_test(&appldata_expire_count)) {
584   - atomic_set(&appldata_expire_count, num_online_cpus());
585   - queue_work(appldata_wq, &appldata_work);
586   - }
587   - spin_lock(&appldata_timer_lock);
588   - __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
589   - spin_unlock(&appldata_timer_lock);
590   -}
591   -
592   -static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
593   - unsigned long action,
594   - void *hcpu)
595   -{
596   - switch (action) {
597   - case CPU_ONLINE:
598   - case CPU_ONLINE_FROZEN:
599   - appldata_online_cpu((long) hcpu);
600   - break;
601   - case CPU_DEAD:
602   - case CPU_DEAD_FROZEN:
603   - appldata_offline_cpu((long) hcpu);
604   - break;
605   - default:
606   - break;
607   - }
608   - return NOTIFY_OK;
609   -}
610   -
611   -static struct notifier_block __cpuinitdata appldata_nb = {
612   - .notifier_call = appldata_cpu_notify,
613   -};
614   -
615 518 /*
616 519 * appldata_init()
617 520 *
618 521  
... ... @@ -619,8 +522,11 @@
619 522 */
620 523 static int __init appldata_init(void)
621 524 {
622   - int i, rc;
  525 + int rc;
623 526  
  527 + appldata_timer.function = appldata_timer_function;
  528 + appldata_timer.data = (unsigned long) &appldata_work;
  529 +
624 530 rc = platform_driver_register(&appldata_pdrv);
625 531 if (rc)
626 532 return rc;
... ... @@ -636,14 +542,6 @@
636 542 rc = -ENOMEM;
637 543 goto out_device;
638 544 }
639   -
640   - get_online_cpus();
641   - for_each_online_cpu(i)
642   - appldata_online_cpu(i);
643   - put_online_cpus();
644   -
645   - /* Register cpu hotplug notifier */
646   - register_hotcpu_notifier(&appldata_nb);
647 545  
648 546 appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
649 547 return 0;
arch/s390/include/asm/cputime.h
... ... @@ -168,9 +168,11 @@
168 168 int nohz_delay;
169 169 unsigned int sequence;
170 170 unsigned long long idle_count;
171   - unsigned long long idle_enter;
172   - unsigned long long idle_exit;
173 171 unsigned long long idle_time;
  172 + unsigned long long clock_idle_enter;
  173 + unsigned long long clock_idle_exit;
  174 + unsigned long long timer_idle_enter;
  175 + unsigned long long timer_idle_exit;
174 176 };
175 177  
176 178 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
arch/s390/include/asm/timer.h
1   -/*
2   - * include/asm-s390/timer.h
3   - *
4   - * (C) Copyright IBM Corp. 2003,2006
5   - * Virtual CPU timer
6   - *
7   - * Author: Jan Glauber (jang@de.ibm.com)
8   - */
9   -
10   -#ifndef _ASM_S390_TIMER_H
11   -#define _ASM_S390_TIMER_H
12   -
13   -#include <linux/timer.h>
14   -
15   -#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
16   -
17   -struct vtimer_list {
18   - struct list_head entry;
19   -
20   - int cpu;
21   - __u64 expires;
22   - __u64 interval;
23   -
24   - void (*function)(unsigned long);
25   - unsigned long data;
26   -};
27   -
28   -/* the vtimer value will wrap after ca. 71 years */
29   -struct vtimer_queue {
30   - struct list_head list;
31   - spinlock_t lock;
32   - __u64 timer; /* last programmed timer */
33   - __u64 elapsed; /* elapsed time of timer expire values */
34   - __u64 idle_enter; /* cpu timer on idle enter */
35   - __u64 idle_exit; /* cpu timer on idle exit */
36   -};
37   -
38   -extern void init_virt_timer(struct vtimer_list *timer);
39   -extern void add_virt_timer(void *new);
40   -extern void add_virt_timer_periodic(void *new);
41   -extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
42   -extern int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires);
43   -extern int del_virt_timer(struct vtimer_list *timer);
44   -
45   -extern void init_cpu_vtimer(void);
46   -extern void vtime_init(void);
47   -
48   -extern void vtime_stop_cpu(void);
49   -extern void vtime_start_leave(void);
50   -
51   -#endif /* _ASM_S390_TIMER_H */
arch/s390/include/asm/vtimer.h
  1 +/*
  2 + * Copyright IBM Corp. 2003, 2012
  3 + * Virtual CPU timer
  4 + *
  5 + * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
  6 + */
  7 +
  8 +#ifndef _ASM_S390_TIMER_H
  9 +#define _ASM_S390_TIMER_H
  10 +
  11 +#define VTIMER_MAX_SLICE (0x7fffffffffffffffULL)
  12 +
  13 +struct vtimer_list {
  14 + struct list_head entry;
  15 + u64 expires;
  16 + u64 interval;
  17 + void (*function)(unsigned long);
  18 + unsigned long data;
  19 +};
  20 +
  21 +extern void init_virt_timer(struct vtimer_list *timer);
  22 +extern void add_virt_timer(struct vtimer_list *timer);
  23 +extern void add_virt_timer_periodic(struct vtimer_list *timer);
  24 +extern int mod_virt_timer(struct vtimer_list *timer, u64 expires);
  25 +extern int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires);
  26 +extern int del_virt_timer(struct vtimer_list *timer);
  27 +
  28 +extern void init_cpu_vtimer(void);
  29 +extern void vtime_init(void);
  30 +
  31 +extern void vtime_stop_cpu(void);
  32 +
  33 +#endif /* _ASM_S390_TIMER_H */
arch/s390/kernel/asm-offsets.c
... ... @@ -9,7 +9,6 @@
9 9 #include <linux/kbuild.h>
10 10 #include <linux/sched.h>
11 11 #include <asm/cputime.h>
12   -#include <asm/timer.h>
13 12 #include <asm/vdso.h>
14 13 #include <asm/pgtable.h>
15 14  
... ... @@ -72,11 +71,10 @@
72 71 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
73 72 BLANK();
74 73 /* idle data offsets */
75   - DEFINE(__IDLE_ENTER, offsetof(struct s390_idle_data, idle_enter));
76   - DEFINE(__IDLE_EXIT, offsetof(struct s390_idle_data, idle_exit));
77   - /* vtimer queue offsets */
78   - DEFINE(__VQ_IDLE_ENTER, offsetof(struct vtimer_queue, idle_enter));
79   - DEFINE(__VQ_IDLE_EXIT, offsetof(struct vtimer_queue, idle_exit));
  74 + DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
  75 + DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit));
  76 + DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter));
  77 + DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit));
80 78 /* lowcore offsets */
81 79 DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
82 80 DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
arch/s390/kernel/entry.S
... ... @@ -616,17 +616,13 @@
616 616 * Load idle PSW. The second "half" of this function is in cleanup_idle.
617 617 */
618 618 ENTRY(psw_idle)
619   - st %r4,__SF_EMPTY(%r15)
  619 + st %r3,__SF_EMPTY(%r15)
620 620 basr %r1,0
621 621 la %r1,psw_idle_lpsw+4-.(%r1)
622 622 st %r1,__SF_EMPTY+4(%r15)
623 623 oi __SF_EMPTY+4(%r15),0x80
624   - la %r1,.Lvtimer_max-psw_idle_lpsw-4(%r1)
625   - stck __IDLE_ENTER(%r2)
626   - ltr %r5,%r5
627   - stpt __VQ_IDLE_ENTER(%r3)
628   - jz psw_idle_lpsw
629   - spt 0(%r1)
  624 + stck __CLOCK_IDLE_ENTER(%r2)
  625 + stpt __TIMER_IDLE_ENTER(%r2)
630 626 psw_idle_lpsw:
631 627 lpsw __SF_EMPTY(%r15)
632 628 br %r14
... ... @@ -885,33 +881,28 @@
885 881  
886 882 cleanup_idle:
887 883 # copy interrupt clock & cpu timer
888   - mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK
889   - mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
  884 + mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
  885 + mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
890 886 chi %r11,__LC_SAVE_AREA_ASYNC
891 887 je 0f
892   - mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
893   - mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
  888 + mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
  889 + mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
894 890 0: # check if stck has been executed
895 891 cl %r9,BASED(cleanup_idle_insn)
896 892 jhe 1f
897   - mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
898   - mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
899   - j 2f
900   -1: # check if the cpu timer has been reprogrammed
901   - ltr %r5,%r5
902   - jz 2f
903   - spt __VQ_IDLE_ENTER(%r3)
904   -2: # account system time going idle
  893 + mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
  894 + mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
  895 +1: # account system time going idle
905 896 lm %r9,%r10,__LC_STEAL_TIMER
906   - ADD64 %r9,%r10,__IDLE_ENTER(%r2)
  897 + ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
907 898 SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
908 899 stm %r9,%r10,__LC_STEAL_TIMER
909   - mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
  900 + mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
910 901 lm %r9,%r10,__LC_SYSTEM_TIMER
911 902 ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
912   - SUB64 %r9,%r10,__VQ_IDLE_ENTER(%r3)
  903 + SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
913 904 stm %r9,%r10,__LC_SYSTEM_TIMER
914   - mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
  905 + mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
915 906 # prepare return psw
916 907 n %r8,BASED(cleanup_idle_wait) # clear wait state bit
917 908 l %r9,24(%r11) # return from psw_idle
arch/s390/kernel/entry.h
... ... @@ -5,7 +5,6 @@
5 5 #include <linux/signal.h>
6 6 #include <asm/ptrace.h>
7 7 #include <asm/cputime.h>
8   -#include <asm/timer.h>
9 8  
10 9 extern void (*pgm_check_table[128])(struct pt_regs *);
11 10 extern void *restart_stack;
... ... @@ -17,8 +16,7 @@
17 16 void mcck_int_handler(void);
18 17 void restart_int_handler(void);
19 18 void restart_call_handler(void);
20   -void psw_idle(struct s390_idle_data *, struct vtimer_queue *,
21   - unsigned long, int);
  19 +void psw_idle(struct s390_idle_data *, unsigned long);
22 20  
23 21 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
24 22 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
arch/s390/kernel/entry64.S
... ... @@ -642,15 +642,11 @@
642 642 * Load idle PSW. The second "half" of this function is in cleanup_idle.
643 643 */
644 644 ENTRY(psw_idle)
645   - stg %r4,__SF_EMPTY(%r15)
  645 + stg %r3,__SF_EMPTY(%r15)
646 646 larl %r1,psw_idle_lpsw+4
647 647 stg %r1,__SF_EMPTY+8(%r15)
648   - larl %r1,.Lvtimer_max
649   - STCK __IDLE_ENTER(%r2)
650   - ltr %r5,%r5
651   - stpt __VQ_IDLE_ENTER(%r3)
652   - jz psw_idle_lpsw
653   - spt 0(%r1)
  648 + STCK __CLOCK_IDLE_ENTER(%r2)
  649 + stpt __TIMER_IDLE_ENTER(%r2)
654 650 psw_idle_lpsw:
655 651 lpswe __SF_EMPTY(%r15)
656 652 br %r14
... ... @@ -918,33 +914,28 @@
918 914  
919 915 cleanup_idle:
920 916 # copy interrupt clock & cpu timer
921   - mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK
922   - mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
  917 + mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
  918 + mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
923 919 cghi %r11,__LC_SAVE_AREA_ASYNC
924 920 je 0f
925   - mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
926   - mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
  921 + mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
  922 + mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
927 923 0: # check if stck & stpt have been executed
928 924 clg %r9,BASED(cleanup_idle_insn)
929 925 jhe 1f
930   - mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
931   - mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
932   - j 2f
933   -1: # check if the cpu timer has been reprogrammed
934   - ltr %r5,%r5
935   - jz 2f
936   - spt __VQ_IDLE_ENTER(%r3)
937   -2: # account system time going idle
  926 + mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
  927 + mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
  928 +1: # account system time going idle
938 929 lg %r9,__LC_STEAL_TIMER
939   - alg %r9,__IDLE_ENTER(%r2)
  930 + alg %r9,__CLOCK_IDLE_ENTER(%r2)
940 931 slg %r9,__LC_LAST_UPDATE_CLOCK
941 932 stg %r9,__LC_STEAL_TIMER
942   - mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
  933 + mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
943 934 lg %r9,__LC_SYSTEM_TIMER
944 935 alg %r9,__LC_LAST_UPDATE_TIMER
945   - slg %r9,__VQ_IDLE_ENTER(%r3)
  936 + slg %r9,__TIMER_IDLE_ENTER(%r2)
946 937 stg %r9,__LC_SYSTEM_TIMER
947   - mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
  938 + mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
948 939 # prepare return psw
949 940 nihh %r8,0xfffd # clear wait state bit
950 941 lg %r9,48(%r11) # return from psw_idle
... ... @@ -960,8 +951,6 @@
960 951 .quad __critical_start
961 952 .Lcritical_length:
962 953 .quad __critical_end - __critical_start
963   -.Lvtimer_max:
964   - .quad 0x7fffffffffffffff
965 954  
966 955  
967 956 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
arch/s390/kernel/process.c
... ... @@ -25,8 +25,8 @@
25 25 #include <linux/module.h>
26 26 #include <asm/io.h>
27 27 #include <asm/processor.h>
  28 +#include <asm/vtimer.h>
28 29 #include <asm/irq.h>
29   -#include <asm/timer.h>
30 30 #include <asm/nmi.h>
31 31 #include <asm/smp.h>
32 32 #include <asm/switch_to.h>
arch/s390/kernel/smp.c
... ... @@ -38,7 +38,7 @@
38 38 #include <asm/setup.h>
39 39 #include <asm/irq.h>
40 40 #include <asm/tlbflush.h>
41   -#include <asm/timer.h>
  41 +#include <asm/vtimer.h>
42 42 #include <asm/lowcore.h>
43 43 #include <asm/sclp.h>
44 44 #include <asm/vdso.h>
... ... @@ -917,7 +917,7 @@
917 917 do {
918 918 sequence = ACCESS_ONCE(idle->sequence);
919 919 idle_count = ACCESS_ONCE(idle->idle_count);
920   - if (ACCESS_ONCE(idle->idle_enter))
  920 + if (ACCESS_ONCE(idle->clock_idle_enter))
921 921 idle_count++;
922 922 } while ((sequence & 1) || (idle->sequence != sequence));
923 923 return sprintf(buf, "%llu\n", idle_count);
... ... @@ -935,8 +935,8 @@
935 935 now = get_clock();
936 936 sequence = ACCESS_ONCE(idle->sequence);
937 937 idle_time = ACCESS_ONCE(idle->idle_time);
938   - idle_enter = ACCESS_ONCE(idle->idle_enter);
939   - idle_exit = ACCESS_ONCE(idle->idle_exit);
  938 + idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
  939 + idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
940 940 } while ((sequence & 1) || (idle->sequence != sequence));
941 941 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
942 942 return sprintf(buf, "%llu\n", idle_time >> 12);
arch/s390/kernel/time.c
... ... @@ -44,7 +44,7 @@
44 44 #include <asm/vdso.h>
45 45 #include <asm/irq.h>
46 46 #include <asm/irq_regs.h>
47   -#include <asm/timer.h>
  47 +#include <asm/vtimer.h>
48 48 #include <asm/etr.h>
49 49 #include <asm/cio.h>
50 50 #include "entry.h"
arch/s390/kernel/vtime.c
1 1 /*
2   - * arch/s390/kernel/vtime.c
3 2 * Virtual cpu timer based timer functions.
4 3 *
5   - * S390 version
6   - * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
  4 + * Copyright IBM Corp. 2004, 2012
7 5 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
8 6 */
9 7  
10   -#include <linux/module.h>
  8 +#include <linux/kernel_stat.h>
  9 +#include <linux/notifier.h>
  10 +#include <linux/kprobes.h>
  11 +#include <linux/export.h>
11 12 #include <linux/kernel.h>
12   -#include <linux/time.h>
13   -#include <linux/delay.h>
14   -#include <linux/init.h>
15   -#include <linux/smp.h>
16   -#include <linux/types.h>
17 13 #include <linux/timex.h>
18   -#include <linux/notifier.h>
19   -#include <linux/kernel_stat.h>
20   -#include <linux/rcupdate.h>
21   -#include <linux/posix-timers.h>
  14 +#include <linux/types.h>
  15 +#include <linux/time.h>
22 16 #include <linux/cpu.h>
23   -#include <linux/kprobes.h>
  17 +#include <linux/smp.h>
24 18  
25   -#include <asm/timer.h>
26 19 #include <asm/irq_regs.h>
27 20 #include <asm/cputime.h>
  21 +#include <asm/vtimer.h>
28 22 #include <asm/irq.h>
29 23 #include "entry.h"
30 24  
31   -static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
  25 +static void virt_timer_expire(void);
32 26  
33 27 DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
34 28  
35   -static inline __u64 get_vtimer(void)
  29 +static LIST_HEAD(virt_timer_list);
  30 +static DEFINE_SPINLOCK(virt_timer_lock);
  31 +static atomic64_t virt_timer_current;
  32 +static atomic64_t virt_timer_elapsed;
  33 +
  34 +static inline u64 get_vtimer(void)
36 35 {
37   - __u64 timer;
  36 + u64 timer;
38 37  
39   - asm volatile("STPT %0" : "=m" (timer));
  38 + asm volatile("stpt %0" : "=m" (timer));
40 39 return timer;
41 40 }
42 41  
43   -static inline void set_vtimer(__u64 expires)
  42 +static inline void set_vtimer(u64 expires)
44 43 {
45   - __u64 timer;
  44 + u64 timer;
46 45  
47   - asm volatile (" STPT %0\n" /* Store current cpu timer value */
48   - " SPT %1" /* Set new value immediately afterwards */
49   - : "=m" (timer) : "m" (expires) );
  46 + asm volatile(
  47 + " stpt %0\n" /* Store current cpu timer value */
  48 + " spt %1" /* Set new value imm. afterwards */
  49 + : "=m" (timer) : "m" (expires));
50 50 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
51 51 S390_lowcore.last_update_timer = expires;
52 52 }
53 53  
  54 +static inline int virt_timer_forward(u64 elapsed)
  55 +{
  56 + BUG_ON(!irqs_disabled());
  57 +
  58 + if (list_empty(&virt_timer_list))
  59 + return 0;
  60 + elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
  61 + return elapsed >= atomic64_read(&virt_timer_current);
  62 +}
  63 +
54 64 /*
55 65 * Update process times based on virtual cpu times stored by entry.S
56 66 * to the lowcore fields user_timer, system_timer & steal_clock.
57 67 */
58   -static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
  68 +static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
59 69 {
60 70 struct thread_info *ti = task_thread_info(tsk);
61   - __u64 timer, clock, user, system, steal;
  71 + u64 timer, clock, user, system, steal;
62 72  
63 73 timer = S390_lowcore.last_update_timer;
64 74 clock = S390_lowcore.last_update_clock;
65   - asm volatile (" STPT %0\n" /* Store current cpu timer value */
66   - " STCK %1" /* Store current tod clock value */
67   - : "=m" (S390_lowcore.last_update_timer),
68   - "=m" (S390_lowcore.last_update_clock) );
  75 + asm volatile(
  76 + " stpt %0\n" /* Store current cpu timer value */
  77 + " stck %1" /* Store current tod clock value */
  78 + : "=m" (S390_lowcore.last_update_timer),
  79 + "=m" (S390_lowcore.last_update_clock));
69 80 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
70 81 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
71 82  
... ... @@ -84,6 +95,8 @@
84 95 S390_lowcore.steal_timer = 0;
85 96 account_steal_time(steal);
86 97 }
  98 +
  99 + return virt_timer_forward(user + system);
87 100 }
88 101  
89 102 void account_vtime(struct task_struct *prev, struct task_struct *next)
... ... @@ -101,7 +114,8 @@
101 114  
102 115 void account_process_tick(struct task_struct *tsk, int user_tick)
103 116 {
104   - do_account_vtime(tsk, HARDIRQ_OFFSET);
  117 + if (do_account_vtime(tsk, HARDIRQ_OFFSET))
  118 + virt_timer_expire();
105 119 }
106 120  
107 121 /*
... ... @@ -111,7 +125,7 @@
111 125 void account_system_vtime(struct task_struct *tsk)
112 126 {
113 127 struct thread_info *ti = task_thread_info(tsk);
114   - __u64 timer, system;
  128 + u64 timer, system;
115 129  
116 130 timer = S390_lowcore.last_update_timer;
117 131 S390_lowcore.last_update_timer = get_vtimer();
118 132  
... ... @@ -121,13 +135,14 @@
121 135 S390_lowcore.steal_timer -= system;
122 136 ti->system_timer = S390_lowcore.system_timer;
123 137 account_system_time(tsk, 0, system, system);
  138 +
  139 + virt_timer_forward(system);
124 140 }
125 141 EXPORT_SYMBOL_GPL(account_system_vtime);
126 142  
127 143 void __kprobes vtime_stop_cpu(void)
128 144 {
129 145 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
130   - struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
131 146 unsigned long long idle_time;
132 147 unsigned long psw_mask;
133 148  
... ... @@ -141,7 +156,7 @@
141 156 idle->nohz_delay = 0;
142 157  
143 158 /* Call the assembler magic in entry.S */
144   - psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));
  159 + psw_idle(idle, psw_mask);
145 160  
146 161 /* Reenable preemption tracer. */
147 162 start_critical_timings();
148 163  
... ... @@ -149,9 +164,9 @@
149 164 /* Account time spent with enabled wait psw loaded as idle time. */
150 165 idle->sequence++;
151 166 smp_wmb();
152   - idle_time = idle->idle_exit - idle->idle_enter;
  167 + idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
  168 + idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
153 169 idle->idle_time += idle_time;
154   - idle->idle_enter = idle->idle_exit = 0ULL;
155 170 idle->idle_count++;
156 171 account_idle_time(idle_time);
157 172 smp_wmb();
158 173  
... ... @@ -167,10 +182,10 @@
167 182 do {
168 183 now = get_clock();
169 184 sequence = ACCESS_ONCE(idle->sequence);
170   - idle_enter = ACCESS_ONCE(idle->idle_enter);
171   - idle_exit = ACCESS_ONCE(idle->idle_exit);
  185 + idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
  186 + idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
172 187 } while ((sequence & 1) || (idle->sequence != sequence));
173   - return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
  188 + return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
174 189 }
175 190  
176 191 /*
177 192  
... ... @@ -179,11 +194,11 @@
179 194 */
180 195 static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
181 196 {
182   - struct vtimer_list *event;
  197 + struct vtimer_list *tmp;
183 198  
184   - list_for_each_entry(event, head, entry) {
185   - if (event->expires > timer->expires) {
186   - list_add_tail(&timer->entry, &event->entry);
  199 + list_for_each_entry(tmp, head, entry) {
  200 + if (tmp->expires > timer->expires) {
  201 + list_add_tail(&timer->entry, &tmp->entry);
187 202 return;
188 203 }
189 204 }
... ... @@ -191,82 +206,45 @@
191 206 }
192 207  
193 208 /*
194   - * Do the callback functions of expired vtimer events.
195   - * Called from within the interrupt handler.
  209 + * Handler for expired virtual CPU timer.
196 210 */
197   -static void do_callbacks(struct list_head *cb_list)
  211 +static void virt_timer_expire(void)
198 212 {
199   - struct vtimer_queue *vq;
200   - struct vtimer_list *event, *tmp;
  213 + struct vtimer_list *timer, *tmp;
  214 + unsigned long elapsed;
  215 + LIST_HEAD(cb_list);
201 216  
202   - if (list_empty(cb_list))
203   - return;
204   -
205   - vq = &__get_cpu_var(virt_cpu_timer);
206   -
207   - list_for_each_entry_safe(event, tmp, cb_list, entry) {
208   - list_del_init(&event->entry);
209   - (event->function)(event->data);
210   - if (event->interval) {
211   - /* Recharge interval timer */
212   - event->expires = event->interval + vq->elapsed;
213   - spin_lock(&vq->lock);
214   - list_add_sorted(event, &vq->list);
215   - spin_unlock(&vq->lock);
216   - }
217   - }
218   -}
219   -
220   -/*
221   - * Handler for the virtual CPU timer.
222   - */
223   -static void do_cpu_timer_interrupt(struct ext_code ext_code,
224   - unsigned int param32, unsigned long param64)
225   -{
226   - struct vtimer_queue *vq;
227   - struct vtimer_list *event, *tmp;
228   - struct list_head cb_list; /* the callback queue */
229   - __u64 elapsed, next;
230   -
231   - kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
232   - INIT_LIST_HEAD(&cb_list);
233   - vq = &__get_cpu_var(virt_cpu_timer);
234   -
235   - /* walk timer list, fire all expired events */
236   - spin_lock(&vq->lock);
237   -
238   - elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
239   - BUG_ON((s64) elapsed < 0);
240   - vq->elapsed = 0;
241   - list_for_each_entry_safe(event, tmp, &vq->list, entry) {
242   - if (event->expires < elapsed)
  217 + /* walk timer list, fire all expired timers */
  218 + spin_lock(&virt_timer_lock);
  219 + elapsed = atomic64_read(&virt_timer_elapsed);
  220 + list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
  221 + if (timer->expires < elapsed)
243 222 /* move expired timer to the callback queue */
244   - list_move_tail(&event->entry, &cb_list);
  223 + list_move_tail(&timer->entry, &cb_list);
245 224 else
246   - event->expires -= elapsed;
  225 + timer->expires -= elapsed;
247 226 }
248   - spin_unlock(&vq->lock);
  227 + if (!list_empty(&virt_timer_list)) {
  228 + timer = list_first_entry(&virt_timer_list,
  229 + struct vtimer_list, entry);
  230 + atomic64_set(&virt_timer_current, timer->expires);
  231 + }
  232 + atomic64_sub(elapsed, &virt_timer_elapsed);
  233 + spin_unlock(&virt_timer_lock);
249 234  
250   - do_callbacks(&cb_list);
251   -
252   - /* next event is first in list */
253   - next = VTIMER_MAX_SLICE;
254   - spin_lock(&vq->lock);
255   - if (!list_empty(&vq->list)) {
256   - event = list_first_entry(&vq->list, struct vtimer_list, entry);
257   - next = event->expires;
  235 + /* Do callbacks and recharge periodic timers */
  236 + list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
  237 + list_del_init(&timer->entry);
  238 + timer->function(timer->data);
  239 + if (timer->interval) {
  240 + /* Recharge interval timer */
  241 + timer->expires = timer->interval +
  242 + atomic64_read(&virt_timer_elapsed);
  243 + spin_lock(&virt_timer_lock);
  244 + list_add_sorted(timer, &virt_timer_list);
  245 + spin_unlock(&virt_timer_lock);
  246 + }
258 247 }
259   - spin_unlock(&vq->lock);
260   - /*
261   - * To improve precision add the time spent by the
262   - * interrupt handler to the elapsed time.
263   - * Note: CPU timer counts down and we got an interrupt,
264   - * the current content is negative
265   - */
266   - elapsed = S390_lowcore.async_enter_timer - get_vtimer();
267   - set_vtimer(next - elapsed);
268   - vq->timer = next - elapsed;
269   - vq->elapsed = elapsed;
270 248 }
271 249  
272 250 void init_virt_timer(struct vtimer_list *timer)
... ... @@ -278,179 +256,108 @@
278 256  
279 257 static inline int vtimer_pending(struct vtimer_list *timer)
280 258 {
281   - return (!list_empty(&timer->entry));
  259 + return !list_empty(&timer->entry);
282 260 }
283 261  
284   -/*
285   - * this function should only run on the specified CPU
286   - */
287 262 static void internal_add_vtimer(struct vtimer_list *timer)
288 263 {
289   - struct vtimer_queue *vq;
290   - unsigned long flags;
291   - __u64 left, expires;
292   -
293   - vq = &per_cpu(virt_cpu_timer, timer->cpu);
294   - spin_lock_irqsave(&vq->lock, flags);
295   -
296   - BUG_ON(timer->cpu != smp_processor_id());
297   -
298   - if (list_empty(&vq->list)) {
299   - /* First timer on this cpu, just program it. */
300   - list_add(&timer->entry, &vq->list);
301   - set_vtimer(timer->expires);
302   - vq->timer = timer->expires;
303   - vq->elapsed = 0;
  264 + if (list_empty(&virt_timer_list)) {
  265 + /* First timer, just program it. */
  266 + atomic64_set(&virt_timer_current, timer->expires);
  267 + atomic64_set(&virt_timer_elapsed, 0);
  268 + list_add(&timer->entry, &virt_timer_list);
304 269 } else {
305   - /* Check progress of old timers. */
306   - expires = timer->expires;
307   - left = get_vtimer();
308   - if (likely((s64) expires < (s64) left)) {
  270 + /* Update timer against current base. */
  271 + timer->expires += atomic64_read(&virt_timer_elapsed);
  272 + if (likely((s64) timer->expires <
  273 + (s64) atomic64_read(&virt_timer_current)))
309 274 /* The new timer expires before the current timer. */
310   - set_vtimer(expires);
311   - vq->elapsed += vq->timer - left;
312   - vq->timer = expires;
313   - } else {
314   - vq->elapsed += vq->timer - left;
315   - vq->timer = left;
316   - }
317   - /* Insert new timer into per cpu list. */
318   - timer->expires += vq->elapsed;
319   - list_add_sorted(timer, &vq->list);
  275 + atomic64_set(&virt_timer_current, timer->expires);
  276 + /* Insert new timer into the list. */
  277 + list_add_sorted(timer, &virt_timer_list);
320 278 }
321   -
322   - spin_unlock_irqrestore(&vq->lock, flags);
323   - /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
324   - put_cpu();
325 279 }
326 280  
327   -static inline void prepare_vtimer(struct vtimer_list *timer)
  281 +static void __add_vtimer(struct vtimer_list *timer, int periodic)
328 282 {
329   - BUG_ON(!timer->function);
330   - BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
331   - BUG_ON(vtimer_pending(timer));
332   - timer->cpu = get_cpu();
  283 + unsigned long flags;
  284 +
  285 + timer->interval = periodic ? timer->expires : 0;
  286 + spin_lock_irqsave(&virt_timer_lock, flags);
  287 + internal_add_vtimer(timer);
  288 + spin_unlock_irqrestore(&virt_timer_lock, flags);
333 289 }
334 290  
335 291 /*
336 292 * add_virt_timer - add an oneshot virtual CPU timer
337 293 */
338   -void add_virt_timer(void *new)
  294 +void add_virt_timer(struct vtimer_list *timer)
339 295 {
340   - struct vtimer_list *timer;
341   -
342   - timer = (struct vtimer_list *)new;
343   - prepare_vtimer(timer);
344   - timer->interval = 0;
345   - internal_add_vtimer(timer);
  296 + __add_vtimer(timer, 0);
346 297 }
347 298 EXPORT_SYMBOL(add_virt_timer);
348 299  
349 300 /*
350 301 * add_virt_timer_int - add an interval virtual CPU timer
351 302 */
352   -void add_virt_timer_periodic(void *new)
  303 +void add_virt_timer_periodic(struct vtimer_list *timer)
353 304 {
354   - struct vtimer_list *timer;
355   -
356   - timer = (struct vtimer_list *)new;
357   - prepare_vtimer(timer);
358   - timer->interval = timer->expires;
359   - internal_add_vtimer(timer);
  305 + __add_vtimer(timer, 1);
360 306 }
361 307 EXPORT_SYMBOL(add_virt_timer_periodic);
362 308  
363   -static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
  309 +static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
364 310 {
365   - struct vtimer_queue *vq;
366 311 unsigned long flags;
367   - int cpu;
  312 + int rc;
368 313  
369 314 BUG_ON(!timer->function);
370   - BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
371 315  
372 316 if (timer->expires == expires && vtimer_pending(timer))
373 317 return 1;
374   -
375   - cpu = get_cpu();
376   - vq = &per_cpu(virt_cpu_timer, cpu);
377   -
378   - /* disable interrupts before test if timer is pending */
379   - spin_lock_irqsave(&vq->lock, flags);
380   -
381   - /* if timer isn't pending add it on the current CPU */
382   - if (!vtimer_pending(timer)) {
383   - spin_unlock_irqrestore(&vq->lock, flags);
384   -
385   - if (periodic)
386   - timer->interval = expires;
387   - else
388   - timer->interval = 0;
389   - timer->expires = expires;
390   - timer->cpu = cpu;
391   - internal_add_vtimer(timer);
392   - return 0;
393   - }
394   -
395   - /* check if we run on the right CPU */
396   - BUG_ON(timer->cpu != cpu);
397   -
398   - list_del_init(&timer->entry);
  318 + spin_lock_irqsave(&virt_timer_lock, flags);
  319 + rc = vtimer_pending(timer);
  320 + if (rc)
  321 + list_del_init(&timer->entry);
  322 + timer->interval = periodic ? expires : 0;
399 323 timer->expires = expires;
400   - if (periodic)
401   - timer->interval = expires;
402   -
403   - /* the timer can't expire anymore so we can release the lock */
404   - spin_unlock_irqrestore(&vq->lock, flags);
405 324 internal_add_vtimer(timer);
406   - return 1;
  325 + spin_unlock_irqrestore(&virt_timer_lock, flags);
  326 + return rc;
407 327 }
408 328  
409 329 /*
410   - * If we change a pending timer the function must be called on the CPU
411   - * where the timer is running on.
412   - *
413 330 * returns whether it has modified a pending timer (1) or not (0)
414 331 */
415   -int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
  332 +int mod_virt_timer(struct vtimer_list *timer, u64 expires)
416 333 {
417 334 return __mod_vtimer(timer, expires, 0);
418 335 }
419 336 EXPORT_SYMBOL(mod_virt_timer);
420 337  
421 338 /*
422   - * If we change a pending timer the function must be called on the CPU
423   - * where the timer is running on.
424   - *
425 339 * returns whether it has modified a pending timer (1) or not (0)
426 340 */
427   -int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
  341 +int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
428 342 {
429 343 return __mod_vtimer(timer, expires, 1);
430 344 }
431 345 EXPORT_SYMBOL(mod_virt_timer_periodic);
432 346  
433 347 /*
434   - * delete a virtual timer
  348 + * Delete a virtual timer.
435 349 *
436 350 * returns whether the deleted timer was pending (1) or not (0)
437 351 */
438 352 int del_virt_timer(struct vtimer_list *timer)
439 353 {
440 354 unsigned long flags;
441   - struct vtimer_queue *vq;
442 355  
443   - /* check if timer is pending */
444 356 if (!vtimer_pending(timer))
445 357 return 0;
446   -
447   - vq = &per_cpu(virt_cpu_timer, timer->cpu);
448   - spin_lock_irqsave(&vq->lock, flags);
449   -
450   - /* we don't interrupt a running timer, just let it expire! */
  358 + spin_lock_irqsave(&virt_timer_lock, flags);
451 359 list_del_init(&timer->entry);
452   -
453   - spin_unlock_irqrestore(&vq->lock, flags);
  360 + spin_unlock_irqrestore(&virt_timer_lock, flags);
454 361 return 1;
455 362 }
456 363 EXPORT_SYMBOL(del_virt_timer);
... ... @@ -458,20 +365,10 @@
458 365 /*
459 366 * Start the virtual CPU timer on the current CPU.
460 367 */
461   -void init_cpu_vtimer(void)
  368 +void __cpuinit init_cpu_vtimer(void)
462 369 {
463   - struct vtimer_queue *vq;
464   -
465   - /* initialize per cpu vtimer structure */
466   - vq = &__get_cpu_var(virt_cpu_timer);
467   - INIT_LIST_HEAD(&vq->list);
468   - spin_lock_init(&vq->lock);
469   -
470   - /* enable cpu timer interrupts */
471   - __ctl_set_bit(0,10);
472   -
473 370 /* set initial cpu timer */
474   - set_vtimer(0x7fffffffffffffffULL);
  371 + set_vtimer(VTIMER_MAX_SLICE);
475 372 }
476 373  
477 374 static int __cpuinit s390_nohz_notify(struct notifier_block *self,
... ... @@ -493,10 +390,6 @@
493 390  
494 391 void __init vtime_init(void)
495 392 {
496   - /* request the cpu timer external interrupt */
497   - if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
498   - panic("Couldn't request external interrupt 0x1005");
499   -
500 393 /* Enable cpu timer interrupts on the boot cpu. */
501 394 init_cpu_vtimer();
502 395 cpu_notifier(s390_nohz_notify, 0);
arch/s390/lib/delay.c
... ... @@ -12,8 +12,8 @@
12 12 #include <linux/module.h>
13 13 #include <linux/irqflags.h>
14 14 #include <linux/interrupt.h>
  15 +#include <asm/vtimer.h>
15 16 #include <asm/div64.h>
16   -#include <asm/timer.h>
17 17  
18 18 void __delay(unsigned long loops)
19 19 {