Commit 76d4e00a05d06c1d1552adea24fcf6182c9d8999

Authored by Martin Schwidefsky
1 parent: 7aa79f9487

[S390] merge cpu.h into cputime.h

All definitions in cpu.h have to do with cputime accounting. Move
them to cputime.h and remove the header file.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
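
For users of the removed header the practical effect is just a changed include; a minimal before/after sketch (illustrative only, not part of the diff below):

    /* before this commit: the idle accounting declarations had their own header */
    #include <asm/cpu.h>

    /* after this commit: the same declarations ship with the cputime helpers */
    #include <asm/cputime.h>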

Showing 7 changed files with 24 additions and 37 deletions

arch/s390/include/asm/cpu.h
1 /* File was deleted
2 * include/asm-s390/cpu.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8 #ifndef _ASM_S390_CPU_H_
9 #define _ASM_S390_CPU_H_
10
11 #include <linux/types.h>
12 #include <linux/percpu.h>
13 #include <linux/spinlock.h>
14
15 struct s390_idle_data {
16 spinlock_t lock;
17 unsigned long long idle_count;
18 unsigned long long idle_enter;
19 unsigned long long idle_time;
20 };
21
22 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
23
24 void vtime_start_cpu(void);
25
26 static inline void s390_idle_check(void)
27 {
28 if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
29 vtime_start_cpu();
30 }
31
32 #endif /* _ASM_S390_CPU_H_ */
33
arch/s390/include/asm/cputime.h
1 /* 1 /*
2 * include/asm-s390/cputime.h 2 * include/asm-s390/cputime.h
3 * 3 *
4 * (C) Copyright IBM Corp. 2004 4 * (C) Copyright IBM Corp. 2004
5 * 5 *
6 * Author: Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */ 7 */
8 8
9 #ifndef _S390_CPUTIME_H 9 #ifndef _S390_CPUTIME_H
10 #define _S390_CPUTIME_H 10 #define _S390_CPUTIME_H
11 11
12 #include <linux/types.h>
13 #include <linux/percpu.h>
14 #include <linux/spinlock.h>
12 #include <asm/div64.h> 15 #include <asm/div64.h>
13 16
14 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ 17 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
15 18
16 typedef unsigned long long cputime_t; 19 typedef unsigned long long cputime_t;
17 typedef unsigned long long cputime64_t; 20 typedef unsigned long long cputime64_t;
18 21
19 #ifndef __s390x__ 22 #ifndef __s390x__
20 23
21 static inline unsigned int 24 static inline unsigned int
22 __div(unsigned long long n, unsigned int base) 25 __div(unsigned long long n, unsigned int base)
23 { 26 {
24 register_pair rp; 27 register_pair rp;
25 28
26 rp.pair = n >> 1; 29 rp.pair = n >> 1;
27 asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1)); 30 asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
28 return rp.subreg.odd; 31 return rp.subreg.odd;
29 } 32 }
30 33
31 #else /* __s390x__ */ 34 #else /* __s390x__ */
32 35
33 static inline unsigned int 36 static inline unsigned int
34 __div(unsigned long long n, unsigned int base) 37 __div(unsigned long long n, unsigned int base)
35 { 38 {
36 return n / base; 39 return n / base;
37 } 40 }
38 41
39 #endif /* __s390x__ */ 42 #endif /* __s390x__ */
40 43
41 #define cputime_zero (0ULL) 44 #define cputime_zero (0ULL)
42 #define cputime_max ((~0UL >> 1) - 1) 45 #define cputime_max ((~0UL >> 1) - 1)
43 #define cputime_add(__a, __b) ((__a) + (__b)) 46 #define cputime_add(__a, __b) ((__a) + (__b))
44 #define cputime_sub(__a, __b) ((__a) - (__b)) 47 #define cputime_sub(__a, __b) ((__a) - (__b))
45 #define cputime_div(__a, __n) ({ \ 48 #define cputime_div(__a, __n) ({ \
46 unsigned long long __div = (__a); \ 49 unsigned long long __div = (__a); \
47 do_div(__div,__n); \ 50 do_div(__div,__n); \
48 __div; \ 51 __div; \
49 }) 52 })
50 #define cputime_halve(__a) ((__a) >> 1) 53 #define cputime_halve(__a) ((__a) >> 1)
51 #define cputime_eq(__a, __b) ((__a) == (__b)) 54 #define cputime_eq(__a, __b) ((__a) == (__b))
52 #define cputime_gt(__a, __b) ((__a) > (__b)) 55 #define cputime_gt(__a, __b) ((__a) > (__b))
53 #define cputime_ge(__a, __b) ((__a) >= (__b)) 56 #define cputime_ge(__a, __b) ((__a) >= (__b))
54 #define cputime_lt(__a, __b) ((__a) < (__b)) 57 #define cputime_lt(__a, __b) ((__a) < (__b))
55 #define cputime_le(__a, __b) ((__a) <= (__b)) 58 #define cputime_le(__a, __b) ((__a) <= (__b))
56 #define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ)) 59 #define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ))
57 #define cputime_to_scaled(__ct) (__ct) 60 #define cputime_to_scaled(__ct) (__ct)
58 #define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ)) 61 #define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ))
59 62
60 #define cputime64_zero (0ULL) 63 #define cputime64_zero (0ULL)
61 #define cputime64_add(__a, __b) ((__a) + (__b)) 64 #define cputime64_add(__a, __b) ((__a) + (__b))
62 #define cputime_to_cputime64(__ct) (__ct) 65 #define cputime_to_cputime64(__ct) (__ct)
63 66
64 static inline u64 67 static inline u64
65 cputime64_to_jiffies64(cputime64_t cputime) 68 cputime64_to_jiffies64(cputime64_t cputime)
66 { 69 {
67 do_div(cputime, 4096000000ULL / HZ); 70 do_div(cputime, 4096000000ULL / HZ);
68 return cputime; 71 return cputime;
69 } 72 }
70 73
71 /* 74 /*
72 * Convert cputime to milliseconds and back. 75 * Convert cputime to milliseconds and back.
73 */ 76 */
74 static inline unsigned int 77 static inline unsigned int
75 cputime_to_msecs(const cputime_t cputime) 78 cputime_to_msecs(const cputime_t cputime)
76 { 79 {
77 return __div(cputime, 4096000); 80 return __div(cputime, 4096000);
78 } 81 }
79 82
80 static inline cputime_t 83 static inline cputime_t
81 msecs_to_cputime(const unsigned int m) 84 msecs_to_cputime(const unsigned int m)
82 { 85 {
83 return (cputime_t) m * 4096000; 86 return (cputime_t) m * 4096000;
84 } 87 }
85 88
86 /* 89 /*
87 * Convert cputime to milliseconds and back. 90 * Convert cputime to milliseconds and back.
88 */ 91 */
89 static inline unsigned int 92 static inline unsigned int
90 cputime_to_secs(const cputime_t cputime) 93 cputime_to_secs(const cputime_t cputime)
91 { 94 {
92 return __div(cputime, 2048000000) >> 1; 95 return __div(cputime, 2048000000) >> 1;
93 } 96 }
94 97
95 static inline cputime_t 98 static inline cputime_t
96 secs_to_cputime(const unsigned int s) 99 secs_to_cputime(const unsigned int s)
97 { 100 {
98 return (cputime_t) s * 4096000000ULL; 101 return (cputime_t) s * 4096000000ULL;
99 } 102 }
100 103
101 /* 104 /*
102 * Convert cputime to timespec and back. 105 * Convert cputime to timespec and back.
103 */ 106 */
104 static inline cputime_t 107 static inline cputime_t
105 timespec_to_cputime(const struct timespec *value) 108 timespec_to_cputime(const struct timespec *value)
106 { 109 {
107 return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL; 110 return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
108 } 111 }
109 112
110 static inline void 113 static inline void
111 cputime_to_timespec(const cputime_t cputime, struct timespec *value) 114 cputime_to_timespec(const cputime_t cputime, struct timespec *value)
112 { 115 {
113 #ifndef __s390x__ 116 #ifndef __s390x__
114 register_pair rp; 117 register_pair rp;
115 118
116 rp.pair = cputime >> 1; 119 rp.pair = cputime >> 1;
117 asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); 120 asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
118 value->tv_nsec = rp.subreg.even * 1000 / 4096; 121 value->tv_nsec = rp.subreg.even * 1000 / 4096;
119 value->tv_sec = rp.subreg.odd; 122 value->tv_sec = rp.subreg.odd;
120 #else 123 #else
121 value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096; 124 value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
122 value->tv_sec = cputime / 4096000000ULL; 125 value->tv_sec = cputime / 4096000000ULL;
123 #endif 126 #endif
124 } 127 }
125 128
126 /* 129 /*
127 * Convert cputime to timeval and back. 130 * Convert cputime to timeval and back.
128 * Since cputime and timeval have the same resolution (microseconds) 131 * Since cputime and timeval have the same resolution (microseconds)
129 * this is easy. 132 * this is easy.
130 */ 133 */
131 static inline cputime_t 134 static inline cputime_t
132 timeval_to_cputime(const struct timeval *value) 135 timeval_to_cputime(const struct timeval *value)
133 { 136 {
134 return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL; 137 return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
135 } 138 }
136 139
137 static inline void 140 static inline void
138 cputime_to_timeval(const cputime_t cputime, struct timeval *value) 141 cputime_to_timeval(const cputime_t cputime, struct timeval *value)
139 { 142 {
140 #ifndef __s390x__ 143 #ifndef __s390x__
141 register_pair rp; 144 register_pair rp;
142 145
143 rp.pair = cputime >> 1; 146 rp.pair = cputime >> 1;
144 asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); 147 asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
145 value->tv_usec = rp.subreg.even / 4096; 148 value->tv_usec = rp.subreg.even / 4096;
146 value->tv_sec = rp.subreg.odd; 149 value->tv_sec = rp.subreg.odd;
147 #else 150 #else
148 value->tv_usec = (cputime % 4096000000ULL) / 4096; 151 value->tv_usec = (cputime % 4096000000ULL) / 4096;
149 value->tv_sec = cputime / 4096000000ULL; 152 value->tv_sec = cputime / 4096000000ULL;
150 #endif 153 #endif
151 } 154 }
152 155
153 /* 156 /*
154 * Convert cputime to clock and back. 157 * Convert cputime to clock and back.
155 */ 158 */
156 static inline clock_t 159 static inline clock_t
157 cputime_to_clock_t(cputime_t cputime) 160 cputime_to_clock_t(cputime_t cputime)
158 { 161 {
159 return __div(cputime, 4096000000ULL / USER_HZ); 162 return __div(cputime, 4096000000ULL / USER_HZ);
160 } 163 }
161 164
162 static inline cputime_t 165 static inline cputime_t
163 clock_t_to_cputime(unsigned long x) 166 clock_t_to_cputime(unsigned long x)
164 { 167 {
165 return (cputime_t) x * (4096000000ULL / USER_HZ); 168 return (cputime_t) x * (4096000000ULL / USER_HZ);
166 } 169 }
167 170
168 /* 171 /*
169 * Convert cputime64 to clock. 172 * Convert cputime64 to clock.
170 */ 173 */
171 static inline clock_t 174 static inline clock_t
172 cputime64_to_clock_t(cputime64_t cputime) 175 cputime64_to_clock_t(cputime64_t cputime)
173 { 176 {
174 return __div(cputime, 4096000000ULL / USER_HZ); 177 return __div(cputime, 4096000000ULL / USER_HZ);
175 } 178 }
176 179
180 struct s390_idle_data {
181 spinlock_t lock;
182 unsigned long long idle_count;
183 unsigned long long idle_enter;
184 unsigned long long idle_time;
185 };
186
187 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
188
189 void vtime_start_cpu(void);
177 cputime64_t s390_get_idle_time(int cpu); 190 cputime64_t s390_get_idle_time(int cpu);
178 191
179 #define arch_idle_time(cpu) s390_get_idle_time(cpu) 192 #define arch_idle_time(cpu) s390_get_idle_time(cpu)
193
194 static inline void s390_idle_check(void)
195 {
196 if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
197 vtime_start_cpu();
198 }
180 199
181 #endif /* _S390_CPUTIME_H */ 200 #endif /* _S390_CPUTIME_H */
182 201
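
As a quick sanity check of the constants in the header above: with the CPU timer counted in 2**-12 microsecond units, a microsecond is 4096 units, a millisecond 4096000 and a second 4096000000ULL, exactly the factors used by cputime_to_msecs(), secs_to_cputime() and the jiffies/clock_t conversions. A stand-alone sketch of that arithmetic (illustrative only, not part of the commit):

    #include <assert.h>

    int main(void)
    {
        unsigned long long per_usec = 1ULL << 12;      /* 2**-12 us resolution */
        unsigned long long per_msec = per_usec * 1000; /* 4096000 */
        unsigned long long per_sec  = per_msec * 1000; /* 4096000000ULL */

        assert(per_usec == 4096);
        assert(per_msec == 4096000);
        assert(per_sec  == 4096000000ULL);
        return 0;
    }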
arch/s390/kernel/nmi.c
1 /* 1 /*
2 * Machine check handler 2 * Machine check handler
3 * 3 *
4 * Copyright IBM Corp. 2000,2009 4 * Copyright IBM Corp. 2000,2009
5 * Author(s): Ingo Adlung <adlung@de.ibm.com>, 5 * Author(s): Ingo Adlung <adlung@de.ibm.com>,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Cornelia Huck <cornelia.huck@de.ibm.com>, 7 * Cornelia Huck <cornelia.huck@de.ibm.com>,
8 * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 * Heiko Carstens <heiko.carstens@de.ibm.com>,
9 */ 9 */
10 10
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <linux/errno.h> 12 #include <linux/errno.h>
13 #include <linux/hardirq.h> 13 #include <linux/hardirq.h>
14 #include <linux/time.h> 14 #include <linux/time.h>
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <asm/lowcore.h> 16 #include <asm/lowcore.h>
17 #include <asm/smp.h> 17 #include <asm/smp.h>
18 #include <asm/etr.h> 18 #include <asm/etr.h>
19 #include <asm/cpu.h> 19 #include <asm/cputime.h>
20 #include <asm/nmi.h> 20 #include <asm/nmi.h>
21 #include <asm/crw.h> 21 #include <asm/crw.h>
22 22
23 struct mcck_struct { 23 struct mcck_struct {
24 int kill_task; 24 int kill_task;
25 int channel_report; 25 int channel_report;
26 int warning; 26 int warning;
27 unsigned long long mcck_code; 27 unsigned long long mcck_code;
28 }; 28 };
29 29
30 static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck); 30 static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
31 31
32 static NORET_TYPE void s390_handle_damage(char *msg) 32 static NORET_TYPE void s390_handle_damage(char *msg)
33 { 33 {
34 smp_send_stop(); 34 smp_send_stop();
35 disabled_wait((unsigned long) __builtin_return_address(0)); 35 disabled_wait((unsigned long) __builtin_return_address(0));
36 while (1); 36 while (1);
37 } 37 }
38 38
39 /* 39 /*
40 * Main machine check handler function. Will be called with interrupts enabled 40 * Main machine check handler function. Will be called with interrupts enabled
41 * or disabled and machine checks enabled or disabled. 41 * or disabled and machine checks enabled or disabled.
42 */ 42 */
43 void s390_handle_mcck(void) 43 void s390_handle_mcck(void)
44 { 44 {
45 unsigned long flags; 45 unsigned long flags;
46 struct mcck_struct mcck; 46 struct mcck_struct mcck;
47 47
48 /* 48 /*
49 * Disable machine checks and get the current state of accumulated 49 * Disable machine checks and get the current state of accumulated
50 * machine checks. Afterwards delete the old state and enable machine 50 * machine checks. Afterwards delete the old state and enable machine
51 * checks again. 51 * checks again.
52 */ 52 */
53 local_irq_save(flags); 53 local_irq_save(flags);
54 local_mcck_disable(); 54 local_mcck_disable();
55 mcck = __get_cpu_var(cpu_mcck); 55 mcck = __get_cpu_var(cpu_mcck);
56 memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct)); 56 memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
57 clear_thread_flag(TIF_MCCK_PENDING); 57 clear_thread_flag(TIF_MCCK_PENDING);
58 local_mcck_enable(); 58 local_mcck_enable();
59 local_irq_restore(flags); 59 local_irq_restore(flags);
60 60
61 if (mcck.channel_report) 61 if (mcck.channel_report)
62 crw_handle_channel_report(); 62 crw_handle_channel_report();
63 /* 63 /*
64 * A warning may remain for a prolonged period on the bare iron. 64 * A warning may remain for a prolonged period on the bare iron.
65 * (actually until the machine is powered off, or the problem is gone) 65 * (actually until the machine is powered off, or the problem is gone)
66 * So we just stop listening for the WARNING MCH and avoid continuously 66 * So we just stop listening for the WARNING MCH and avoid continuously
67 * being interrupted. One caveat is however, that we must do this per 67 * being interrupted. One caveat is however, that we must do this per
68 * processor and cannot use the smp version of ctl_clear_bit(). 68 * processor and cannot use the smp version of ctl_clear_bit().
69 * On VM we only get one interrupt per virtally presented machinecheck. 69 * On VM we only get one interrupt per virtally presented machinecheck.
70 * Though one suffices, we may get one interrupt per (virtual) cpu. 70 * Though one suffices, we may get one interrupt per (virtual) cpu.
71 */ 71 */
72 if (mcck.warning) { /* WARNING pending ? */ 72 if (mcck.warning) { /* WARNING pending ? */
73 static int mchchk_wng_posted = 0; 73 static int mchchk_wng_posted = 0;
74 74
75 /* Use single cpu clear, as we cannot handle smp here. */ 75 /* Use single cpu clear, as we cannot handle smp here. */
76 __ctl_clear_bit(14, 24); /* Disable WARNING MCH */ 76 __ctl_clear_bit(14, 24); /* Disable WARNING MCH */
77 if (xchg(&mchchk_wng_posted, 1) == 0) 77 if (xchg(&mchchk_wng_posted, 1) == 0)
78 kill_cad_pid(SIGPWR, 1); 78 kill_cad_pid(SIGPWR, 1);
79 } 79 }
80 if (mcck.kill_task) { 80 if (mcck.kill_task) {
81 local_irq_enable(); 81 local_irq_enable();
82 printk(KERN_EMERG "mcck: Terminating task because of machine " 82 printk(KERN_EMERG "mcck: Terminating task because of machine "
83 "malfunction (code 0x%016llx).\n", mcck.mcck_code); 83 "malfunction (code 0x%016llx).\n", mcck.mcck_code);
84 printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", 84 printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
85 current->comm, current->pid); 85 current->comm, current->pid);
86 do_exit(SIGSEGV); 86 do_exit(SIGSEGV);
87 } 87 }
88 } 88 }
89 EXPORT_SYMBOL_GPL(s390_handle_mcck); 89 EXPORT_SYMBOL_GPL(s390_handle_mcck);
90 90
91 /* 91 /*
92 * returns 0 if all registers could be validated 92 * returns 0 if all registers could be validated
93 * returns 1 otherwise 93 * returns 1 otherwise
94 */ 94 */
95 static int notrace s390_revalidate_registers(struct mci *mci) 95 static int notrace s390_revalidate_registers(struct mci *mci)
96 { 96 {
97 int kill_task; 97 int kill_task;
98 u64 tmpclock; 98 u64 tmpclock;
99 u64 zero; 99 u64 zero;
100 void *fpt_save_area, *fpt_creg_save_area; 100 void *fpt_save_area, *fpt_creg_save_area;
101 101
102 kill_task = 0; 102 kill_task = 0;
103 zero = 0; 103 zero = 0;
104 104
105 if (!mci->gr) { 105 if (!mci->gr) {
106 /* 106 /*
107 * General purpose registers couldn't be restored and have 107 * General purpose registers couldn't be restored and have
108 * unknown contents. Process needs to be terminated. 108 * unknown contents. Process needs to be terminated.
109 */ 109 */
110 kill_task = 1; 110 kill_task = 1;
111 } 111 }
112 if (!mci->fp) { 112 if (!mci->fp) {
113 /* 113 /*
114 * Floating point registers can't be restored and 114 * Floating point registers can't be restored and
115 * therefore the process needs to be terminated. 115 * therefore the process needs to be terminated.
116 */ 116 */
117 kill_task = 1; 117 kill_task = 1;
118 } 118 }
119 #ifndef CONFIG_64BIT 119 #ifndef CONFIG_64BIT
120 asm volatile( 120 asm volatile(
121 " ld 0,0(%0)\n" 121 " ld 0,0(%0)\n"
122 " ld 2,8(%0)\n" 122 " ld 2,8(%0)\n"
123 " ld 4,16(%0)\n" 123 " ld 4,16(%0)\n"
124 " ld 6,24(%0)" 124 " ld 6,24(%0)"
125 : : "a" (&S390_lowcore.floating_pt_save_area)); 125 : : "a" (&S390_lowcore.floating_pt_save_area));
126 #endif 126 #endif
127 127
128 if (MACHINE_HAS_IEEE) { 128 if (MACHINE_HAS_IEEE) {
129 #ifdef CONFIG_64BIT 129 #ifdef CONFIG_64BIT
130 fpt_save_area = &S390_lowcore.floating_pt_save_area; 130 fpt_save_area = &S390_lowcore.floating_pt_save_area;
131 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 131 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
132 #else 132 #else
133 fpt_save_area = (void *) S390_lowcore.extended_save_area_addr; 133 fpt_save_area = (void *) S390_lowcore.extended_save_area_addr;
134 fpt_creg_save_area = fpt_save_area + 128; 134 fpt_creg_save_area = fpt_save_area + 128;
135 #endif 135 #endif
136 if (!mci->fc) { 136 if (!mci->fc) {
137 /* 137 /*
138 * Floating point control register can't be restored. 138 * Floating point control register can't be restored.
139 * Task will be terminated. 139 * Task will be terminated.
140 */ 140 */
141 asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); 141 asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
142 kill_task = 1; 142 kill_task = 1;
143 143
144 } else 144 } else
145 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); 145 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
146 146
147 asm volatile( 147 asm volatile(
148 " ld 0,0(%0)\n" 148 " ld 0,0(%0)\n"
149 " ld 1,8(%0)\n" 149 " ld 1,8(%0)\n"
150 " ld 2,16(%0)\n" 150 " ld 2,16(%0)\n"
151 " ld 3,24(%0)\n" 151 " ld 3,24(%0)\n"
152 " ld 4,32(%0)\n" 152 " ld 4,32(%0)\n"
153 " ld 5,40(%0)\n" 153 " ld 5,40(%0)\n"
154 " ld 6,48(%0)\n" 154 " ld 6,48(%0)\n"
155 " ld 7,56(%0)\n" 155 " ld 7,56(%0)\n"
156 " ld 8,64(%0)\n" 156 " ld 8,64(%0)\n"
157 " ld 9,72(%0)\n" 157 " ld 9,72(%0)\n"
158 " ld 10,80(%0)\n" 158 " ld 10,80(%0)\n"
159 " ld 11,88(%0)\n" 159 " ld 11,88(%0)\n"
160 " ld 12,96(%0)\n" 160 " ld 12,96(%0)\n"
161 " ld 13,104(%0)\n" 161 " ld 13,104(%0)\n"
162 " ld 14,112(%0)\n" 162 " ld 14,112(%0)\n"
163 " ld 15,120(%0)\n" 163 " ld 15,120(%0)\n"
164 : : "a" (fpt_save_area)); 164 : : "a" (fpt_save_area));
165 } 165 }
166 /* Revalidate access registers */ 166 /* Revalidate access registers */
167 asm volatile( 167 asm volatile(
168 " lam 0,15,0(%0)" 168 " lam 0,15,0(%0)"
169 : : "a" (&S390_lowcore.access_regs_save_area)); 169 : : "a" (&S390_lowcore.access_regs_save_area));
170 if (!mci->ar) { 170 if (!mci->ar) {
171 /* 171 /*
172 * Access registers have unknown contents. 172 * Access registers have unknown contents.
173 * Terminating task. 173 * Terminating task.
174 */ 174 */
175 kill_task = 1; 175 kill_task = 1;
176 } 176 }
177 /* Revalidate control registers */ 177 /* Revalidate control registers */
178 if (!mci->cr) { 178 if (!mci->cr) {
179 /* 179 /*
180 * Control registers have unknown contents. 180 * Control registers have unknown contents.
181 * Can't recover and therefore stopping machine. 181 * Can't recover and therefore stopping machine.
182 */ 182 */
183 s390_handle_damage("invalid control registers."); 183 s390_handle_damage("invalid control registers.");
184 } else { 184 } else {
185 #ifdef CONFIG_64BIT 185 #ifdef CONFIG_64BIT
186 asm volatile( 186 asm volatile(
187 " lctlg 0,15,0(%0)" 187 " lctlg 0,15,0(%0)"
188 : : "a" (&S390_lowcore.cregs_save_area)); 188 : : "a" (&S390_lowcore.cregs_save_area));
189 #else 189 #else
190 asm volatile( 190 asm volatile(
191 " lctl 0,15,0(%0)" 191 " lctl 0,15,0(%0)"
192 : : "a" (&S390_lowcore.cregs_save_area)); 192 : : "a" (&S390_lowcore.cregs_save_area));
193 #endif 193 #endif
194 } 194 }
195 /* 195 /*
196 * We don't even try to revalidate the TOD register, since we simply 196 * We don't even try to revalidate the TOD register, since we simply
197 * can't write something sensible into that register. 197 * can't write something sensible into that register.
198 */ 198 */
199 #ifdef CONFIG_64BIT 199 #ifdef CONFIG_64BIT
200 /* 200 /*
201 * See if we can revalidate the TOD programmable register with its 201 * See if we can revalidate the TOD programmable register with its
202 * old contents (should be zero) otherwise set it to zero. 202 * old contents (should be zero) otherwise set it to zero.
203 */ 203 */
204 if (!mci->pr) 204 if (!mci->pr)
205 asm volatile( 205 asm volatile(
206 " sr 0,0\n" 206 " sr 0,0\n"
207 " sckpf" 207 " sckpf"
208 : : : "0", "cc"); 208 : : : "0", "cc");
209 else 209 else
210 asm volatile( 210 asm volatile(
211 " l 0,0(%0)\n" 211 " l 0,0(%0)\n"
212 " sckpf" 212 " sckpf"
213 : : "a" (&S390_lowcore.tod_progreg_save_area) 213 : : "a" (&S390_lowcore.tod_progreg_save_area)
214 : "0", "cc"); 214 : "0", "cc");
215 #endif 215 #endif
216 /* Revalidate clock comparator register */ 216 /* Revalidate clock comparator register */
217 asm volatile( 217 asm volatile(
218 " stck 0(%1)\n" 218 " stck 0(%1)\n"
219 " sckc 0(%1)" 219 " sckc 0(%1)"
220 : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); 220 : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
221 221
222 /* Check if old PSW is valid */ 222 /* Check if old PSW is valid */
223 if (!mci->wp) 223 if (!mci->wp)
224 /* 224 /*
225 * Can't tell if we come from user or kernel mode 225 * Can't tell if we come from user or kernel mode
226 * -> stopping machine. 226 * -> stopping machine.
227 */ 227 */
228 s390_handle_damage("old psw invalid."); 228 s390_handle_damage("old psw invalid.");
229 229
230 if (!mci->ms || !mci->pm || !mci->ia) 230 if (!mci->ms || !mci->pm || !mci->ia)
231 kill_task = 1; 231 kill_task = 1;
232 232
233 return kill_task; 233 return kill_task;
234 } 234 }
235 235
236 #define MAX_IPD_COUNT 29 236 #define MAX_IPD_COUNT 29
237 #define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */ 237 #define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */
238 238
239 #define ED_STP_ISLAND 6 /* External damage STP island check */ 239 #define ED_STP_ISLAND 6 /* External damage STP island check */
240 #define ED_STP_SYNC 7 /* External damage STP sync check */ 240 #define ED_STP_SYNC 7 /* External damage STP sync check */
241 #define ED_ETR_SYNC 12 /* External damage ETR sync check */ 241 #define ED_ETR_SYNC 12 /* External damage ETR sync check */
242 #define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ 242 #define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
243 243
244 /* 244 /*
245 * machine check handler. 245 * machine check handler.
246 */ 246 */
247 void notrace s390_do_machine_check(struct pt_regs *regs) 247 void notrace s390_do_machine_check(struct pt_regs *regs)
248 { 248 {
249 static int ipd_count; 249 static int ipd_count;
250 static DEFINE_SPINLOCK(ipd_lock); 250 static DEFINE_SPINLOCK(ipd_lock);
251 static unsigned long long last_ipd; 251 static unsigned long long last_ipd;
252 struct mcck_struct *mcck; 252 struct mcck_struct *mcck;
253 unsigned long long tmp; 253 unsigned long long tmp;
254 struct mci *mci; 254 struct mci *mci;
255 int umode; 255 int umode;
256 256
257 nmi_enter(); 257 nmi_enter();
258 s390_idle_check(); 258 s390_idle_check();
259 259
260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
261 mcck = &__get_cpu_var(cpu_mcck); 261 mcck = &__get_cpu_var(cpu_mcck);
262 umode = user_mode(regs); 262 umode = user_mode(regs);
263 263
264 if (mci->sd) { 264 if (mci->sd) {
265 /* System damage -> stopping machine */ 265 /* System damage -> stopping machine */
266 s390_handle_damage("received system damage machine check."); 266 s390_handle_damage("received system damage machine check.");
267 } 267 }
268 if (mci->pd) { 268 if (mci->pd) {
269 if (mci->b) { 269 if (mci->b) {
270 /* Processing backup -> verify if we can survive this */ 270 /* Processing backup -> verify if we can survive this */
271 u64 z_mcic, o_mcic, t_mcic; 271 u64 z_mcic, o_mcic, t_mcic;
272 #ifdef CONFIG_64BIT 272 #ifdef CONFIG_64BIT
273 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); 273 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
274 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 274 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
275 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 275 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
276 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | 276 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
277 1ULL<<16); 277 1ULL<<16);
278 #else 278 #else
279 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 | 279 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
280 1ULL<<29); 280 1ULL<<29);
281 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 281 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
282 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 282 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
283 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16); 283 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
284 #endif 284 #endif
285 t_mcic = *(u64 *)mci; 285 t_mcic = *(u64 *)mci;
286 286
287 if (((t_mcic & z_mcic) != 0) || 287 if (((t_mcic & z_mcic) != 0) ||
288 ((t_mcic & o_mcic) != o_mcic)) { 288 ((t_mcic & o_mcic) != o_mcic)) {
289 s390_handle_damage("processing backup machine " 289 s390_handle_damage("processing backup machine "
290 "check with damage."); 290 "check with damage.");
291 } 291 }
292 292
293 /* 293 /*
294 * Nullifying exigent condition, therefore we might 294 * Nullifying exigent condition, therefore we might
295 * retry this instruction. 295 * retry this instruction.
296 */ 296 */
297 spin_lock(&ipd_lock); 297 spin_lock(&ipd_lock);
298 tmp = get_clock(); 298 tmp = get_clock();
299 if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME) 299 if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
300 ipd_count++; 300 ipd_count++;
301 else 301 else
302 ipd_count = 1; 302 ipd_count = 1;
303 last_ipd = tmp; 303 last_ipd = tmp;
304 if (ipd_count == MAX_IPD_COUNT) 304 if (ipd_count == MAX_IPD_COUNT)
305 s390_handle_damage("too many ipd retries."); 305 s390_handle_damage("too many ipd retries.");
306 spin_unlock(&ipd_lock); 306 spin_unlock(&ipd_lock);
307 } else { 307 } else {
308 /* Processing damage -> stopping machine */ 308 /* Processing damage -> stopping machine */
309 s390_handle_damage("received instruction processing " 309 s390_handle_damage("received instruction processing "
310 "damage machine check."); 310 "damage machine check.");
311 } 311 }
312 } 312 }
313 if (s390_revalidate_registers(mci)) { 313 if (s390_revalidate_registers(mci)) {
314 if (umode) { 314 if (umode) {
315 /* 315 /*
316 * Couldn't restore all register contents while in 316 * Couldn't restore all register contents while in
317 * user mode -> mark task for termination. 317 * user mode -> mark task for termination.
318 */ 318 */
319 mcck->kill_task = 1; 319 mcck->kill_task = 1;
320 mcck->mcck_code = *(unsigned long long *) mci; 320 mcck->mcck_code = *(unsigned long long *) mci;
321 set_thread_flag(TIF_MCCK_PENDING); 321 set_thread_flag(TIF_MCCK_PENDING);
322 } else { 322 } else {
323 /* 323 /*
324 * Couldn't restore all register contents while in 324 * Couldn't restore all register contents while in
325 * kernel mode -> stopping machine. 325 * kernel mode -> stopping machine.
326 */ 326 */
327 s390_handle_damage("unable to revalidate registers."); 327 s390_handle_damage("unable to revalidate registers.");
328 } 328 }
329 } 329 }
330 if (mci->cd) { 330 if (mci->cd) {
331 /* Timing facility damage */ 331 /* Timing facility damage */
332 s390_handle_damage("TOD clock damaged"); 332 s390_handle_damage("TOD clock damaged");
333 } 333 }
334 if (mci->ed && mci->ec) { 334 if (mci->ed && mci->ec) {
335 /* External damage */ 335 /* External damage */
336 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC)) 336 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
337 etr_sync_check(); 337 etr_sync_check();
338 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) 338 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
339 etr_switch_to_local(); 339 etr_switch_to_local();
340 if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC)) 340 if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
341 stp_sync_check(); 341 stp_sync_check();
342 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND)) 342 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
343 stp_island_check(); 343 stp_island_check();
344 } 344 }
345 if (mci->se) 345 if (mci->se)
346 /* Storage error uncorrected */ 346 /* Storage error uncorrected */
347 s390_handle_damage("received storage error uncorrected " 347 s390_handle_damage("received storage error uncorrected "
348 "machine check."); 348 "machine check.");
349 if (mci->ke) 349 if (mci->ke)
350 /* Storage key-error uncorrected */ 350 /* Storage key-error uncorrected */
351 s390_handle_damage("received storage key-error uncorrected " 351 s390_handle_damage("received storage key-error uncorrected "
352 "machine check."); 352 "machine check.");
353 if (mci->ds && mci->fa) 353 if (mci->ds && mci->fa)
354 /* Storage degradation */ 354 /* Storage degradation */
355 s390_handle_damage("received storage degradation machine " 355 s390_handle_damage("received storage degradation machine "
356 "check."); 356 "check.");
357 if (mci->cp) { 357 if (mci->cp) {
358 /* Channel report word pending */ 358 /* Channel report word pending */
359 mcck->channel_report = 1; 359 mcck->channel_report = 1;
360 set_thread_flag(TIF_MCCK_PENDING); 360 set_thread_flag(TIF_MCCK_PENDING);
361 } 361 }
362 if (mci->w) { 362 if (mci->w) {
363 /* Warning pending */ 363 /* Warning pending */
364 mcck->warning = 1; 364 mcck->warning = 1;
365 set_thread_flag(TIF_MCCK_PENDING); 365 set_thread_flag(TIF_MCCK_PENDING);
366 } 366 }
367 nmi_exit(); 367 nmi_exit();
368 } 368 }
369 369
370 static int __init machine_check_init(void) 370 static int __init machine_check_init(void)
371 { 371 {
372 ctl_set_bit(14, 25); /* enable external damage MCH */ 372 ctl_set_bit(14, 25); /* enable external damage MCH */
373 ctl_set_bit(14, 27); /* enable system recovery MCH */ 373 ctl_set_bit(14, 27); /* enable system recovery MCH */
374 ctl_set_bit(14, 24); /* enable warning MCH */ 374 ctl_set_bit(14, 24); /* enable warning MCH */
375 return 0; 375 return 0;
376 } 376 }
377 arch_initcall(machine_check_init); 377 arch_initcall(machine_check_init);
378 378
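
Both entry paths touched by this commit, the machine check handler above and do_extint() below, call s390_idle_check() early so that vtime_start_cpu() can resume cpu time accounting when the interrupt arrived on an idle cpu (idle_enter != 0 in the per-cpu s390_idle_data). A minimal sketch of the pattern with a hypothetical entry point (not part of the commit):

    #include <asm/cputime.h>      /* s390_idle_check() now lives here */

    void hypothetical_irq_entry(void)
    {
        s390_idle_check();        /* leave idle accounting before charging time */
        /* ... actual interrupt handling ... */
    }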
arch/s390/kernel/s390_ext.c
1 /* 1 /*
2 * arch/s390/kernel/s390_ext.c 2 * arch/s390/kernel/s390_ext.c
3 * 3 *
4 * S390 version 4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com), 6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 */ 8 */
9 9
10 #include <linux/module.h> 10 #include <linux/module.h>
11 #include <linux/kernel.h> 11 #include <linux/kernel.h>
12 #include <linux/slab.h> 12 #include <linux/slab.h>
13 #include <linux/errno.h> 13 #include <linux/errno.h>
14 #include <linux/kernel_stat.h> 14 #include <linux/kernel_stat.h>
15 #include <linux/interrupt.h> 15 #include <linux/interrupt.h>
16 #include <asm/cpu.h> 16 #include <asm/cputime.h>
17 #include <asm/lowcore.h> 17 #include <asm/lowcore.h>
18 #include <asm/s390_ext.h> 18 #include <asm/s390_ext.h>
19 #include <asm/irq_regs.h> 19 #include <asm/irq_regs.h>
20 #include <asm/irq.h> 20 #include <asm/irq.h>
21 #include "entry.h" 21 #include "entry.h"
22 22
23 /* 23 /*
24 * ext_int_hash[index] is the start of the list for all external interrupts 24 * ext_int_hash[index] is the start of the list for all external interrupts
25 * that hash to this index. With the current set of external interrupts 25 * that hash to this index. With the current set of external interrupts
26 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000 26 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
27 * iucv and 0x2603 pfault) this is always the first element. 27 * iucv and 0x2603 pfault) this is always the first element.
28 */ 28 */
29 ext_int_info_t *ext_int_hash[256] = { NULL, }; 29 ext_int_info_t *ext_int_hash[256] = { NULL, };
30 30
31 static inline int ext_hash(__u16 code) 31 static inline int ext_hash(__u16 code)
32 { 32 {
33 return (code + (code >> 9)) & 0xff; 33 return (code + (code >> 9)) & 0xff;
34 } 34 }
35 35
36 int register_external_interrupt(__u16 code, ext_int_handler_t handler) 36 int register_external_interrupt(__u16 code, ext_int_handler_t handler)
37 { 37 {
38 ext_int_info_t *p; 38 ext_int_info_t *p;
39 int index; 39 int index;
40 40
41 p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC); 41 p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
42 if (p == NULL) 42 if (p == NULL)
43 return -ENOMEM; 43 return -ENOMEM;
44 p->code = code; 44 p->code = code;
45 p->handler = handler; 45 p->handler = handler;
46 index = ext_hash(code); 46 index = ext_hash(code);
47 p->next = ext_int_hash[index]; 47 p->next = ext_int_hash[index];
48 ext_int_hash[index] = p; 48 ext_int_hash[index] = p;
49 return 0; 49 return 0;
50 } 50 }
51 51
52 int register_early_external_interrupt(__u16 code, ext_int_handler_t handler, 52 int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
53 ext_int_info_t *p) 53 ext_int_info_t *p)
54 { 54 {
55 int index; 55 int index;
56 56
57 if (p == NULL) 57 if (p == NULL)
58 return -EINVAL; 58 return -EINVAL;
59 p->code = code; 59 p->code = code;
60 p->handler = handler; 60 p->handler = handler;
61 index = ext_hash(code); 61 index = ext_hash(code);
62 p->next = ext_int_hash[index]; 62 p->next = ext_int_hash[index];
63 ext_int_hash[index] = p; 63 ext_int_hash[index] = p;
64 return 0; 64 return 0;
65 } 65 }
66 66
67 int unregister_external_interrupt(__u16 code, ext_int_handler_t handler) 67 int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
68 { 68 {
69 ext_int_info_t *p, *q; 69 ext_int_info_t *p, *q;
70 int index; 70 int index;
71 71
72 index = ext_hash(code); 72 index = ext_hash(code);
73 q = NULL; 73 q = NULL;
74 p = ext_int_hash[index]; 74 p = ext_int_hash[index];
75 while (p != NULL) { 75 while (p != NULL) {
76 if (p->code == code && p->handler == handler) 76 if (p->code == code && p->handler == handler)
77 break; 77 break;
78 q = p; 78 q = p;
79 p = p->next; 79 p = p->next;
80 } 80 }
81 if (p == NULL) 81 if (p == NULL)
82 return -ENOENT; 82 return -ENOENT;
83 if (q != NULL) 83 if (q != NULL)
84 q->next = p->next; 84 q->next = p->next;
85 else 85 else
86 ext_int_hash[index] = p->next; 86 ext_int_hash[index] = p->next;
87 kfree(p); 87 kfree(p);
88 return 0; 88 return 0;
89 } 89 }
90 90
91 int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler, 91 int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
92 ext_int_info_t *p) 92 ext_int_info_t *p)
93 { 93 {
94 ext_int_info_t *q; 94 ext_int_info_t *q;
95 int index; 95 int index;
96 96
97 if (p == NULL || p->code != code || p->handler != handler) 97 if (p == NULL || p->code != code || p->handler != handler)
98 return -EINVAL; 98 return -EINVAL;
99 index = ext_hash(code); 99 index = ext_hash(code);
100 q = ext_int_hash[index]; 100 q = ext_int_hash[index];
101 if (p != q) { 101 if (p != q) {
102 while (q != NULL) { 102 while (q != NULL) {
103 if (q->next == p) 103 if (q->next == p)
104 break; 104 break;
105 q = q->next; 105 q = q->next;
106 } 106 }
107 if (q == NULL) 107 if (q == NULL)
108 return -ENOENT; 108 return -ENOENT;
109 q->next = p->next; 109 q->next = p->next;
110 } else 110 } else
111 ext_int_hash[index] = p->next; 111 ext_int_hash[index] = p->next;
112 return 0; 112 return 0;
113 } 113 }
114 114
115 void do_extint(struct pt_regs *regs, unsigned short code) 115 void do_extint(struct pt_regs *regs, unsigned short code)
116 { 116 {
117 ext_int_info_t *p; 117 ext_int_info_t *p;
118 int index; 118 int index;
119 struct pt_regs *old_regs; 119 struct pt_regs *old_regs;
120 120
121 old_regs = set_irq_regs(regs); 121 old_regs = set_irq_regs(regs);
122 s390_idle_check(); 122 s390_idle_check();
123 irq_enter(); 123 irq_enter();
124 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 124 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
125 /* Serve timer interrupts first. */ 125 /* Serve timer interrupts first. */
126 clock_comparator_work(); 126 clock_comparator_work();
127 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; 127 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
128 index = ext_hash(code); 128 index = ext_hash(code);
129 for (p = ext_int_hash[index]; p; p = p->next) { 129 for (p = ext_int_hash[index]; p; p = p->next) {
130 if (likely(p->code == code)) 130 if (likely(p->code == code))
131 p->handler(code); 131 p->handler(code);
132 } 132 }
133 irq_exit(); 133 irq_exit();
134 set_irq_regs(old_regs); 134 set_irq_regs(old_regs);
135 } 135 }
136 136
137 EXPORT_SYMBOL(register_external_interrupt); 137 EXPORT_SYMBOL(register_external_interrupt);
138 EXPORT_SYMBOL(unregister_external_interrupt); 138 EXPORT_SYMBOL(unregister_external_interrupt);
139 139
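
For reference, the registration interface left unchanged above is used roughly as follows; the handler name and the 0x1004 cpu timer code are taken from the hash comment and are purely illustrative (a sketch under those assumptions, not part of this commit):

    #include <linux/init.h>
    #include <asm/s390_ext.h>

    static void example_timer_handler(__u16 code)   /* hypothetical handler */
    {
        /* invoked from do_extint() for external interrupt code 0x1004 */
    }

    static int __init example_ext_init(void)
    {
        /* duplicate codes simply chain on ext_int_hash[ext_hash(code)] */
        return register_external_interrupt(0x1004, example_timer_handler);
    }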
arch/s390/kernel/smp.c
1 /* 1 /*
2 * arch/s390/kernel/smp.c 2 * arch/s390/kernel/smp.c
3 * 3 *
4 * Copyright IBM Corp. 1999,2007 4 * Copyright IBM Corp. 1999,2007
5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
6 * Martin Schwidefsky (schwidefsky@de.ibm.com) 6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 * Heiko Carstens (heiko.carstens@de.ibm.com) 7 * Heiko Carstens (heiko.carstens@de.ibm.com)
8 * 8 *
9 * based on other smp stuff by 9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> 10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar 11 * (c) 1998 Ingo Molnar
12 * 12 *
13 * We work with logical cpu numbering everywhere we can. The only 13 * We work with logical cpu numbering everywhere we can. The only
14 * functions using the real cpu address (got from STAP) are the sigp 14 * functions using the real cpu address (got from STAP) are the sigp
15 * functions. For all other functions we use the identity mapping. 15 * functions. For all other functions we use the identity mapping.
16 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is 16 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
17 * used e.g. to find the idle task belonging to a logical cpu. Every array 17 * used e.g. to find the idle task belonging to a logical cpu. Every array
18 * in the kernel is sorted by the logical cpu number and not by the physical 18 * in the kernel is sorted by the logical cpu number and not by the physical
19 * one which is causing all the confusion with __cpu_logical_map and 19 * one which is causing all the confusion with __cpu_logical_map and
20 * cpu_number_map in other architectures. 20 * cpu_number_map in other architectures.
21 */ 21 */
22 22
23 #define KMSG_COMPONENT "cpu" 23 #define KMSG_COMPONENT "cpu"
24 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 24 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
25 25
26 #include <linux/module.h> 26 #include <linux/module.h>
27 #include <linux/init.h> 27 #include <linux/init.h>
28 #include <linux/mm.h> 28 #include <linux/mm.h>
29 #include <linux/err.h> 29 #include <linux/err.h>
30 #include <linux/spinlock.h> 30 #include <linux/spinlock.h>
31 #include <linux/kernel_stat.h> 31 #include <linux/kernel_stat.h>
32 #include <linux/delay.h> 32 #include <linux/delay.h>
33 #include <linux/cache.h> 33 #include <linux/cache.h>
34 #include <linux/interrupt.h> 34 #include <linux/interrupt.h>
35 #include <linux/irqflags.h> 35 #include <linux/irqflags.h>
36 #include <linux/cpu.h> 36 #include <linux/cpu.h>
37 #include <linux/timex.h> 37 #include <linux/timex.h>
38 #include <linux/bootmem.h> 38 #include <linux/bootmem.h>
39 #include <asm/ipl.h> 39 #include <asm/ipl.h>
40 #include <asm/setup.h> 40 #include <asm/setup.h>
41 #include <asm/sigp.h> 41 #include <asm/sigp.h>
42 #include <asm/pgalloc.h> 42 #include <asm/pgalloc.h>
43 #include <asm/irq.h> 43 #include <asm/irq.h>
44 #include <asm/s390_ext.h> 44 #include <asm/s390_ext.h>
45 #include <asm/cpcmd.h> 45 #include <asm/cpcmd.h>
46 #include <asm/tlbflush.h> 46 #include <asm/tlbflush.h>
47 #include <asm/timer.h> 47 #include <asm/timer.h>
48 #include <asm/lowcore.h> 48 #include <asm/lowcore.h>
49 #include <asm/sclp.h> 49 #include <asm/sclp.h>
50 #include <asm/cpu.h> 50 #include <asm/cputime.h>
51 #include <asm/vdso.h> 51 #include <asm/vdso.h>
52 #include "entry.h" 52 #include "entry.h"
53 53
54 static struct task_struct *current_set[NR_CPUS]; 54 static struct task_struct *current_set[NR_CPUS];
55 55
56 static u8 smp_cpu_type; 56 static u8 smp_cpu_type;
57 static int smp_use_sigp_detection; 57 static int smp_use_sigp_detection;
58 58
59 enum s390_cpu_state { 59 enum s390_cpu_state {
60 CPU_STATE_STANDBY, 60 CPU_STATE_STANDBY,
61 CPU_STATE_CONFIGURED, 61 CPU_STATE_CONFIGURED,
62 }; 62 };
63 63
64 DEFINE_MUTEX(smp_cpu_state_mutex); 64 DEFINE_MUTEX(smp_cpu_state_mutex);
65 int smp_cpu_polarization[NR_CPUS]; 65 int smp_cpu_polarization[NR_CPUS];
66 static int smp_cpu_state[NR_CPUS]; 66 static int smp_cpu_state[NR_CPUS];
67 static int cpu_management; 67 static int cpu_management;
68 68
69 static DEFINE_PER_CPU(struct cpu, cpu_devices); 69 static DEFINE_PER_CPU(struct cpu, cpu_devices);
70 70
71 static void smp_ext_bitcall(int, ec_bit_sig); 71 static void smp_ext_bitcall(int, ec_bit_sig);
72 72
73 void smp_send_stop(void) 73 void smp_send_stop(void)
74 { 74 {
75 int cpu, rc; 75 int cpu, rc;
76 76
77 /* Disable all interrupts/machine checks */ 77 /* Disable all interrupts/machine checks */
78 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); 78 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
79 trace_hardirqs_off(); 79 trace_hardirqs_off();
80 80
81 /* stop all processors */ 81 /* stop all processors */
82 for_each_online_cpu(cpu) { 82 for_each_online_cpu(cpu) {
83 if (cpu == smp_processor_id()) 83 if (cpu == smp_processor_id())
84 continue; 84 continue;
85 do { 85 do {
86 rc = signal_processor(cpu, sigp_stop); 86 rc = signal_processor(cpu, sigp_stop);
87 } while (rc == sigp_busy); 87 } while (rc == sigp_busy);
88 88
89 while (!smp_cpu_not_running(cpu)) 89 while (!smp_cpu_not_running(cpu))
90 cpu_relax(); 90 cpu_relax();
91 } 91 }
92 } 92 }
93 93
94 /* 94 /*
95 * This is the main routine where commands issued by other 95 * This is the main routine where commands issued by other
96 * cpus are handled. 96 * cpus are handled.
97 */ 97 */
98 98
99 static void do_ext_call_interrupt(__u16 code) 99 static void do_ext_call_interrupt(__u16 code)
100 { 100 {
101 unsigned long bits; 101 unsigned long bits;
102 102
103 /* 103 /*
104 * handle bit signal external calls 104 * handle bit signal external calls
105 * 105 *
106 * For the ec_schedule signal we have to do nothing. All the work 106 * For the ec_schedule signal we have to do nothing. All the work
107 * is done automatically when we return from the interrupt. 107 * is done automatically when we return from the interrupt.
108 */ 108 */
109 bits = xchg(&S390_lowcore.ext_call_fast, 0); 109 bits = xchg(&S390_lowcore.ext_call_fast, 0);
110 110
111 if (test_bit(ec_call_function, &bits)) 111 if (test_bit(ec_call_function, &bits))
112 generic_smp_call_function_interrupt(); 112 generic_smp_call_function_interrupt();
113 113
114 if (test_bit(ec_call_function_single, &bits)) 114 if (test_bit(ec_call_function_single, &bits))
115 generic_smp_call_function_single_interrupt(); 115 generic_smp_call_function_single_interrupt();
116 } 116 }
117 117
118 /* 118 /*
119 * Send an external call sigp to another cpu and return without waiting 119 * Send an external call sigp to another cpu and return without waiting
120 * for its completion. 120 * for its completion.
121 */ 121 */
122 static void smp_ext_bitcall(int cpu, ec_bit_sig sig) 122 static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
123 { 123 {
124 /* 124 /*
125 * Set signaling bit in lowcore of target cpu and kick it 125 * Set signaling bit in lowcore of target cpu and kick it
126 */ 126 */
127 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); 127 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
128 while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) 128 while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
129 udelay(10); 129 udelay(10);
130 } 130 }
131 131
132 void arch_send_call_function_ipi(cpumask_t mask) 132 void arch_send_call_function_ipi(cpumask_t mask)
133 { 133 {
134 int cpu; 134 int cpu;
135 135
136 for_each_cpu_mask(cpu, mask) 136 for_each_cpu_mask(cpu, mask)
137 smp_ext_bitcall(cpu, ec_call_function); 137 smp_ext_bitcall(cpu, ec_call_function);
138 } 138 }
139 139
140 void arch_send_call_function_single_ipi(int cpu) 140 void arch_send_call_function_single_ipi(int cpu)
141 { 141 {
142 smp_ext_bitcall(cpu, ec_call_function_single); 142 smp_ext_bitcall(cpu, ec_call_function_single);
143 } 143 }
144 144
145 #ifndef CONFIG_64BIT 145 #ifndef CONFIG_64BIT
146 /* 146 /*
147 * this function sends a 'purge tlb' signal to another CPU. 147 * this function sends a 'purge tlb' signal to another CPU.
148 */ 148 */
149 static void smp_ptlb_callback(void *info) 149 static void smp_ptlb_callback(void *info)
150 { 150 {
151 __tlb_flush_local(); 151 __tlb_flush_local();
152 } 152 }
153 153
154 void smp_ptlb_all(void) 154 void smp_ptlb_all(void)
155 { 155 {
156 on_each_cpu(smp_ptlb_callback, NULL, 1); 156 on_each_cpu(smp_ptlb_callback, NULL, 1);
157 } 157 }
158 EXPORT_SYMBOL(smp_ptlb_all); 158 EXPORT_SYMBOL(smp_ptlb_all);
159 #endif /* ! CONFIG_64BIT */ 159 #endif /* ! CONFIG_64BIT */
160 160
161 /* 161 /*
162 * this function sends a 'reschedule' IPI to another CPU. 162 * this function sends a 'reschedule' IPI to another CPU.
163 * it goes straight through and wastes no time serializing 163 * it goes straight through and wastes no time serializing
164 * anything. Worst case is that we lose a reschedule ... 164 * anything. Worst case is that we lose a reschedule ...
165 */ 165 */
166 void smp_send_reschedule(int cpu) 166 void smp_send_reschedule(int cpu)
167 { 167 {
168 smp_ext_bitcall(cpu, ec_schedule); 168 smp_ext_bitcall(cpu, ec_schedule);
169 } 169 }
170 170
171 /* 171 /*
172 * parameter area for the set/clear control bit callbacks 172 * parameter area for the set/clear control bit callbacks
173 */ 173 */
174 struct ec_creg_mask_parms { 174 struct ec_creg_mask_parms {
175 unsigned long orvals[16]; 175 unsigned long orvals[16];
176 unsigned long andvals[16]; 176 unsigned long andvals[16];
177 }; 177 };
178 178
179 /* 179 /*
180 * callback for setting/clearing control bits 180 * callback for setting/clearing control bits
181 */ 181 */
182 static void smp_ctl_bit_callback(void *info) 182 static void smp_ctl_bit_callback(void *info)
183 { 183 {
184 struct ec_creg_mask_parms *pp = info; 184 struct ec_creg_mask_parms *pp = info;
185 unsigned long cregs[16]; 185 unsigned long cregs[16];
186 int i; 186 int i;
187 187
188 __ctl_store(cregs, 0, 15); 188 __ctl_store(cregs, 0, 15);
189 for (i = 0; i <= 15; i++) 189 for (i = 0; i <= 15; i++)
190 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; 190 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
191 __ctl_load(cregs, 0, 15); 191 __ctl_load(cregs, 0, 15);
192 } 192 }
193 193
194 /* 194 /*
195 * Set a bit in a control register of all cpus 195 * Set a bit in a control register of all cpus
196 */ 196 */
197 void smp_ctl_set_bit(int cr, int bit) 197 void smp_ctl_set_bit(int cr, int bit)
198 { 198 {
199 struct ec_creg_mask_parms parms; 199 struct ec_creg_mask_parms parms;
200 200
201 memset(&parms.orvals, 0, sizeof(parms.orvals)); 201 memset(&parms.orvals, 0, sizeof(parms.orvals));
202 memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 202 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
203 parms.orvals[cr] = 1 << bit; 203 parms.orvals[cr] = 1 << bit;
204 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 204 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
205 } 205 }
206 EXPORT_SYMBOL(smp_ctl_set_bit); 206 EXPORT_SYMBOL(smp_ctl_set_bit);
207 207
208 /* 208 /*
209 * Clear a bit in a control register of all cpus 209 * Clear a bit in a control register of all cpus
210 */ 210 */
211 void smp_ctl_clear_bit(int cr, int bit) 211 void smp_ctl_clear_bit(int cr, int bit)
212 { 212 {
213 struct ec_creg_mask_parms parms; 213 struct ec_creg_mask_parms parms;
214 214
215 memset(&parms.orvals, 0, sizeof(parms.orvals)); 215 memset(&parms.orvals, 0, sizeof(parms.orvals));
216 memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 216 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
217 parms.andvals[cr] = ~(1L << bit); 217 parms.andvals[cr] = ~(1L << bit);
218 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 218 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
219 } 219 }
220 EXPORT_SYMBOL(smp_ctl_clear_bit); 220 EXPORT_SYMBOL(smp_ctl_clear_bit);
221 221
222 /* 222 /*
223 * In early ipl state a temp. logically cpu number is needed, so the sigp 223 * In early ipl state a temp. logically cpu number is needed, so the sigp
224 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on 224 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
225 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1. 225 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
226 */ 226 */
227 #define CPU_INIT_NO 1 227 #define CPU_INIT_NO 1
228 228
229 #ifdef CONFIG_ZFCPDUMP 229 #ifdef CONFIG_ZFCPDUMP
230 230
231 /* 231 /*
232 * zfcpdump_prefix_array holds prefix registers for the following scenario: 232 * zfcpdump_prefix_array holds prefix registers for the following scenario:
233 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to 233 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
234 * save its prefix registers, since they get lost, when switching from 31 bit 234 * save its prefix registers, since they get lost, when switching from 31 bit
235 * to 64 bit. 235 * to 64 bit.
236 */ 236 */
237 unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ 237 unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
238 __attribute__((__section__(".data"))); 238 __attribute__((__section__(".data")));
239 239
240 static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) 240 static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
241 { 241 {
242 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 242 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
243 return; 243 return;
244 if (cpu >= NR_CPUS) { 244 if (cpu >= NR_CPUS) {
245 pr_warning("CPU %i exceeds the maximum %i and is excluded from " 245 pr_warning("CPU %i exceeds the maximum %i and is excluded from "
246 "the dump\n", cpu, NR_CPUS - 1); 246 "the dump\n", cpu, NR_CPUS - 1);
247 return; 247 return;
248 } 248 }
249 zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); 249 zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
250 __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; 250 __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
251 while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) == 251 while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
252 sigp_busy) 252 sigp_busy)
253 cpu_relax(); 253 cpu_relax();
254 memcpy(zfcpdump_save_areas[cpu], 254 memcpy(zfcpdump_save_areas[cpu],
255 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 255 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
256 SAVE_AREA_SIZE); 256 SAVE_AREA_SIZE);
257 #ifdef CONFIG_64BIT 257 #ifdef CONFIG_64BIT
258 /* copy original prefix register */ 258 /* copy original prefix register */
259 zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu]; 259 zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
260 #endif 260 #endif
261 } 261 }
262 262
263 union save_area *zfcpdump_save_areas[NR_CPUS + 1]; 263 union save_area *zfcpdump_save_areas[NR_CPUS + 1];
264 EXPORT_SYMBOL_GPL(zfcpdump_save_areas); 264 EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
265 265
266 #else 266 #else
267 267
268 static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } 268 static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
269 269
270 #endif /* CONFIG_ZFCPDUMP */ 270 #endif /* CONFIG_ZFCPDUMP */
271 271
272 static int cpu_stopped(int cpu) 272 static int cpu_stopped(int cpu)
273 { 273 {
274 __u32 status; 274 __u32 status;
275 275
276 /* Check for stopped state */ 276 /* Check for stopped state */
277 if (signal_processor_ps(&status, 0, cpu, sigp_sense) == 277 if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
278 sigp_status_stored) { 278 sigp_status_stored) {
279 if (status & 0x40) 279 if (status & 0x40)
280 return 1; 280 return 1;
281 } 281 }
282 return 0; 282 return 0;
283 } 283 }
284 284
285 static int cpu_known(int cpu_id) 285 static int cpu_known(int cpu_id)
286 { 286 {
287 int cpu; 287 int cpu;
288 288
289 for_each_present_cpu(cpu) { 289 for_each_present_cpu(cpu) {
290 if (__cpu_logical_map[cpu] == cpu_id) 290 if (__cpu_logical_map[cpu] == cpu_id)
291 return 1; 291 return 1;
292 } 292 }
293 return 0; 293 return 0;
294 } 294 }
295 295
296 static int smp_rescan_cpus_sigp(cpumask_t avail) 296 static int smp_rescan_cpus_sigp(cpumask_t avail)
297 { 297 {
298 int cpu_id, logical_cpu; 298 int cpu_id, logical_cpu;
299 299
300 logical_cpu = cpumask_first(&avail); 300 logical_cpu = cpumask_first(&avail);
301 if (logical_cpu >= nr_cpu_ids) 301 if (logical_cpu >= nr_cpu_ids)
302 return 0; 302 return 0;
303 for (cpu_id = 0; cpu_id <= 65535; cpu_id++) { 303 for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
304 if (cpu_known(cpu_id)) 304 if (cpu_known(cpu_id))
305 continue; 305 continue;
306 __cpu_logical_map[logical_cpu] = cpu_id; 306 __cpu_logical_map[logical_cpu] = cpu_id;
307 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; 307 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
308 if (!cpu_stopped(logical_cpu)) 308 if (!cpu_stopped(logical_cpu))
309 continue; 309 continue;
310 cpu_set(logical_cpu, cpu_present_map); 310 cpu_set(logical_cpu, cpu_present_map);
311 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; 311 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
312 logical_cpu = cpumask_next(logical_cpu, &avail); 312 logical_cpu = cpumask_next(logical_cpu, &avail);
313 if (logical_cpu >= nr_cpu_ids) 313 if (logical_cpu >= nr_cpu_ids)
314 break; 314 break;
315 } 315 }
316 return 0; 316 return 0;
317 } 317 }
318 318
319 static int smp_rescan_cpus_sclp(cpumask_t avail) 319 static int smp_rescan_cpus_sclp(cpumask_t avail)
320 { 320 {
321 struct sclp_cpu_info *info; 321 struct sclp_cpu_info *info;
322 int cpu_id, logical_cpu, cpu; 322 int cpu_id, logical_cpu, cpu;
323 int rc; 323 int rc;
324 324
325 logical_cpu = cpumask_first(&avail); 325 logical_cpu = cpumask_first(&avail);
326 if (logical_cpu >= nr_cpu_ids) 326 if (logical_cpu >= nr_cpu_ids)
327 return 0; 327 return 0;
328 info = kmalloc(sizeof(*info), GFP_KERNEL); 328 info = kmalloc(sizeof(*info), GFP_KERNEL);
329 if (!info) 329 if (!info)
330 return -ENOMEM; 330 return -ENOMEM;
331 rc = sclp_get_cpu_info(info); 331 rc = sclp_get_cpu_info(info);
332 if (rc) 332 if (rc)
333 goto out; 333 goto out;
334 for (cpu = 0; cpu < info->combined; cpu++) { 334 for (cpu = 0; cpu < info->combined; cpu++) {
335 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) 335 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
336 continue; 336 continue;
337 cpu_id = info->cpu[cpu].address; 337 cpu_id = info->cpu[cpu].address;
338 if (cpu_known(cpu_id)) 338 if (cpu_known(cpu_id))
339 continue; 339 continue;
340 __cpu_logical_map[logical_cpu] = cpu_id; 340 __cpu_logical_map[logical_cpu] = cpu_id;
341 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; 341 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
342 cpu_set(logical_cpu, cpu_present_map); 342 cpu_set(logical_cpu, cpu_present_map);
343 if (cpu >= info->configured) 343 if (cpu >= info->configured)
344 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; 344 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
345 else 345 else
346 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; 346 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
347 logical_cpu = cpumask_next(logical_cpu, &avail); 347 logical_cpu = cpumask_next(logical_cpu, &avail);
348 if (logical_cpu >= nr_cpu_ids) 348 if (logical_cpu >= nr_cpu_ids)
349 break; 349 break;
350 } 350 }
351 out: 351 out:
352 kfree(info); 352 kfree(info);
353 return rc; 353 return rc;
354 } 354 }
355 355
356 static int __smp_rescan_cpus(void) 356 static int __smp_rescan_cpus(void)
357 { 357 {
358 cpumask_t avail; 358 cpumask_t avail;
359 359
360 cpus_xor(avail, cpu_possible_map, cpu_present_map); 360 cpus_xor(avail, cpu_possible_map, cpu_present_map);
361 if (smp_use_sigp_detection) 361 if (smp_use_sigp_detection)
362 return smp_rescan_cpus_sigp(avail); 362 return smp_rescan_cpus_sigp(avail);
363 else 363 else
364 return smp_rescan_cpus_sclp(avail); 364 return smp_rescan_cpus_sclp(avail);
365 } 365 }
366 366
367 static void __init smp_detect_cpus(void) 367 static void __init smp_detect_cpus(void)
368 { 368 {
369 unsigned int cpu, c_cpus, s_cpus; 369 unsigned int cpu, c_cpus, s_cpus;
370 struct sclp_cpu_info *info; 370 struct sclp_cpu_info *info;
371 u16 boot_cpu_addr, cpu_addr; 371 u16 boot_cpu_addr, cpu_addr;
372 372
373 c_cpus = 1; 373 c_cpus = 1;
374 s_cpus = 0; 374 s_cpus = 0;
375 boot_cpu_addr = __cpu_logical_map[0]; 375 boot_cpu_addr = __cpu_logical_map[0];
376 info = kmalloc(sizeof(*info), GFP_KERNEL); 376 info = kmalloc(sizeof(*info), GFP_KERNEL);
377 if (!info) 377 if (!info)
378 panic("smp_detect_cpus failed to allocate memory\n"); 378 panic("smp_detect_cpus failed to allocate memory\n");
379 /* Use sigp detection algorithm if sclp doesn't work. */ 379 /* Use sigp detection algorithm if sclp doesn't work. */
380 if (sclp_get_cpu_info(info)) { 380 if (sclp_get_cpu_info(info)) {
381 smp_use_sigp_detection = 1; 381 smp_use_sigp_detection = 1;
382 for (cpu = 0; cpu <= 65535; cpu++) { 382 for (cpu = 0; cpu <= 65535; cpu++) {
383 if (cpu == boot_cpu_addr) 383 if (cpu == boot_cpu_addr)
384 continue; 384 continue;
385 __cpu_logical_map[CPU_INIT_NO] = cpu; 385 __cpu_logical_map[CPU_INIT_NO] = cpu;
386 if (!cpu_stopped(CPU_INIT_NO)) 386 if (!cpu_stopped(CPU_INIT_NO))
387 continue; 387 continue;
388 smp_get_save_area(c_cpus, cpu); 388 smp_get_save_area(c_cpus, cpu);
389 c_cpus++; 389 c_cpus++;
390 } 390 }
391 goto out; 391 goto out;
392 } 392 }
393 393
394 if (info->has_cpu_type) { 394 if (info->has_cpu_type) {
395 for (cpu = 0; cpu < info->combined; cpu++) { 395 for (cpu = 0; cpu < info->combined; cpu++) {
396 if (info->cpu[cpu].address == boot_cpu_addr) { 396 if (info->cpu[cpu].address == boot_cpu_addr) {
397 smp_cpu_type = info->cpu[cpu].type; 397 smp_cpu_type = info->cpu[cpu].type;
398 break; 398 break;
399 } 399 }
400 } 400 }
401 } 401 }
402 402
403 for (cpu = 0; cpu < info->combined; cpu++) { 403 for (cpu = 0; cpu < info->combined; cpu++) {
404 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) 404 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
405 continue; 405 continue;
406 cpu_addr = info->cpu[cpu].address; 406 cpu_addr = info->cpu[cpu].address;
407 if (cpu_addr == boot_cpu_addr) 407 if (cpu_addr == boot_cpu_addr)
408 continue; 408 continue;
409 __cpu_logical_map[CPU_INIT_NO] = cpu_addr; 409 __cpu_logical_map[CPU_INIT_NO] = cpu_addr;
410 if (!cpu_stopped(CPU_INIT_NO)) { 410 if (!cpu_stopped(CPU_INIT_NO)) {
411 s_cpus++; 411 s_cpus++;
412 continue; 412 continue;
413 } 413 }
414 smp_get_save_area(c_cpus, cpu_addr); 414 smp_get_save_area(c_cpus, cpu_addr);
415 c_cpus++; 415 c_cpus++;
416 } 416 }
417 out: 417 out:
418 kfree(info); 418 kfree(info);
419 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); 419 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
420 get_online_cpus(); 420 get_online_cpus();
421 __smp_rescan_cpus(); 421 __smp_rescan_cpus();
422 put_online_cpus(); 422 put_online_cpus();
423 } 423 }
424 424
425 /* 425 /*
426 * Activate a secondary processor. 426 * Activate a secondary processor.
427 */ 427 */
428 int __cpuinit start_secondary(void *cpuvoid) 428 int __cpuinit start_secondary(void *cpuvoid)
429 { 429 {
430 /* Setup the cpu */ 430 /* Setup the cpu */
431 cpu_init(); 431 cpu_init();
432 preempt_disable(); 432 preempt_disable();
433 /* Enable TOD clock interrupts on the secondary cpu. */ 433 /* Enable TOD clock interrupts on the secondary cpu. */
434 init_cpu_timer(); 434 init_cpu_timer();
435 /* Enable cpu timer interrupts on the secondary cpu. */ 435 /* Enable cpu timer interrupts on the secondary cpu. */
436 init_cpu_vtimer(); 436 init_cpu_vtimer();
437 /* Enable pfault pseudo page faults on this cpu. */ 437 /* Enable pfault pseudo page faults on this cpu. */
438 pfault_init(); 438 pfault_init();
439 439
440 /* call cpu notifiers */ 440 /* call cpu notifiers */
441 notify_cpu_starting(smp_processor_id()); 441 notify_cpu_starting(smp_processor_id());
442 /* Mark this cpu as online */ 442 /* Mark this cpu as online */
443 ipi_call_lock(); 443 ipi_call_lock();
444 cpu_set(smp_processor_id(), cpu_online_map); 444 cpu_set(smp_processor_id(), cpu_online_map);
445 ipi_call_unlock(); 445 ipi_call_unlock();
446 /* Switch on interrupts */ 446 /* Switch on interrupts */
447 local_irq_enable(); 447 local_irq_enable();
448 /* Print info about this processor */ 448 /* Print info about this processor */
449 print_cpu_info(); 449 print_cpu_info();
450 /* cpu_idle will call schedule for us */ 450 /* cpu_idle will call schedule for us */
451 cpu_idle(); 451 cpu_idle();
452 return 0; 452 return 0;
453 } 453 }
454 454
455 static void __init smp_create_idle(unsigned int cpu) 455 static void __init smp_create_idle(unsigned int cpu)
456 { 456 {
457 struct task_struct *p; 457 struct task_struct *p;
458 458
459 /* 459 /*
460 * don't care about the psw and regs settings since we'll never 460 * don't care about the psw and regs settings since we'll never
461 * reschedule the forked task. 461 * reschedule the forked task.
462 */ 462 */
463 p = fork_idle(cpu); 463 p = fork_idle(cpu);
464 if (IS_ERR(p)) 464 if (IS_ERR(p))
465 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); 465 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
466 current_set[cpu] = p; 466 current_set[cpu] = p;
467 } 467 }
468 468
469 static int __cpuinit smp_alloc_lowcore(int cpu) 469 static int __cpuinit smp_alloc_lowcore(int cpu)
470 { 470 {
471 unsigned long async_stack, panic_stack; 471 unsigned long async_stack, panic_stack;
472 struct _lowcore *lowcore; 472 struct _lowcore *lowcore;
473 int lc_order; 473 int lc_order;
474 474
475 lc_order = sizeof(long) == 8 ? 1 : 0; 475 lc_order = sizeof(long) == 8 ? 1 : 0;
476 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order); 476 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
477 if (!lowcore) 477 if (!lowcore)
478 return -ENOMEM; 478 return -ENOMEM;
479 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); 479 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
480 panic_stack = __get_free_page(GFP_KERNEL); 480 panic_stack = __get_free_page(GFP_KERNEL);
481 if (!panic_stack || !async_stack) 481 if (!panic_stack || !async_stack)
482 goto out; 482 goto out;
483 memcpy(lowcore, &S390_lowcore, 512); 483 memcpy(lowcore, &S390_lowcore, 512);
484 memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); 484 memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
485 lowcore->async_stack = async_stack + ASYNC_SIZE; 485 lowcore->async_stack = async_stack + ASYNC_SIZE;
486 lowcore->panic_stack = panic_stack + PAGE_SIZE; 486 lowcore->panic_stack = panic_stack + PAGE_SIZE;
487 487
488 #ifndef CONFIG_64BIT 488 #ifndef CONFIG_64BIT
489 if (MACHINE_HAS_IEEE) { 489 if (MACHINE_HAS_IEEE) {
490 unsigned long save_area; 490 unsigned long save_area;
491 491
492 save_area = get_zeroed_page(GFP_KERNEL); 492 save_area = get_zeroed_page(GFP_KERNEL);
493 if (!save_area) 493 if (!save_area)
494 goto out; 494 goto out;
495 lowcore->extended_save_area_addr = (u32) save_area; 495 lowcore->extended_save_area_addr = (u32) save_area;
496 } 496 }
497 #else 497 #else
498 if (vdso_alloc_per_cpu(cpu, lowcore)) 498 if (vdso_alloc_per_cpu(cpu, lowcore))
499 goto out; 499 goto out;
500 #endif 500 #endif
501 lowcore_ptr[cpu] = lowcore; 501 lowcore_ptr[cpu] = lowcore;
502 return 0; 502 return 0;
503 503
504 out: 504 out:
505 free_page(panic_stack); 505 free_page(panic_stack);
506 free_pages(async_stack, ASYNC_ORDER); 506 free_pages(async_stack, ASYNC_ORDER);
507 free_pages((unsigned long) lowcore, lc_order); 507 free_pages((unsigned long) lowcore, lc_order);
508 return -ENOMEM; 508 return -ENOMEM;
509 } 509 }
510 510
511 static void smp_free_lowcore(int cpu) 511 static void smp_free_lowcore(int cpu)
512 { 512 {
513 struct _lowcore *lowcore; 513 struct _lowcore *lowcore;
514 int lc_order; 514 int lc_order;
515 515
516 lc_order = sizeof(long) == 8 ? 1 : 0; 516 lc_order = sizeof(long) == 8 ? 1 : 0;
517 lowcore = lowcore_ptr[cpu]; 517 lowcore = lowcore_ptr[cpu];
518 #ifndef CONFIG_64BIT 518 #ifndef CONFIG_64BIT
519 if (MACHINE_HAS_IEEE) 519 if (MACHINE_HAS_IEEE)
520 free_page((unsigned long) lowcore->extended_save_area_addr); 520 free_page((unsigned long) lowcore->extended_save_area_addr);
521 #else 521 #else
522 vdso_free_per_cpu(cpu, lowcore); 522 vdso_free_per_cpu(cpu, lowcore);
523 #endif 523 #endif
524 free_page(lowcore->panic_stack - PAGE_SIZE); 524 free_page(lowcore->panic_stack - PAGE_SIZE);
525 free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER); 525 free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
526 free_pages((unsigned long) lowcore, lc_order); 526 free_pages((unsigned long) lowcore, lc_order);
527 lowcore_ptr[cpu] = NULL; 527 lowcore_ptr[cpu] = NULL;
528 } 528 }
529 529
530 /* Upping and downing of CPUs */ 530 /* Upping and downing of CPUs */
531 int __cpuinit __cpu_up(unsigned int cpu) 531 int __cpuinit __cpu_up(unsigned int cpu)
532 { 532 {
533 struct task_struct *idle; 533 struct task_struct *idle;
534 struct _lowcore *cpu_lowcore; 534 struct _lowcore *cpu_lowcore;
535 struct stack_frame *sf; 535 struct stack_frame *sf;
536 sigp_ccode ccode; 536 sigp_ccode ccode;
537 u32 lowcore; 537 u32 lowcore;
538 538
539 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) 539 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
540 return -EIO; 540 return -EIO;
541 if (smp_alloc_lowcore(cpu)) 541 if (smp_alloc_lowcore(cpu))
542 return -ENOMEM; 542 return -ENOMEM;
543 do { 543 do {
544 ccode = signal_processor(cpu, sigp_initial_cpu_reset); 544 ccode = signal_processor(cpu, sigp_initial_cpu_reset);
545 if (ccode == sigp_busy) 545 if (ccode == sigp_busy)
546 udelay(10); 546 udelay(10);
547 if (ccode == sigp_not_operational) 547 if (ccode == sigp_not_operational)
548 goto err_out; 548 goto err_out;
549 } while (ccode == sigp_busy); 549 } while (ccode == sigp_busy);
550 550
551 lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; 551 lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
552 while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) 552 while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
553 udelay(10); 553 udelay(10);
554 554
555 idle = current_set[cpu]; 555 idle = current_set[cpu];
556 cpu_lowcore = lowcore_ptr[cpu]; 556 cpu_lowcore = lowcore_ptr[cpu];
557 cpu_lowcore->kernel_stack = (unsigned long) 557 cpu_lowcore->kernel_stack = (unsigned long)
558 task_stack_page(idle) + THREAD_SIZE; 558 task_stack_page(idle) + THREAD_SIZE;
559 cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle); 559 cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
560 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack 560 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
561 - sizeof(struct pt_regs) 561 - sizeof(struct pt_regs)
562 - sizeof(struct stack_frame)); 562 - sizeof(struct stack_frame));
563 memset(sf, 0, sizeof(struct stack_frame)); 563 memset(sf, 0, sizeof(struct stack_frame));
564 sf->gprs[9] = (unsigned long) sf; 564 sf->gprs[9] = (unsigned long) sf;
565 cpu_lowcore->save_area[15] = (unsigned long) sf; 565 cpu_lowcore->save_area[15] = (unsigned long) sf;
566 __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); 566 __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
567 asm volatile( 567 asm volatile(
568 " stam 0,15,0(%0)" 568 " stam 0,15,0(%0)"
569 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); 569 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
570 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; 570 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
571 cpu_lowcore->current_task = (unsigned long) idle; 571 cpu_lowcore->current_task = (unsigned long) idle;
572 cpu_lowcore->cpu_nr = cpu; 572 cpu_lowcore->cpu_nr = cpu;
573 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; 573 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
574 cpu_lowcore->machine_flags = S390_lowcore.machine_flags; 574 cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
575 eieio(); 575 eieio();
576 576
577 while (signal_processor(cpu, sigp_restart) == sigp_busy) 577 while (signal_processor(cpu, sigp_restart) == sigp_busy)
578 udelay(10); 578 udelay(10);
579 579
580 while (!cpu_online(cpu)) 580 while (!cpu_online(cpu))
581 cpu_relax(); 581 cpu_relax();
582 return 0; 582 return 0;
583 583
584 err_out: 584 err_out:
585 smp_free_lowcore(cpu); 585 smp_free_lowcore(cpu);
586 return -EIO; 586 return -EIO;
587 } 587 }
588 588
589 static int __init setup_possible_cpus(char *s) 589 static int __init setup_possible_cpus(char *s)
590 { 590 {
591 int pcpus, cpu; 591 int pcpus, cpu;
592 592
593 pcpus = simple_strtoul(s, NULL, 0); 593 pcpus = simple_strtoul(s, NULL, 0);
594 init_cpu_possible(cpumask_of(0)); 594 init_cpu_possible(cpumask_of(0));
595 for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) 595 for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
596 set_cpu_possible(cpu, true); 596 set_cpu_possible(cpu, true);
597 return 0; 597 return 0;
598 } 598 }
599 early_param("possible_cpus", setup_possible_cpus); 599 early_param("possible_cpus", setup_possible_cpus);
600 600
601 #ifdef CONFIG_HOTPLUG_CPU 601 #ifdef CONFIG_HOTPLUG_CPU
602 602
603 int __cpu_disable(void) 603 int __cpu_disable(void)
604 { 604 {
605 struct ec_creg_mask_parms cr_parms; 605 struct ec_creg_mask_parms cr_parms;
606 int cpu = smp_processor_id(); 606 int cpu = smp_processor_id();
607 607
608 cpu_clear(cpu, cpu_online_map); 608 cpu_clear(cpu, cpu_online_map);
609 609
610 /* Disable pfault pseudo page faults on this cpu. */ 610 /* Disable pfault pseudo page faults on this cpu. */
611 pfault_fini(); 611 pfault_fini();
612 612
613 memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); 613 memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
614 memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); 614 memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
615 615
616 /* disable all external interrupts */ 616 /* disable all external interrupts */
617 cr_parms.orvals[0] = 0; 617 cr_parms.orvals[0] = 0;
618 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | 618 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
619 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); 619 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
620 /* disable all I/O interrupts */ 620 /* disable all I/O interrupts */
621 cr_parms.orvals[6] = 0; 621 cr_parms.orvals[6] = 0;
622 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | 622 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
623 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); 623 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
624 /* disable most machine checks */ 624 /* disable most machine checks */
625 cr_parms.orvals[14] = 0; 625 cr_parms.orvals[14] = 0;
626 cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | 626 cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
627 1 << 25 | 1 << 24); 627 1 << 25 | 1 << 24);
628 628
629 smp_ctl_bit_callback(&cr_parms); 629 smp_ctl_bit_callback(&cr_parms);
630 630
631 return 0; 631 return 0;
632 } 632 }
633 633
634 void __cpu_die(unsigned int cpu) 634 void __cpu_die(unsigned int cpu)
635 { 635 {
636 /* Wait until target cpu is down */ 636 /* Wait until target cpu is down */
637 while (!smp_cpu_not_running(cpu)) 637 while (!smp_cpu_not_running(cpu))
638 cpu_relax(); 638 cpu_relax();
639 smp_free_lowcore(cpu); 639 smp_free_lowcore(cpu);
640 pr_info("Processor %d stopped\n", cpu); 640 pr_info("Processor %d stopped\n", cpu);
641 } 641 }
642 642
643 void cpu_die(void) 643 void cpu_die(void)
644 { 644 {
645 idle_task_exit(); 645 idle_task_exit();
646 signal_processor(smp_processor_id(), sigp_stop); 646 signal_processor(smp_processor_id(), sigp_stop);
647 BUG(); 647 BUG();
648 for (;;); 648 for (;;);
649 } 649 }
650 650
651 #endif /* CONFIG_HOTPLUG_CPU */ 651 #endif /* CONFIG_HOTPLUG_CPU */
652 652
653 void __init smp_prepare_cpus(unsigned int max_cpus) 653 void __init smp_prepare_cpus(unsigned int max_cpus)
654 { 654 {
655 #ifndef CONFIG_64BIT 655 #ifndef CONFIG_64BIT
656 unsigned long save_area = 0; 656 unsigned long save_area = 0;
657 #endif 657 #endif
658 unsigned long async_stack, panic_stack; 658 unsigned long async_stack, panic_stack;
659 struct _lowcore *lowcore; 659 struct _lowcore *lowcore;
660 unsigned int cpu; 660 unsigned int cpu;
661 int lc_order; 661 int lc_order;
662 662
663 smp_detect_cpus(); 663 smp_detect_cpus();
664 664
665 /* request the 0x1201 emergency signal external interrupt */ 665 /* request the 0x1201 emergency signal external interrupt */
666 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 666 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
667 panic("Couldn't request external interrupt 0x1201"); 667 panic("Couldn't request external interrupt 0x1201");
668 print_cpu_info(); 668 print_cpu_info();
669 669
670 /* Reallocate current lowcore, but keep its contents. */ 670 /* Reallocate current lowcore, but keep its contents. */
671 lc_order = sizeof(long) == 8 ? 1 : 0; 671 lc_order = sizeof(long) == 8 ? 1 : 0;
672 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order); 672 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
673 panic_stack = __get_free_page(GFP_KERNEL); 673 panic_stack = __get_free_page(GFP_KERNEL);
674 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); 674 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
675 BUG_ON(!lowcore || !panic_stack || !async_stack); 675 BUG_ON(!lowcore || !panic_stack || !async_stack);
676 #ifndef CONFIG_64BIT 676 #ifndef CONFIG_64BIT
677 if (MACHINE_HAS_IEEE) 677 if (MACHINE_HAS_IEEE)
678 save_area = get_zeroed_page(GFP_KERNEL); 678 save_area = get_zeroed_page(GFP_KERNEL);
679 #endif 679 #endif
680 local_irq_disable(); 680 local_irq_disable();
681 local_mcck_disable(); 681 local_mcck_disable();
682 lowcore_ptr[smp_processor_id()] = lowcore; 682 lowcore_ptr[smp_processor_id()] = lowcore;
683 *lowcore = S390_lowcore; 683 *lowcore = S390_lowcore;
684 lowcore->panic_stack = panic_stack + PAGE_SIZE; 684 lowcore->panic_stack = panic_stack + PAGE_SIZE;
685 lowcore->async_stack = async_stack + ASYNC_SIZE; 685 lowcore->async_stack = async_stack + ASYNC_SIZE;
686 #ifndef CONFIG_64BIT 686 #ifndef CONFIG_64BIT
687 if (MACHINE_HAS_IEEE) 687 if (MACHINE_HAS_IEEE)
688 lowcore->extended_save_area_addr = (u32) save_area; 688 lowcore->extended_save_area_addr = (u32) save_area;
689 #else 689 #else
690 if (vdso_alloc_per_cpu(smp_processor_id(), lowcore)) 690 if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
691 BUG(); 691 BUG();
692 #endif 692 #endif
693 set_prefix((u32)(unsigned long) lowcore); 693 set_prefix((u32)(unsigned long) lowcore);
694 local_mcck_enable(); 694 local_mcck_enable();
695 local_irq_enable(); 695 local_irq_enable();
696 for_each_possible_cpu(cpu) 696 for_each_possible_cpu(cpu)
697 if (cpu != smp_processor_id()) 697 if (cpu != smp_processor_id())
698 smp_create_idle(cpu); 698 smp_create_idle(cpu);
699 } 699 }
700 700
701 void __init smp_prepare_boot_cpu(void) 701 void __init smp_prepare_boot_cpu(void)
702 { 702 {
703 BUG_ON(smp_processor_id() != 0); 703 BUG_ON(smp_processor_id() != 0);
704 704
705 current_thread_info()->cpu = 0; 705 current_thread_info()->cpu = 0;
706 cpu_set(0, cpu_present_map); 706 cpu_set(0, cpu_present_map);
707 cpu_set(0, cpu_online_map); 707 cpu_set(0, cpu_online_map);
708 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 708 S390_lowcore.percpu_offset = __per_cpu_offset[0];
709 current_set[0] = current; 709 current_set[0] = current;
710 smp_cpu_state[0] = CPU_STATE_CONFIGURED; 710 smp_cpu_state[0] = CPU_STATE_CONFIGURED;
711 smp_cpu_polarization[0] = POLARIZATION_UNKNWN; 711 smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
712 } 712 }
713 713
714 void __init smp_cpus_done(unsigned int max_cpus) 714 void __init smp_cpus_done(unsigned int max_cpus)
715 { 715 {
716 } 716 }
717 717
718 /* 718 /*
719 * the frequency of the profiling timer can be changed 719 * the frequency of the profiling timer can be changed
720 * by writing a multiplier value into /proc/profile. 720 * by writing a multiplier value into /proc/profile.
721 * 721 *
722 * usually you want to run this on all CPUs ;) 722 * usually you want to run this on all CPUs ;)
723 */ 723 */
724 int setup_profiling_timer(unsigned int multiplier) 724 int setup_profiling_timer(unsigned int multiplier)
725 { 725 {
726 return 0; 726 return 0;
727 } 727 }
728 728
729 #ifdef CONFIG_HOTPLUG_CPU 729 #ifdef CONFIG_HOTPLUG_CPU
730 static ssize_t cpu_configure_show(struct sys_device *dev, 730 static ssize_t cpu_configure_show(struct sys_device *dev,
731 struct sysdev_attribute *attr, char *buf) 731 struct sysdev_attribute *attr, char *buf)
732 { 732 {
733 ssize_t count; 733 ssize_t count;
734 734
735 mutex_lock(&smp_cpu_state_mutex); 735 mutex_lock(&smp_cpu_state_mutex);
736 count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); 736 count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
737 mutex_unlock(&smp_cpu_state_mutex); 737 mutex_unlock(&smp_cpu_state_mutex);
738 return count; 738 return count;
739 } 739 }
740 740
741 static ssize_t cpu_configure_store(struct sys_device *dev, 741 static ssize_t cpu_configure_store(struct sys_device *dev,
742 struct sysdev_attribute *attr, 742 struct sysdev_attribute *attr,
743 const char *buf, size_t count) 743 const char *buf, size_t count)
744 { 744 {
745 int cpu = dev->id; 745 int cpu = dev->id;
746 int val, rc; 746 int val, rc;
747 char delim; 747 char delim;
748 748
749 if (sscanf(buf, "%d %c", &val, &delim) != 1) 749 if (sscanf(buf, "%d %c", &val, &delim) != 1)
750 return -EINVAL; 750 return -EINVAL;
751 if (val != 0 && val != 1) 751 if (val != 0 && val != 1)
752 return -EINVAL; 752 return -EINVAL;
753 753
754 get_online_cpus(); 754 get_online_cpus();
755 mutex_lock(&smp_cpu_state_mutex); 755 mutex_lock(&smp_cpu_state_mutex);
756 rc = -EBUSY; 756 rc = -EBUSY;
757 if (cpu_online(cpu)) 757 if (cpu_online(cpu))
758 goto out; 758 goto out;
759 rc = 0; 759 rc = 0;
760 switch (val) { 760 switch (val) {
761 case 0: 761 case 0:
762 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { 762 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
763 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); 763 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
764 if (!rc) { 764 if (!rc) {
765 smp_cpu_state[cpu] = CPU_STATE_STANDBY; 765 smp_cpu_state[cpu] = CPU_STATE_STANDBY;
766 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; 766 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
767 } 767 }
768 } 768 }
769 break; 769 break;
770 case 1: 770 case 1:
771 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { 771 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
772 rc = sclp_cpu_configure(__cpu_logical_map[cpu]); 772 rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
773 if (!rc) { 773 if (!rc) {
774 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; 774 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
775 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; 775 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
776 } 776 }
777 } 777 }
778 break; 778 break;
779 default: 779 default:
780 break; 780 break;
781 } 781 }
782 out: 782 out:
783 mutex_unlock(&smp_cpu_state_mutex); 783 mutex_unlock(&smp_cpu_state_mutex);
784 put_online_cpus(); 784 put_online_cpus();
785 return rc ? rc : count; 785 return rc ? rc : count;
786 } 786 }
787 static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); 787 static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
788 #endif /* CONFIG_HOTPLUG_CPU */ 788 #endif /* CONFIG_HOTPLUG_CPU */
789 789
790 static ssize_t cpu_polarization_show(struct sys_device *dev, 790 static ssize_t cpu_polarization_show(struct sys_device *dev,
791 struct sysdev_attribute *attr, char *buf) 791 struct sysdev_attribute *attr, char *buf)
792 { 792 {
793 int cpu = dev->id; 793 int cpu = dev->id;
794 ssize_t count; 794 ssize_t count;
795 795
796 mutex_lock(&smp_cpu_state_mutex); 796 mutex_lock(&smp_cpu_state_mutex);
797 switch (smp_cpu_polarization[cpu]) { 797 switch (smp_cpu_polarization[cpu]) {
798 case POLARIZATION_HRZ: 798 case POLARIZATION_HRZ:
799 count = sprintf(buf, "horizontal\n"); 799 count = sprintf(buf, "horizontal\n");
800 break; 800 break;
801 case POLARIZATION_VL: 801 case POLARIZATION_VL:
802 count = sprintf(buf, "vertical:low\n"); 802 count = sprintf(buf, "vertical:low\n");
803 break; 803 break;
804 case POLARIZATION_VM: 804 case POLARIZATION_VM:
805 count = sprintf(buf, "vertical:medium\n"); 805 count = sprintf(buf, "vertical:medium\n");
806 break; 806 break;
807 case POLARIZATION_VH: 807 case POLARIZATION_VH:
808 count = sprintf(buf, "vertical:high\n"); 808 count = sprintf(buf, "vertical:high\n");
809 break; 809 break;
810 default: 810 default:
811 count = sprintf(buf, "unknown\n"); 811 count = sprintf(buf, "unknown\n");
812 break; 812 break;
813 } 813 }
814 mutex_unlock(&smp_cpu_state_mutex); 814 mutex_unlock(&smp_cpu_state_mutex);
815 return count; 815 return count;
816 } 816 }
817 static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL); 817 static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
818 818
819 static ssize_t show_cpu_address(struct sys_device *dev, 819 static ssize_t show_cpu_address(struct sys_device *dev,
820 struct sysdev_attribute *attr, char *buf) 820 struct sysdev_attribute *attr, char *buf)
821 { 821 {
822 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); 822 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
823 } 823 }
824 static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL); 824 static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
825 825
826 826
827 static struct attribute *cpu_common_attrs[] = { 827 static struct attribute *cpu_common_attrs[] = {
828 #ifdef CONFIG_HOTPLUG_CPU 828 #ifdef CONFIG_HOTPLUG_CPU
829 &attr_configure.attr, 829 &attr_configure.attr,
830 #endif 830 #endif
831 &attr_address.attr, 831 &attr_address.attr,
832 &attr_polarization.attr, 832 &attr_polarization.attr,
833 NULL, 833 NULL,
834 }; 834 };
835 835
836 static struct attribute_group cpu_common_attr_group = { 836 static struct attribute_group cpu_common_attr_group = {
837 .attrs = cpu_common_attrs, 837 .attrs = cpu_common_attrs,
838 }; 838 };
839 839
840 static ssize_t show_capability(struct sys_device *dev, 840 static ssize_t show_capability(struct sys_device *dev,
841 struct sysdev_attribute *attr, char *buf) 841 struct sysdev_attribute *attr, char *buf)
842 { 842 {
843 unsigned int capability; 843 unsigned int capability;
844 int rc; 844 int rc;
845 845
846 rc = get_cpu_capability(&capability); 846 rc = get_cpu_capability(&capability);
847 if (rc) 847 if (rc)
848 return rc; 848 return rc;
849 return sprintf(buf, "%u\n", capability); 849 return sprintf(buf, "%u\n", capability);
850 } 850 }
851 static SYSDEV_ATTR(capability, 0444, show_capability, NULL); 851 static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
852 852
853 static ssize_t show_idle_count(struct sys_device *dev, 853 static ssize_t show_idle_count(struct sys_device *dev,
854 struct sysdev_attribute *attr, char *buf) 854 struct sysdev_attribute *attr, char *buf)
855 { 855 {
856 struct s390_idle_data *idle; 856 struct s390_idle_data *idle;
857 unsigned long long idle_count; 857 unsigned long long idle_count;
858 858
859 idle = &per_cpu(s390_idle, dev->id); 859 idle = &per_cpu(s390_idle, dev->id);
860 spin_lock(&idle->lock); 860 spin_lock(&idle->lock);
861 idle_count = idle->idle_count; 861 idle_count = idle->idle_count;
862 if (idle->idle_enter) 862 if (idle->idle_enter)
863 idle_count++; 863 idle_count++;
864 spin_unlock(&idle->lock); 864 spin_unlock(&idle->lock);
865 return sprintf(buf, "%llu\n", idle_count); 865 return sprintf(buf, "%llu\n", idle_count);
866 } 866 }
867 static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL); 867 static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
868 868
869 static ssize_t show_idle_time(struct sys_device *dev, 869 static ssize_t show_idle_time(struct sys_device *dev,
870 struct sysdev_attribute *attr, char *buf) 870 struct sysdev_attribute *attr, char *buf)
871 { 871 {
872 struct s390_idle_data *idle; 872 struct s390_idle_data *idle;
873 unsigned long long now, idle_time, idle_enter; 873 unsigned long long now, idle_time, idle_enter;
874 874
875 idle = &per_cpu(s390_idle, dev->id); 875 idle = &per_cpu(s390_idle, dev->id);
876 spin_lock(&idle->lock); 876 spin_lock(&idle->lock);
877 now = get_clock(); 877 now = get_clock();
878 idle_time = idle->idle_time; 878 idle_time = idle->idle_time;
879 idle_enter = idle->idle_enter; 879 idle_enter = idle->idle_enter;
880 if (idle_enter != 0ULL && idle_enter < now) 880 if (idle_enter != 0ULL && idle_enter < now)
881 idle_time += now - idle_enter; 881 idle_time += now - idle_enter;
882 spin_unlock(&idle->lock); 882 spin_unlock(&idle->lock);
883 return sprintf(buf, "%llu\n", idle_time >> 12); 883 return sprintf(buf, "%llu\n", idle_time >> 12);
884 } 884 }
885 static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); 885 static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
886 886
887 static struct attribute *cpu_online_attrs[] = { 887 static struct attribute *cpu_online_attrs[] = {
888 &attr_capability.attr, 888 &attr_capability.attr,
889 &attr_idle_count.attr, 889 &attr_idle_count.attr,
890 &attr_idle_time_us.attr, 890 &attr_idle_time_us.attr,
891 NULL, 891 NULL,
892 }; 892 };
893 893
894 static struct attribute_group cpu_online_attr_group = { 894 static struct attribute_group cpu_online_attr_group = {
895 .attrs = cpu_online_attrs, 895 .attrs = cpu_online_attrs,
896 }; 896 };
897 897
898 static int __cpuinit smp_cpu_notify(struct notifier_block *self, 898 static int __cpuinit smp_cpu_notify(struct notifier_block *self,
899 unsigned long action, void *hcpu) 899 unsigned long action, void *hcpu)
900 { 900 {
901 unsigned int cpu = (unsigned int)(long)hcpu; 901 unsigned int cpu = (unsigned int)(long)hcpu;
902 struct cpu *c = &per_cpu(cpu_devices, cpu); 902 struct cpu *c = &per_cpu(cpu_devices, cpu);
903 struct sys_device *s = &c->sysdev; 903 struct sys_device *s = &c->sysdev;
904 struct s390_idle_data *idle; 904 struct s390_idle_data *idle;
905 905
906 switch (action) { 906 switch (action) {
907 case CPU_ONLINE: 907 case CPU_ONLINE:
908 case CPU_ONLINE_FROZEN: 908 case CPU_ONLINE_FROZEN:
909 idle = &per_cpu(s390_idle, cpu); 909 idle = &per_cpu(s390_idle, cpu);
910 spin_lock_irq(&idle->lock); 910 spin_lock_irq(&idle->lock);
911 idle->idle_enter = 0; 911 idle->idle_enter = 0;
912 idle->idle_time = 0; 912 idle->idle_time = 0;
913 idle->idle_count = 0; 913 idle->idle_count = 0;
914 spin_unlock_irq(&idle->lock); 914 spin_unlock_irq(&idle->lock);
915 if (sysfs_create_group(&s->kobj, &cpu_online_attr_group)) 915 if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
916 return NOTIFY_BAD; 916 return NOTIFY_BAD;
917 break; 917 break;
918 case CPU_DEAD: 918 case CPU_DEAD:
919 case CPU_DEAD_FROZEN: 919 case CPU_DEAD_FROZEN:
920 sysfs_remove_group(&s->kobj, &cpu_online_attr_group); 920 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
921 break; 921 break;
922 } 922 }
923 return NOTIFY_OK; 923 return NOTIFY_OK;
924 } 924 }
925 925
926 static struct notifier_block __cpuinitdata smp_cpu_nb = { 926 static struct notifier_block __cpuinitdata smp_cpu_nb = {
927 .notifier_call = smp_cpu_notify, 927 .notifier_call = smp_cpu_notify,
928 }; 928 };
929 929
930 static int __devinit smp_add_present_cpu(int cpu) 930 static int __devinit smp_add_present_cpu(int cpu)
931 { 931 {
932 struct cpu *c = &per_cpu(cpu_devices, cpu); 932 struct cpu *c = &per_cpu(cpu_devices, cpu);
933 struct sys_device *s = &c->sysdev; 933 struct sys_device *s = &c->sysdev;
934 int rc; 934 int rc;
935 935
936 c->hotpluggable = 1; 936 c->hotpluggable = 1;
937 rc = register_cpu(c, cpu); 937 rc = register_cpu(c, cpu);
938 if (rc) 938 if (rc)
939 goto out; 939 goto out;
940 rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group); 940 rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
941 if (rc) 941 if (rc)
942 goto out_cpu; 942 goto out_cpu;
943 if (!cpu_online(cpu)) 943 if (!cpu_online(cpu))
944 goto out; 944 goto out;
945 rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); 945 rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
946 if (!rc) 946 if (!rc)
947 return 0; 947 return 0;
948 sysfs_remove_group(&s->kobj, &cpu_common_attr_group); 948 sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
949 out_cpu: 949 out_cpu:
950 #ifdef CONFIG_HOTPLUG_CPU 950 #ifdef CONFIG_HOTPLUG_CPU
951 unregister_cpu(c); 951 unregister_cpu(c);
952 #endif 952 #endif
953 out: 953 out:
954 return rc; 954 return rc;
955 } 955 }
956 956
957 #ifdef CONFIG_HOTPLUG_CPU 957 #ifdef CONFIG_HOTPLUG_CPU
958 958
959 int __ref smp_rescan_cpus(void) 959 int __ref smp_rescan_cpus(void)
960 { 960 {
961 cpumask_t newcpus; 961 cpumask_t newcpus;
962 int cpu; 962 int cpu;
963 int rc; 963 int rc;
964 964
965 get_online_cpus(); 965 get_online_cpus();
966 mutex_lock(&smp_cpu_state_mutex); 966 mutex_lock(&smp_cpu_state_mutex);
967 newcpus = cpu_present_map; 967 newcpus = cpu_present_map;
968 rc = __smp_rescan_cpus(); 968 rc = __smp_rescan_cpus();
969 if (rc) 969 if (rc)
970 goto out; 970 goto out;
971 cpus_andnot(newcpus, cpu_present_map, newcpus); 971 cpus_andnot(newcpus, cpu_present_map, newcpus);
972 for_each_cpu_mask(cpu, newcpus) { 972 for_each_cpu_mask(cpu, newcpus) {
973 rc = smp_add_present_cpu(cpu); 973 rc = smp_add_present_cpu(cpu);
974 if (rc) 974 if (rc)
975 cpu_clear(cpu, cpu_present_map); 975 cpu_clear(cpu, cpu_present_map);
976 } 976 }
977 rc = 0; 977 rc = 0;
978 out: 978 out:
979 mutex_unlock(&smp_cpu_state_mutex); 979 mutex_unlock(&smp_cpu_state_mutex);
980 put_online_cpus(); 980 put_online_cpus();
981 if (!cpus_empty(newcpus)) 981 if (!cpus_empty(newcpus))
982 topology_schedule_update(); 982 topology_schedule_update();
983 return rc; 983 return rc;
984 } 984 }
985 985
986 static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf, 986 static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
987 size_t count) 987 size_t count)
988 { 988 {
989 int rc; 989 int rc;
990 990
991 rc = smp_rescan_cpus(); 991 rc = smp_rescan_cpus();
992 return rc ? rc : count; 992 return rc ? rc : count;
993 } 993 }
994 static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store); 994 static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
995 #endif /* CONFIG_HOTPLUG_CPU */ 995 #endif /* CONFIG_HOTPLUG_CPU */
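Writing anything to the rescan class attribute defined above triggers a CPU rescan; with the cpu sysdev class the file is expected to appear as /sys/devices/system/cpu/rescan (path assumed from the sysdev_class_create_file() call in topology_init() below, not spelled out in the patch). A minimal userspace sketch that kicks off a rescan:

#include <stdio.h>

int main(void)
{
	/* Any value works; rescan_store() ignores the buffer contents
	 * and simply calls smp_rescan_cpus(). */
	FILE *f = fopen("/sys/devices/system/cpu/rescan", "w");

	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);
	return 0;
}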
996 996
997 static ssize_t dispatching_show(struct sysdev_class *class, char *buf) 997 static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
998 { 998 {
999 ssize_t count; 999 ssize_t count;
1000 1000
1001 mutex_lock(&smp_cpu_state_mutex); 1001 mutex_lock(&smp_cpu_state_mutex);
1002 count = sprintf(buf, "%d\n", cpu_management); 1002 count = sprintf(buf, "%d\n", cpu_management);
1003 mutex_unlock(&smp_cpu_state_mutex); 1003 mutex_unlock(&smp_cpu_state_mutex);
1004 return count; 1004 return count;
1005 } 1005 }
1006 1006
1007 static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf, 1007 static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
1008 size_t count) 1008 size_t count)
1009 { 1009 {
1010 int val, rc; 1010 int val, rc;
1011 char delim; 1011 char delim;
1012 1012
1013 if (sscanf(buf, "%d %c", &val, &delim) != 1) 1013 if (sscanf(buf, "%d %c", &val, &delim) != 1)
1014 return -EINVAL; 1014 return -EINVAL;
1015 if (val != 0 && val != 1) 1015 if (val != 0 && val != 1)
1016 return -EINVAL; 1016 return -EINVAL;
1017 rc = 0; 1017 rc = 0;
1018 get_online_cpus(); 1018 get_online_cpus();
1019 mutex_lock(&smp_cpu_state_mutex); 1019 mutex_lock(&smp_cpu_state_mutex);
1020 if (cpu_management == val) 1020 if (cpu_management == val)
1021 goto out; 1021 goto out;
1022 rc = topology_set_cpu_management(val); 1022 rc = topology_set_cpu_management(val);
1023 if (!rc) 1023 if (!rc)
1024 cpu_management = val; 1024 cpu_management = val;
1025 out: 1025 out:
1026 mutex_unlock(&smp_cpu_state_mutex); 1026 mutex_unlock(&smp_cpu_state_mutex);
1027 put_online_cpus(); 1027 put_online_cpus();
1028 return rc ? rc : count; 1028 return rc ? rc : count;
1029 } 1029 }
1030 static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, 1030 static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
1031 dispatching_store); 1031 dispatching_store);
1032 1032
1033 static int __init topology_init(void) 1033 static int __init topology_init(void)
1034 { 1034 {
1035 int cpu; 1035 int cpu;
1036 int rc; 1036 int rc;
1037 1037
1038 register_cpu_notifier(&smp_cpu_nb); 1038 register_cpu_notifier(&smp_cpu_nb);
1039 1039
1040 #ifdef CONFIG_HOTPLUG_CPU 1040 #ifdef CONFIG_HOTPLUG_CPU
1041 rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan); 1041 rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
1042 if (rc) 1042 if (rc)
1043 return rc; 1043 return rc;
1044 #endif 1044 #endif
1045 rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching); 1045 rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
1046 if (rc) 1046 if (rc)
1047 return rc; 1047 return rc;
1048 for_each_present_cpu(cpu) { 1048 for_each_present_cpu(cpu) {
1049 rc = smp_add_present_cpu(cpu); 1049 rc = smp_add_present_cpu(cpu);
1050 if (rc) 1050 if (rc)
1051 return rc; 1051 return rc;
1052 } 1052 }
1053 return 0; 1053 return 0;
1054 } 1054 }
1055 subsys_initcall(topology_init); 1055 subsys_initcall(topology_init);
1056 1056
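The idle_count and idle_time_us attributes registered above end up in the per-CPU sysdev directory, i.e. /sys/devices/system/cpu/cpuN/ (path assumed from the register_cpu()/sysfs_create_group() calls, not stated in the patch). A minimal userspace sketch that samples both values for one CPU; idle_time_us is already converted to microseconds by show_idle_time():

#include <stdio.h>

/* Read a single unsigned decimal value from a sysfs attribute file. */
static unsigned long long read_ull(const char *path)
{
	FILE *f = fopen(path, "r");
	unsigned long long val = 0;

	if (f) {
		if (fscanf(f, "%llu", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	char path[96];
	int cpu = 0;	/* sample CPU number, purely illustrative */

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/idle_count", cpu);
	printf("cpu%d idle_count:   %llu\n", cpu, read_ull(path));
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/idle_time_us", cpu);
	printf("cpu%d idle_time_us: %llu\n", cpu, read_ull(path));
	return 0;
}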
arch/s390/kernel/vtime.c
1 /* 1 /*
2 * arch/s390/kernel/vtime.c 2 * arch/s390/kernel/vtime.c
3 * Virtual cpu timer based timer functions. 3 * Virtual cpu timer based timer functions.
4 * 4 *
5 * S390 version 5 * S390 version
6 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Jan Glauber <jan.glauber@de.ibm.com> 7 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
8 */ 8 */
9 9
10 #include <linux/module.h> 10 #include <linux/module.h>
11 #include <linux/kernel.h> 11 #include <linux/kernel.h>
12 #include <linux/time.h> 12 #include <linux/time.h>
13 #include <linux/delay.h> 13 #include <linux/delay.h>
14 #include <linux/init.h> 14 #include <linux/init.h>
15 #include <linux/smp.h> 15 #include <linux/smp.h>
16 #include <linux/types.h> 16 #include <linux/types.h>
17 #include <linux/timex.h> 17 #include <linux/timex.h>
18 #include <linux/notifier.h> 18 #include <linux/notifier.h>
19 #include <linux/kernel_stat.h> 19 #include <linux/kernel_stat.h>
20 #include <linux/rcupdate.h> 20 #include <linux/rcupdate.h>
21 #include <linux/posix-timers.h> 21 #include <linux/posix-timers.h>
22 22
23 #include <asm/s390_ext.h> 23 #include <asm/s390_ext.h>
24 #include <asm/timer.h> 24 #include <asm/timer.h>
25 #include <asm/irq_regs.h> 25 #include <asm/irq_regs.h>
26 #include <asm/cpu.h> 26 #include <asm/cputime.h>
27 27
28 static ext_int_info_t ext_int_info_timer; 28 static ext_int_info_t ext_int_info_timer;
29 29
30 static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 30 static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
31 31
32 DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = { 32 DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
33 .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock) 33 .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
34 }; 34 };
35 35
36 static inline __u64 get_vtimer(void) 36 static inline __u64 get_vtimer(void)
37 { 37 {
38 __u64 timer; 38 __u64 timer;
39 39
40 asm volatile("STPT %0" : "=m" (timer)); 40 asm volatile("STPT %0" : "=m" (timer));
41 return timer; 41 return timer;
42 } 42 }
43 43
44 static inline void set_vtimer(__u64 expires) 44 static inline void set_vtimer(__u64 expires)
45 { 45 {
46 __u64 timer; 46 __u64 timer;
47 47
48 asm volatile (" STPT %0\n" /* Store current cpu timer value */ 48 asm volatile (" STPT %0\n" /* Store current cpu timer value */
49 " SPT %1" /* Set new value immediatly afterwards */ 49 " SPT %1" /* Set new value immediatly afterwards */
50 : "=m" (timer) : "m" (expires) ); 50 : "=m" (timer) : "m" (expires) );
51 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; 51 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
52 S390_lowcore.last_update_timer = expires; 52 S390_lowcore.last_update_timer = expires;
53 } 53 }
54 54
55 /* 55 /*
56 * Update process times based on virtual cpu times stored by entry.S 56 * Update process times based on virtual cpu times stored by entry.S
57 * to the lowcore fields user_timer, system_timer & steal_clock. 57 * to the lowcore fields user_timer, system_timer & steal_clock.
58 */ 58 */
59 static void do_account_vtime(struct task_struct *tsk, int hardirq_offset) 59 static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
60 { 60 {
61 struct thread_info *ti = task_thread_info(tsk); 61 struct thread_info *ti = task_thread_info(tsk);
62 __u64 timer, clock, user, system, steal; 62 __u64 timer, clock, user, system, steal;
63 63
64 timer = S390_lowcore.last_update_timer; 64 timer = S390_lowcore.last_update_timer;
65 clock = S390_lowcore.last_update_clock; 65 clock = S390_lowcore.last_update_clock;
66 asm volatile (" STPT %0\n" /* Store current cpu timer value */ 66 asm volatile (" STPT %0\n" /* Store current cpu timer value */
67 " STCK %1" /* Store current tod clock value */ 67 " STCK %1" /* Store current tod clock value */
68 : "=m" (S390_lowcore.last_update_timer), 68 : "=m" (S390_lowcore.last_update_timer),
69 "=m" (S390_lowcore.last_update_clock) ); 69 "=m" (S390_lowcore.last_update_clock) );
70 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 70 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
71 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; 71 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
72 72
73 user = S390_lowcore.user_timer - ti->user_timer; 73 user = S390_lowcore.user_timer - ti->user_timer;
74 S390_lowcore.steal_timer -= user; 74 S390_lowcore.steal_timer -= user;
75 ti->user_timer = S390_lowcore.user_timer; 75 ti->user_timer = S390_lowcore.user_timer;
76 account_user_time(tsk, user, user); 76 account_user_time(tsk, user, user);
77 77
78 system = S390_lowcore.system_timer - ti->system_timer; 78 system = S390_lowcore.system_timer - ti->system_timer;
79 S390_lowcore.steal_timer -= system; 79 S390_lowcore.steal_timer -= system;
80 ti->system_timer = S390_lowcore.system_timer; 80 ti->system_timer = S390_lowcore.system_timer;
81 account_system_time(tsk, hardirq_offset, system, system); 81 account_system_time(tsk, hardirq_offset, system, system);
82 82
83 steal = S390_lowcore.steal_timer; 83 steal = S390_lowcore.steal_timer;
84 if ((s64) steal > 0) { 84 if ((s64) steal > 0) {
85 S390_lowcore.steal_timer = 0; 85 S390_lowcore.steal_timer = 0;
86 account_steal_time(steal); 86 account_steal_time(steal);
87 } 87 }
88 } 88 }
89 89
90 void account_vtime(struct task_struct *prev, struct task_struct *next) 90 void account_vtime(struct task_struct *prev, struct task_struct *next)
91 { 91 {
92 struct thread_info *ti; 92 struct thread_info *ti;
93 93
94 do_account_vtime(prev, 0); 94 do_account_vtime(prev, 0);
95 ti = task_thread_info(prev); 95 ti = task_thread_info(prev);
96 ti->user_timer = S390_lowcore.user_timer; 96 ti->user_timer = S390_lowcore.user_timer;
97 ti->system_timer = S390_lowcore.system_timer; 97 ti->system_timer = S390_lowcore.system_timer;
98 ti = task_thread_info(next); 98 ti = task_thread_info(next);
99 S390_lowcore.user_timer = ti->user_timer; 99 S390_lowcore.user_timer = ti->user_timer;
100 S390_lowcore.system_timer = ti->system_timer; 100 S390_lowcore.system_timer = ti->system_timer;
101 } 101 }
102 102
103 void account_process_tick(struct task_struct *tsk, int user_tick) 103 void account_process_tick(struct task_struct *tsk, int user_tick)
104 { 104 {
105 do_account_vtime(tsk, HARDIRQ_OFFSET); 105 do_account_vtime(tsk, HARDIRQ_OFFSET);
106 } 106 }
107 107
108 /* 108 /*
109 * Update process times based on virtual cpu times stored by entry.S 109 * Update process times based on virtual cpu times stored by entry.S
110 * to the lowcore fields user_timer, system_timer & steal_clock. 110 * to the lowcore fields user_timer, system_timer & steal_clock.
111 */ 111 */
112 void account_system_vtime(struct task_struct *tsk) 112 void account_system_vtime(struct task_struct *tsk)
113 { 113 {
114 struct thread_info *ti = task_thread_info(tsk); 114 struct thread_info *ti = task_thread_info(tsk);
115 __u64 timer, system; 115 __u64 timer, system;
116 116
117 timer = S390_lowcore.last_update_timer; 117 timer = S390_lowcore.last_update_timer;
118 S390_lowcore.last_update_timer = get_vtimer(); 118 S390_lowcore.last_update_timer = get_vtimer();
119 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 119 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
120 120
121 system = S390_lowcore.system_timer - ti->system_timer; 121 system = S390_lowcore.system_timer - ti->system_timer;
122 S390_lowcore.steal_timer -= system; 122 S390_lowcore.steal_timer -= system;
123 ti->system_timer = S390_lowcore.system_timer; 123 ti->system_timer = S390_lowcore.system_timer;
124 account_system_time(tsk, 0, system, system); 124 account_system_time(tsk, 0, system, system);
125 } 125 }
126 EXPORT_SYMBOL_GPL(account_system_vtime); 126 EXPORT_SYMBOL_GPL(account_system_vtime);
127 127
128 void vtime_start_cpu(void) 128 void vtime_start_cpu(void)
129 { 129 {
130 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 130 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
131 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); 131 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
132 __u64 idle_time, expires; 132 __u64 idle_time, expires;
133 133
134 /* Account time spent with enabled wait psw loaded as idle time. */ 134 /* Account time spent with enabled wait psw loaded as idle time. */
135 idle_time = S390_lowcore.int_clock - idle->idle_enter; 135 idle_time = S390_lowcore.int_clock - idle->idle_enter;
136 account_idle_time(idle_time); 136 account_idle_time(idle_time);
137 S390_lowcore.steal_timer += 137 S390_lowcore.steal_timer +=
138 idle->idle_enter - S390_lowcore.last_update_clock; 138 idle->idle_enter - S390_lowcore.last_update_clock;
139 S390_lowcore.last_update_clock = S390_lowcore.int_clock; 139 S390_lowcore.last_update_clock = S390_lowcore.int_clock;
140 140
141 /* Account system time spent going idle. */ 141 /* Account system time spent going idle. */
142 S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle; 142 S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
143 S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer; 143 S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer;
144 144
145 /* Restart vtime CPU timer */ 145 /* Restart vtime CPU timer */
146 if (vq->do_spt) { 146 if (vq->do_spt) {
147 /* Program old expire value but first save progress. */ 147 /* Program old expire value but first save progress. */
148 expires = vq->idle - S390_lowcore.async_enter_timer; 148 expires = vq->idle - S390_lowcore.async_enter_timer;
149 expires += get_vtimer(); 149 expires += get_vtimer();
150 set_vtimer(expires); 150 set_vtimer(expires);
151 } else { 151 } else {
152 /* Don't account the CPU timer delta while the cpu was idle. */ 152 /* Don't account the CPU timer delta while the cpu was idle. */
153 vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer; 153 vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
154 } 154 }
155 155
156 spin_lock(&idle->lock); 156 spin_lock(&idle->lock);
157 idle->idle_time += idle_time; 157 idle->idle_time += idle_time;
158 idle->idle_enter = 0ULL; 158 idle->idle_enter = 0ULL;
159 idle->idle_count++; 159 idle->idle_count++;
160 spin_unlock(&idle->lock); 160 spin_unlock(&idle->lock);
161 } 161 }
162 162
163 void vtime_stop_cpu(void) 163 void vtime_stop_cpu(void)
164 { 164 {
165 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 165 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
166 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); 166 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
167 psw_t psw; 167 psw_t psw;
168 168
169 /* Wait for external, I/O or machine check interrupt. */ 169 /* Wait for external, I/O or machine check interrupt. */
170 psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT; 170 psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;
171 171
172 /* Check if the CPU timer needs to be reprogrammed. */ 172 /* Check if the CPU timer needs to be reprogrammed. */
173 if (vq->do_spt) { 173 if (vq->do_spt) {
174 __u64 vmax = VTIMER_MAX_SLICE; 174 __u64 vmax = VTIMER_MAX_SLICE;
175 /* 175 /*
176 * The inline assembly is equivalent to 176 * The inline assembly is equivalent to
177 * vq->idle = get_cpu_timer(); 177 * vq->idle = get_cpu_timer();
178 * set_cpu_timer(VTIMER_MAX_SLICE); 178 * set_cpu_timer(VTIMER_MAX_SLICE);
179 * idle->idle_enter = get_clock(); 179 * idle->idle_enter = get_clock();
180 * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | 180 * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
181 * PSW_MASK_IO | PSW_MASK_EXT); 181 * PSW_MASK_IO | PSW_MASK_EXT);
182 * The difference is that the inline assembly makes sure that 182 * The difference is that the inline assembly makes sure that
183 * the last three instructions are stpt, stck and lpsw in that 183 * the last three instructions are stpt, stck and lpsw in that
184 * order. This is done to increase the precision. 184 * order. This is done to increase the precision.
185 */ 185 */
186 asm volatile( 186 asm volatile(
187 #ifndef CONFIG_64BIT 187 #ifndef CONFIG_64BIT
188 " basr 1,0\n" 188 " basr 1,0\n"
189 "0: ahi 1,1f-0b\n" 189 "0: ahi 1,1f-0b\n"
190 " st 1,4(%2)\n" 190 " st 1,4(%2)\n"
191 #else /* CONFIG_64BIT */ 191 #else /* CONFIG_64BIT */
192 " larl 1,1f\n" 192 " larl 1,1f\n"
193 " stg 1,8(%2)\n" 193 " stg 1,8(%2)\n"
194 #endif /* CONFIG_64BIT */ 194 #endif /* CONFIG_64BIT */
195 " stpt 0(%4)\n" 195 " stpt 0(%4)\n"
196 " spt 0(%5)\n" 196 " spt 0(%5)\n"
197 " stck 0(%3)\n" 197 " stck 0(%3)\n"
198 #ifndef CONFIG_64BIT 198 #ifndef CONFIG_64BIT
199 " lpsw 0(%2)\n" 199 " lpsw 0(%2)\n"
200 #else /* CONFIG_64BIT */ 200 #else /* CONFIG_64BIT */
201 " lpswe 0(%2)\n" 201 " lpswe 0(%2)\n"
202 #endif /* CONFIG_64BIT */ 202 #endif /* CONFIG_64BIT */
203 "1:" 203 "1:"
204 : "=m" (idle->idle_enter), "=m" (vq->idle) 204 : "=m" (idle->idle_enter), "=m" (vq->idle)
205 : "a" (&psw), "a" (&idle->idle_enter), 205 : "a" (&psw), "a" (&idle->idle_enter),
206 "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw) 206 "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw)
207 : "memory", "cc", "1"); 207 : "memory", "cc", "1");
208 } else { 208 } else {
209 /* 209 /*
210 * The inline assembly is equivalent to 210 * The inline assembly is equivalent to
211 * vq->idle = get_cpu_timer(); 211 * vq->idle = get_cpu_timer();
212 * idle->idle_enter = get_clock(); 212 * idle->idle_enter = get_clock();
213 * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | 213 * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
214 * PSW_MASK_IO | PSW_MASK_EXT); 214 * PSW_MASK_IO | PSW_MASK_EXT);
215 * The difference is that the inline assembly makes sure that 215 * The difference is that the inline assembly makes sure that
216 * the last three instructions are stpt, stck and lpsw in that 216 * the last three instructions are stpt, stck and lpsw in that
217 * order. This is done to increase the precision. 217 * order. This is done to increase the precision.
218 */ 218 */
219 asm volatile( 219 asm volatile(
220 #ifndef CONFIG_64BIT 220 #ifndef CONFIG_64BIT
221 " basr 1,0\n" 221 " basr 1,0\n"
222 "0: ahi 1,1f-0b\n" 222 "0: ahi 1,1f-0b\n"
223 " st 1,4(%2)\n" 223 " st 1,4(%2)\n"
224 #else /* CONFIG_64BIT */ 224 #else /* CONFIG_64BIT */
225 " larl 1,1f\n" 225 " larl 1,1f\n"
226 " stg 1,8(%2)\n" 226 " stg 1,8(%2)\n"
227 #endif /* CONFIG_64BIT */ 227 #endif /* CONFIG_64BIT */
228 " stpt 0(%4)\n" 228 " stpt 0(%4)\n"
229 " stck 0(%3)\n" 229 " stck 0(%3)\n"
230 #ifndef CONFIG_64BIT 230 #ifndef CONFIG_64BIT
231 " lpsw 0(%2)\n" 231 " lpsw 0(%2)\n"
232 #else /* CONFIG_64BIT */ 232 #else /* CONFIG_64BIT */
233 " lpswe 0(%2)\n" 233 " lpswe 0(%2)\n"
234 #endif /* CONFIG_64BIT */ 234 #endif /* CONFIG_64BIT */
235 "1:" 235 "1:"
236 : "=m" (idle->idle_enter), "=m" (vq->idle) 236 : "=m" (idle->idle_enter), "=m" (vq->idle)
237 : "a" (&psw), "a" (&idle->idle_enter), 237 : "a" (&psw), "a" (&idle->idle_enter),
238 "a" (&vq->idle), "m" (psw) 238 "a" (&vq->idle), "m" (psw)
239 : "memory", "cc", "1"); 239 : "memory", "cc", "1");
240 } 240 }
241 } 241 }
242 242
243 cputime64_t s390_get_idle_time(int cpu) 243 cputime64_t s390_get_idle_time(int cpu)
244 { 244 {
245 struct s390_idle_data *idle; 245 struct s390_idle_data *idle;
246 unsigned long long now, idle_time, idle_enter; 246 unsigned long long now, idle_time, idle_enter;
247 247
248 idle = &per_cpu(s390_idle, cpu); 248 idle = &per_cpu(s390_idle, cpu);
249 spin_lock(&idle->lock); 249 spin_lock(&idle->lock);
250 now = get_clock(); 250 now = get_clock();
251 idle_time = 0; 251 idle_time = 0;
252 idle_enter = idle->idle_enter; 252 idle_enter = idle->idle_enter;
253 if (idle_enter != 0ULL && idle_enter < now) 253 if (idle_enter != 0ULL && idle_enter < now)
254 idle_time = now - idle_enter; 254 idle_time = now - idle_enter;
255 spin_unlock(&idle->lock); 255 spin_unlock(&idle->lock);
256 return idle_time; 256 return idle_time;
257 } 257 }
258 258
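Unlike show_idle_time() in smp.c, s390_get_idle_time() returns the current idle interval in raw TOD-clock units. One TOD unit corresponds to 2**-12 microseconds, which is why readers shift the value right by 12 before reporting it; a small helper making that conversion explicit (sketch only, name invented for illustration, not part of the patch):

/* Convert a TOD-clock/CPU-timer delta to whole microseconds. */
static inline unsigned long long s390_tod_delta_to_us(unsigned long long tod)
{
	return tod >> 12;	/* one unit == 2**-12 microseconds */
}

/* Illustrative use:
 *	unsigned long long idle_us = s390_tod_delta_to_us(s390_get_idle_time(3));
 */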
259 /* 259 /*
260 * Sorted add to a list. List is linearly searched until first bigger 260 * Sorted add to a list. List is linearly searched until first bigger
261 * element is found. 261 * element is found.
262 */ 262 */
263 static void list_add_sorted(struct vtimer_list *timer, struct list_head *head) 263 static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
264 { 264 {
265 struct vtimer_list *event; 265 struct vtimer_list *event;
266 266
267 list_for_each_entry(event, head, entry) { 267 list_for_each_entry(event, head, entry) {
268 if (event->expires > timer->expires) { 268 if (event->expires > timer->expires) {
269 list_add_tail(&timer->entry, &event->entry); 269 list_add_tail(&timer->entry, &event->entry);
270 return; 270 return;
271 } 271 }
272 } 272 }
273 list_add_tail(&timer->entry, head); 273 list_add_tail(&timer->entry, head);
274 } 274 }
275 275
276 /* 276 /*
277 * Do the callback functions of expired vtimer events. 277 * Do the callback functions of expired vtimer events.
278 * Called from within the interrupt handler. 278 * Called from within the interrupt handler.
279 */ 279 */
280 static void do_callbacks(struct list_head *cb_list) 280 static void do_callbacks(struct list_head *cb_list)
281 { 281 {
282 struct vtimer_queue *vq; 282 struct vtimer_queue *vq;
283 struct vtimer_list *event, *tmp; 283 struct vtimer_list *event, *tmp;
284 284
285 if (list_empty(cb_list)) 285 if (list_empty(cb_list))
286 return; 286 return;
287 287
288 vq = &__get_cpu_var(virt_cpu_timer); 288 vq = &__get_cpu_var(virt_cpu_timer);
289 289
290 list_for_each_entry_safe(event, tmp, cb_list, entry) { 290 list_for_each_entry_safe(event, tmp, cb_list, entry) {
291 list_del_init(&event->entry); 291 list_del_init(&event->entry);
292 (event->function)(event->data); 292 (event->function)(event->data);
293 if (event->interval) { 293 if (event->interval) {
294 /* Recharge interval timer */ 294 /* Recharge interval timer */
295 event->expires = event->interval + vq->elapsed; 295 event->expires = event->interval + vq->elapsed;
296 spin_lock(&vq->lock); 296 spin_lock(&vq->lock);
297 list_add_sorted(event, &vq->list); 297 list_add_sorted(event, &vq->list);
298 spin_unlock(&vq->lock); 298 spin_unlock(&vq->lock);
299 } 299 }
300 } 300 }
301 } 301 }
302 302
303 /* 303 /*
304 * Handler for the virtual CPU timer. 304 * Handler for the virtual CPU timer.
305 */ 305 */
306 static void do_cpu_timer_interrupt(__u16 error_code) 306 static void do_cpu_timer_interrupt(__u16 error_code)
307 { 307 {
308 struct vtimer_queue *vq; 308 struct vtimer_queue *vq;
309 struct vtimer_list *event, *tmp; 309 struct vtimer_list *event, *tmp;
310 struct list_head cb_list; /* the callback queue */ 310 struct list_head cb_list; /* the callback queue */
311 __u64 elapsed, next; 311 __u64 elapsed, next;
312 312
313 INIT_LIST_HEAD(&cb_list); 313 INIT_LIST_HEAD(&cb_list);
314 vq = &__get_cpu_var(virt_cpu_timer); 314 vq = &__get_cpu_var(virt_cpu_timer);
315 315
316 /* walk timer list, fire all expired events */ 316 /* walk timer list, fire all expired events */
317 spin_lock(&vq->lock); 317 spin_lock(&vq->lock);
318 318
319 elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer); 319 elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
320 BUG_ON((s64) elapsed < 0); 320 BUG_ON((s64) elapsed < 0);
321 vq->elapsed = 0; 321 vq->elapsed = 0;
322 list_for_each_entry_safe(event, tmp, &vq->list, entry) { 322 list_for_each_entry_safe(event, tmp, &vq->list, entry) {
323 if (event->expires < elapsed) 323 if (event->expires < elapsed)
324 /* move expired timer to the callback queue */ 324 /* move expired timer to the callback queue */
325 list_move_tail(&event->entry, &cb_list); 325 list_move_tail(&event->entry, &cb_list);
326 else 326 else
327 event->expires -= elapsed; 327 event->expires -= elapsed;
328 } 328 }
329 spin_unlock(&vq->lock); 329 spin_unlock(&vq->lock);
330 330
331 vq->do_spt = list_empty(&cb_list); 331 vq->do_spt = list_empty(&cb_list);
332 do_callbacks(&cb_list); 332 do_callbacks(&cb_list);
333 333
334 /* next event is first in list */ 334 /* next event is first in list */
335 next = VTIMER_MAX_SLICE; 335 next = VTIMER_MAX_SLICE;
336 spin_lock(&vq->lock); 336 spin_lock(&vq->lock);
337 if (!list_empty(&vq->list)) { 337 if (!list_empty(&vq->list)) {
338 event = list_first_entry(&vq->list, struct vtimer_list, entry); 338 event = list_first_entry(&vq->list, struct vtimer_list, entry);
339 next = event->expires; 339 next = event->expires;
340 } else 340 } else
341 vq->do_spt = 0; 341 vq->do_spt = 0;
342 spin_unlock(&vq->lock); 342 spin_unlock(&vq->lock);
343 /* 343 /*
344 * To improve precision, add the time spent by the 344 * To improve precision, add the time spent by the
345 * interrupt handler to the elapsed time. 345 * interrupt handler to the elapsed time.
346 * Note: the CPU timer counts down and we got an interrupt, 346 * Note: the CPU timer counts down and we got an interrupt,
347 * so the current content is negative. 347 * so the current content is negative.
348 */ 348 */
349 elapsed = S390_lowcore.async_enter_timer - get_vtimer(); 349 elapsed = S390_lowcore.async_enter_timer - get_vtimer();
350 set_vtimer(next - elapsed); 350 set_vtimer(next - elapsed);
351 vq->timer = next - elapsed; 351 vq->timer = next - elapsed;
352 vq->elapsed = elapsed; 352 vq->elapsed = elapsed;
353 } 353 }
354 354
355 void init_virt_timer(struct vtimer_list *timer) 355 void init_virt_timer(struct vtimer_list *timer)
356 { 356 {
357 timer->function = NULL; 357 timer->function = NULL;
358 INIT_LIST_HEAD(&timer->entry); 358 INIT_LIST_HEAD(&timer->entry);
359 } 359 }
360 EXPORT_SYMBOL(init_virt_timer); 360 EXPORT_SYMBOL(init_virt_timer);
361 361
362 static inline int vtimer_pending(struct vtimer_list *timer) 362 static inline int vtimer_pending(struct vtimer_list *timer)
363 { 363 {
364 return (!list_empty(&timer->entry)); 364 return (!list_empty(&timer->entry));
365 } 365 }
366 366
367 /* 367 /*
368 * this function should only run on the specified CPU 368 * this function should only run on the specified CPU
369 */ 369 */
370 static void internal_add_vtimer(struct vtimer_list *timer) 370 static void internal_add_vtimer(struct vtimer_list *timer)
371 { 371 {
372 struct vtimer_queue *vq; 372 struct vtimer_queue *vq;
373 unsigned long flags; 373 unsigned long flags;
374 __u64 left, expires; 374 __u64 left, expires;
375 375
376 vq = &per_cpu(virt_cpu_timer, timer->cpu); 376 vq = &per_cpu(virt_cpu_timer, timer->cpu);
377 spin_lock_irqsave(&vq->lock, flags); 377 spin_lock_irqsave(&vq->lock, flags);
378 378
379 BUG_ON(timer->cpu != smp_processor_id()); 379 BUG_ON(timer->cpu != smp_processor_id());
380 380
381 if (list_empty(&vq->list)) { 381 if (list_empty(&vq->list)) {
382 /* First timer on this cpu, just program it. */ 382 /* First timer on this cpu, just program it. */
383 list_add(&timer->entry, &vq->list); 383 list_add(&timer->entry, &vq->list);
384 set_vtimer(timer->expires); 384 set_vtimer(timer->expires);
385 vq->timer = timer->expires; 385 vq->timer = timer->expires;
386 vq->elapsed = 0; 386 vq->elapsed = 0;
387 } else { 387 } else {
388 /* Check progress of old timers. */ 388 /* Check progress of old timers. */
389 expires = timer->expires; 389 expires = timer->expires;
390 left = get_vtimer(); 390 left = get_vtimer();
391 if (likely((s64) expires < (s64) left)) { 391 if (likely((s64) expires < (s64) left)) {
392 /* The new timer expires before the current timer. */ 392 /* The new timer expires before the current timer. */
393 set_vtimer(expires); 393 set_vtimer(expires);
394 vq->elapsed += vq->timer - left; 394 vq->elapsed += vq->timer - left;
395 vq->timer = expires; 395 vq->timer = expires;
396 } else { 396 } else {
397 vq->elapsed += vq->timer - left; 397 vq->elapsed += vq->timer - left;
398 vq->timer = left; 398 vq->timer = left;
399 } 399 }
400 /* Insert new timer into per cpu list. */ 400 /* Insert new timer into per cpu list. */
401 timer->expires += vq->elapsed; 401 timer->expires += vq->elapsed;
402 list_add_sorted(timer, &vq->list); 402 list_add_sorted(timer, &vq->list);
403 } 403 }
404 404
405 spin_unlock_irqrestore(&vq->lock, flags); 405 spin_unlock_irqrestore(&vq->lock, flags);
406 /* release CPU acquired in prepare_vtimer or mod_virt_timer() */ 406 /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
407 put_cpu(); 407 put_cpu();
408 } 408 }
409 409
410 static inline void prepare_vtimer(struct vtimer_list *timer) 410 static inline void prepare_vtimer(struct vtimer_list *timer)
411 { 411 {
412 BUG_ON(!timer->function); 412 BUG_ON(!timer->function);
413 BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE); 413 BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
414 BUG_ON(vtimer_pending(timer)); 414 BUG_ON(vtimer_pending(timer));
415 timer->cpu = get_cpu(); 415 timer->cpu = get_cpu();
416 } 416 }
417 417
418 /* 418 /*
419 * add_virt_timer - add a oneshot virtual CPU timer 419 * add_virt_timer - add a oneshot virtual CPU timer
420 */ 420 */
421 void add_virt_timer(void *new) 421 void add_virt_timer(void *new)
422 { 422 {
423 struct vtimer_list *timer; 423 struct vtimer_list *timer;
424 424
425 timer = (struct vtimer_list *)new; 425 timer = (struct vtimer_list *)new;
426 prepare_vtimer(timer); 426 prepare_vtimer(timer);
427 timer->interval = 0; 427 timer->interval = 0;
428 internal_add_vtimer(timer); 428 internal_add_vtimer(timer);
429 } 429 }
430 EXPORT_SYMBOL(add_virt_timer); 430 EXPORT_SYMBOL(add_virt_timer);
431 431
432 /* 432 /*
433 * add_virt_timer_periodic - add an interval virtual CPU timer 433 * add_virt_timer_periodic - add an interval virtual CPU timer
434 */ 434 */
435 void add_virt_timer_periodic(void *new) 435 void add_virt_timer_periodic(void *new)
436 { 436 {
437 struct vtimer_list *timer; 437 struct vtimer_list *timer;
438 438
439 timer = (struct vtimer_list *)new; 439 timer = (struct vtimer_list *)new;
440 prepare_vtimer(timer); 440 prepare_vtimer(timer);
441 timer->interval = timer->expires; 441 timer->interval = timer->expires;
442 internal_add_vtimer(timer); 442 internal_add_vtimer(timer);
443 } 443 }
444 EXPORT_SYMBOL(add_virt_timer_periodic); 444 EXPORT_SYMBOL(add_virt_timer_periodic);
445 445
446 int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) 446 int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
447 { 447 {
448 struct vtimer_queue *vq; 448 struct vtimer_queue *vq;
449 unsigned long flags; 449 unsigned long flags;
450 int cpu; 450 int cpu;
451 451
452 BUG_ON(!timer->function); 452 BUG_ON(!timer->function);
453 BUG_ON(!expires || expires > VTIMER_MAX_SLICE); 453 BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
454 454
455 if (timer->expires == expires && vtimer_pending(timer)) 455 if (timer->expires == expires && vtimer_pending(timer))
456 return 1; 456 return 1;
457 457
458 cpu = get_cpu(); 458 cpu = get_cpu();
459 vq = &per_cpu(virt_cpu_timer, cpu); 459 vq = &per_cpu(virt_cpu_timer, cpu);
460 460
461 /* disable interrupts before testing if the timer is pending */ 461 /* disable interrupts before testing if the timer is pending */
462 spin_lock_irqsave(&vq->lock, flags); 462 spin_lock_irqsave(&vq->lock, flags);
463 463
464 /* if the timer isn't pending, add it on the current CPU */ 464 /* if the timer isn't pending, add it on the current CPU */
465 if (!vtimer_pending(timer)) { 465 if (!vtimer_pending(timer)) {
466 spin_unlock_irqrestore(&vq->lock, flags); 466 spin_unlock_irqrestore(&vq->lock, flags);
467 467
468 if (periodic) 468 if (periodic)
469 timer->interval = expires; 469 timer->interval = expires;
470 else 470 else
471 timer->interval = 0; 471 timer->interval = 0;
472 timer->expires = expires; 472 timer->expires = expires;
473 timer->cpu = cpu; 473 timer->cpu = cpu;
474 internal_add_vtimer(timer); 474 internal_add_vtimer(timer);
475 return 0; 475 return 0;
476 } 476 }
477 477
478 /* check if we run on the right CPU */ 478 /* check if we run on the right CPU */
479 BUG_ON(timer->cpu != cpu); 479 BUG_ON(timer->cpu != cpu);
480 480
481 list_del_init(&timer->entry); 481 list_del_init(&timer->entry);
482 timer->expires = expires; 482 timer->expires = expires;
483 if (periodic) 483 if (periodic)
484 timer->interval = expires; 484 timer->interval = expires;
485 485
486 /* the timer can't expire anymore so we can release the lock */ 486 /* the timer can't expire anymore so we can release the lock */
487 spin_unlock_irqrestore(&vq->lock, flags); 487 spin_unlock_irqrestore(&vq->lock, flags);
488 internal_add_vtimer(timer); 488 internal_add_vtimer(timer);
489 return 1; 489 return 1;
490 } 490 }
491 491
492 /* 492 /*
493 * If we change a pending timer, the function must be called on the CPU 493 * If we change a pending timer, the function must be called on the CPU
494 * where the timer is running. 494 * where the timer is running.
495 * 495 *
496 * returns whether it has modified a pending timer (1) or not (0) 496 * returns whether it has modified a pending timer (1) or not (0)
497 */ 497 */
498 int mod_virt_timer(struct vtimer_list *timer, __u64 expires) 498 int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
499 { 499 {
500 return __mod_vtimer(timer, expires, 0); 500 return __mod_vtimer(timer, expires, 0);
501 } 501 }
502 EXPORT_SYMBOL(mod_virt_timer); 502 EXPORT_SYMBOL(mod_virt_timer);
503 503
504 /* 504 /*
505 * If we change a pending timer, the function must be called on the CPU 505 * If we change a pending timer, the function must be called on the CPU
506 * where the timer is running. 506 * where the timer is running.
507 * 507 *
508 * returns whether it has modified a pending timer (1) or not (0) 508 * returns whether it has modified a pending timer (1) or not (0)
509 */ 509 */
510 int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires) 510 int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
511 { 511 {
512 return __mod_vtimer(timer, expires, 1); 512 return __mod_vtimer(timer, expires, 1);
513 } 513 }
514 EXPORT_SYMBOL(mod_virt_timer_periodic); 514 EXPORT_SYMBOL(mod_virt_timer_periodic);
515 515
516 /* 516 /*
517 * delete a virtual timer 517 * delete a virtual timer
518 * 518 *
519 * returns whether the deleted timer was pending (1) or not (0) 519 * returns whether the deleted timer was pending (1) or not (0)
520 */ 520 */
521 int del_virt_timer(struct vtimer_list *timer) 521 int del_virt_timer(struct vtimer_list *timer)
522 { 522 {
523 unsigned long flags; 523 unsigned long flags;
524 struct vtimer_queue *vq; 524 struct vtimer_queue *vq;
525 525
526 /* check if timer is pending */ 526 /* check if timer is pending */
527 if (!vtimer_pending(timer)) 527 if (!vtimer_pending(timer))
528 return 0; 528 return 0;
529 529
530 vq = &per_cpu(virt_cpu_timer, timer->cpu); 530 vq = &per_cpu(virt_cpu_timer, timer->cpu);
531 spin_lock_irqsave(&vq->lock, flags); 531 spin_lock_irqsave(&vq->lock, flags);
532 532
533 /* we don't interrupt a running timer, just let it expire! */ 533 /* we don't interrupt a running timer, just let it expire! */
534 list_del_init(&timer->entry); 534 list_del_init(&timer->entry);
535 535
536 spin_unlock_irqrestore(&vq->lock, flags); 536 spin_unlock_irqrestore(&vq->lock, flags);
537 return 1; 537 return 1;
538 } 538 }
539 EXPORT_SYMBOL(del_virt_timer); 539 EXPORT_SYMBOL(del_virt_timer);
540 540
541 /* 541 /*
542 * Start the virtual CPU timer on the current CPU. 542 * Start the virtual CPU timer on the current CPU.
543 */ 543 */
544 void init_cpu_vtimer(void) 544 void init_cpu_vtimer(void)
545 { 545 {
546 struct vtimer_queue *vq; 546 struct vtimer_queue *vq;
547 547
548 /* initialize per cpu vtimer structure */ 548 /* initialize per cpu vtimer structure */
549 vq = &__get_cpu_var(virt_cpu_timer); 549 vq = &__get_cpu_var(virt_cpu_timer);
550 INIT_LIST_HEAD(&vq->list); 550 INIT_LIST_HEAD(&vq->list);
551 spin_lock_init(&vq->lock); 551 spin_lock_init(&vq->lock);
552 552
553 /* enable cpu timer interrupts */ 553 /* enable cpu timer interrupts */
554 __ctl_set_bit(0,10); 554 __ctl_set_bit(0,10);
555 } 555 }
556 556
557 void __init vtime_init(void) 557 void __init vtime_init(void)
558 { 558 {
559 /* request the cpu timer external interrupt */ 559 /* request the cpu timer external interrupt */
560 if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt, 560 if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
561 &ext_int_info_timer) != 0) 561 &ext_int_info_timer) != 0)
562 panic("Couldn't request external interrupt 0x1005"); 562 panic("Couldn't request external interrupt 0x1005");
563 563
564 /* Enable cpu timer interrupts on the boot cpu. */ 564 /* Enable cpu timer interrupts on the boot cpu. */
565 init_cpu_vtimer(); 565 init_cpu_vtimer();
566 } 566 }
567 567
568 568
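For context on the vtimer interface above (not part of this commit): a minimal usage sketch of the exported API, assuming only the struct vtimer_list fields visible in this file (function, data, expires, interval) and that expires/interval are in CPU timer units of 2**-12 microseconds, so roughly 1ULL << 32 per second of CPU time. The header location and the exact callback signature are assumptions, not confirmed by this diff.

    #include <asm/timer.h>          /* assumed header exposing struct vtimer_list */

    static struct vtimer_list my_vtimer;

    /* assumed callback signature: invoked with timer->data from
     * do_callbacks() in CPU timer interrupt context, so keep it short */
    static void my_vtimer_fn(unsigned long data)
    {
    }

    static void my_vtimer_setup(void)
    {
            init_virt_timer(&my_vtimer);
            my_vtimer.function = my_vtimer_fn;
            my_vtimer.data = 0;
            /* first expiry and period: roughly one second of virtual CPU time */
            my_vtimer.expires = 1ULL << 32;
            add_virt_timer_periodic(&my_vtimer);
    }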
drivers/s390/cio/cio.c
1 /* 1 /*
2 * drivers/s390/cio/cio.c 2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls 3 * S/390 common I/O routines -- low level i/o calls
4 * 4 *
5 * Copyright IBM Corp. 1999,2008 5 * Copyright IBM Corp. 1999,2008
6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Arnd Bergmann (arndb@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com)
9 * Martin Schwidefsky (schwidefsky@de.ibm.com) 9 * Martin Schwidefsky (schwidefsky@de.ibm.com)
10 */ 10 */
11 11
12 #define KMSG_COMPONENT "cio" 12 #define KMSG_COMPONENT "cio"
13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 14
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/init.h> 16 #include <linux/init.h>
17 #include <linux/slab.h> 17 #include <linux/slab.h>
18 #include <linux/device.h> 18 #include <linux/device.h>
19 #include <linux/kernel_stat.h> 19 #include <linux/kernel_stat.h>
20 #include <linux/interrupt.h> 20 #include <linux/interrupt.h>
21 #include <asm/cio.h> 21 #include <asm/cio.h>
22 #include <asm/delay.h> 22 #include <asm/delay.h>
23 #include <asm/irq.h> 23 #include <asm/irq.h>
24 #include <asm/irq_regs.h> 24 #include <asm/irq_regs.h>
25 #include <asm/setup.h> 25 #include <asm/setup.h>
26 #include <asm/reset.h> 26 #include <asm/reset.h>
27 #include <asm/ipl.h> 27 #include <asm/ipl.h>
28 #include <asm/chpid.h> 28 #include <asm/chpid.h>
29 #include <asm/airq.h> 29 #include <asm/airq.h>
30 #include <asm/isc.h> 30 #include <asm/isc.h>
31 #include <asm/cpu.h> 31 #include <asm/cputime.h>
32 #include <asm/fcx.h> 32 #include <asm/fcx.h>
33 #include <asm/nmi.h> 33 #include <asm/nmi.h>
34 #include <asm/crw.h> 34 #include <asm/crw.h>
35 #include "cio.h" 35 #include "cio.h"
36 #include "css.h" 36 #include "css.h"
37 #include "chsc.h" 37 #include "chsc.h"
38 #include "ioasm.h" 38 #include "ioasm.h"
39 #include "io_sch.h" 39 #include "io_sch.h"
40 #include "blacklist.h" 40 #include "blacklist.h"
41 #include "cio_debug.h" 41 #include "cio_debug.h"
42 #include "chp.h" 42 #include "chp.h"
43 43
44 debug_info_t *cio_debug_msg_id; 44 debug_info_t *cio_debug_msg_id;
45 debug_info_t *cio_debug_trace_id; 45 debug_info_t *cio_debug_trace_id;
46 debug_info_t *cio_debug_crw_id; 46 debug_info_t *cio_debug_crw_id;
47 47
48 /* 48 /*
49 * Function: cio_debug_init 49 * Function: cio_debug_init
50 * Initializes three debug logs for common I/O: 50 * Initializes three debug logs for common I/O:
51 * - cio_msg logs generic cio messages 51 * - cio_msg logs generic cio messages
52 * - cio_trace logs the calling of different functions 52 * - cio_trace logs the calling of different functions
53 * - cio_crw logs machine check related cio messages 53 * - cio_crw logs machine check related cio messages
54 */ 54 */
55 static int __init cio_debug_init(void) 55 static int __init cio_debug_init(void)
56 { 56 {
57 cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long)); 57 cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
58 if (!cio_debug_msg_id) 58 if (!cio_debug_msg_id)
59 goto out_unregister; 59 goto out_unregister;
60 debug_register_view(cio_debug_msg_id, &debug_sprintf_view); 60 debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
61 debug_set_level(cio_debug_msg_id, 2); 61 debug_set_level(cio_debug_msg_id, 2);
62 cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16); 62 cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
63 if (!cio_debug_trace_id) 63 if (!cio_debug_trace_id)
64 goto out_unregister; 64 goto out_unregister;
65 debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view); 65 debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
66 debug_set_level(cio_debug_trace_id, 2); 66 debug_set_level(cio_debug_trace_id, 2);
67 cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long)); 67 cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
68 if (!cio_debug_crw_id) 68 if (!cio_debug_crw_id)
69 goto out_unregister; 69 goto out_unregister;
70 debug_register_view(cio_debug_crw_id, &debug_sprintf_view); 70 debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
71 debug_set_level(cio_debug_crw_id, 4); 71 debug_set_level(cio_debug_crw_id, 4);
72 return 0; 72 return 0;
73 73
74 out_unregister: 74 out_unregister:
75 if (cio_debug_msg_id) 75 if (cio_debug_msg_id)
76 debug_unregister(cio_debug_msg_id); 76 debug_unregister(cio_debug_msg_id);
77 if (cio_debug_trace_id) 77 if (cio_debug_trace_id)
78 debug_unregister(cio_debug_trace_id); 78 debug_unregister(cio_debug_trace_id);
79 if (cio_debug_crw_id) 79 if (cio_debug_crw_id)
80 debug_unregister(cio_debug_crw_id); 80 debug_unregister(cio_debug_crw_id);
81 return -1; 81 return -1;
82 } 82 }
83 83
84 arch_initcall (cio_debug_init); 84 arch_initcall (cio_debug_init);
85 85
86 int 86 int
87 cio_set_options (struct subchannel *sch, int flags) 87 cio_set_options (struct subchannel *sch, int flags)
88 { 88 {
89 sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; 89 sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
90 sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; 90 sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
91 sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; 91 sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
92 return 0; 92 return 0;
93 } 93 }
94 94
95 /* FIXME: who wants to use this? */ 95 /* FIXME: who wants to use this? */
96 int 96 int
97 cio_get_options (struct subchannel *sch) 97 cio_get_options (struct subchannel *sch)
98 { 98 {
99 int flags; 99 int flags;
100 100
101 flags = 0; 101 flags = 0;
102 if (sch->options.suspend) 102 if (sch->options.suspend)
103 flags |= DOIO_ALLOW_SUSPEND; 103 flags |= DOIO_ALLOW_SUSPEND;
104 if (sch->options.prefetch) 104 if (sch->options.prefetch)
105 flags |= DOIO_DENY_PREFETCH; 105 flags |= DOIO_DENY_PREFETCH;
106 if (sch->options.inter) 106 if (sch->options.inter)
107 flags |= DOIO_SUPPRESS_INTER; 107 flags |= DOIO_SUPPRESS_INTER;
108 return flags; 108 return flags;
109 } 109 }
110 110
111 static int 111 static int
112 cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) 112 cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
113 { 113 {
114 char dbf_text[15]; 114 char dbf_text[15];
115 115
116 if (lpm != 0) 116 if (lpm != 0)
117 sch->lpm &= ~lpm; 117 sch->lpm &= ~lpm;
118 else 118 else
119 sch->lpm = 0; 119 sch->lpm = 0;
120 120
121 CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " 121 CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
122 "subchannel 0.%x.%04x!\n", sch->schid.ssid, 122 "subchannel 0.%x.%04x!\n", sch->schid.ssid,
123 sch->schid.sch_no); 123 sch->schid.sch_no);
124 124
125 if (cio_update_schib(sch)) 125 if (cio_update_schib(sch))
126 return -ENODEV; 126 return -ENODEV;
127 127
128 sprintf(dbf_text, "no%s", dev_name(&sch->dev)); 128 sprintf(dbf_text, "no%s", dev_name(&sch->dev));
129 CIO_TRACE_EVENT(0, dbf_text); 129 CIO_TRACE_EVENT(0, dbf_text);
130 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); 130 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
131 131
132 return (sch->lpm ? -EACCES : -ENODEV); 132 return (sch->lpm ? -EACCES : -ENODEV);
133 } 133 }
134 134
135 int 135 int
136 cio_start_key (struct subchannel *sch, /* subchannel structure */ 136 cio_start_key (struct subchannel *sch, /* subchannel structure */
137 struct ccw1 * cpa, /* logical channel prog addr */ 137 struct ccw1 * cpa, /* logical channel prog addr */
138 __u8 lpm, /* logical path mask */ 138 __u8 lpm, /* logical path mask */
139 __u8 key) /* storage key */ 139 __u8 key) /* storage key */
140 { 140 {
141 char dbf_txt[15]; 141 char dbf_txt[15];
142 int ccode; 142 int ccode;
143 union orb *orb; 143 union orb *orb;
144 144
145 CIO_TRACE_EVENT(4, "stIO"); 145 CIO_TRACE_EVENT(4, "stIO");
146 CIO_TRACE_EVENT(4, dev_name(&sch->dev)); 146 CIO_TRACE_EVENT(4, dev_name(&sch->dev));
147 147
148 orb = &to_io_private(sch)->orb; 148 orb = &to_io_private(sch)->orb;
149 memset(orb, 0, sizeof(union orb)); 149 memset(orb, 0, sizeof(union orb));
150 /* sch is always under 2G. */ 150 /* sch is always under 2G. */
151 orb->cmd.intparm = (u32)(addr_t)sch; 151 orb->cmd.intparm = (u32)(addr_t)sch;
152 orb->cmd.fmt = 1; 152 orb->cmd.fmt = 1;
153 153
154 orb->cmd.pfch = sch->options.prefetch == 0; 154 orb->cmd.pfch = sch->options.prefetch == 0;
155 orb->cmd.spnd = sch->options.suspend; 155 orb->cmd.spnd = sch->options.suspend;
156 orb->cmd.ssic = sch->options.suspend && sch->options.inter; 156 orb->cmd.ssic = sch->options.suspend && sch->options.inter;
157 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; 157 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
158 #ifdef CONFIG_64BIT 158 #ifdef CONFIG_64BIT
159 /* 159 /*
160 * for 64 bit we always support 64 bit IDAWs with 4k page size only 160 * for 64 bit we always support 64 bit IDAWs with 4k page size only
161 */ 161 */
162 orb->cmd.c64 = 1; 162 orb->cmd.c64 = 1;
163 orb->cmd.i2k = 0; 163 orb->cmd.i2k = 0;
164 #endif 164 #endif
165 orb->cmd.key = key >> 4; 165 orb->cmd.key = key >> 4;
166 /* issue "Start Subchannel" */ 166 /* issue "Start Subchannel" */
167 orb->cmd.cpa = (__u32) __pa(cpa); 167 orb->cmd.cpa = (__u32) __pa(cpa);
168 ccode = ssch(sch->schid, orb); 168 ccode = ssch(sch->schid, orb);
169 169
170 /* process condition code */ 170 /* process condition code */
171 sprintf(dbf_txt, "ccode:%d", ccode); 171 sprintf(dbf_txt, "ccode:%d", ccode);
172 CIO_TRACE_EVENT(4, dbf_txt); 172 CIO_TRACE_EVENT(4, dbf_txt);
173 173
174 switch (ccode) { 174 switch (ccode) {
175 case 0: 175 case 0:
176 /* 176 /*
177 * initialize device status information 177 * initialize device status information
178 */ 178 */
179 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; 179 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
180 return 0; 180 return 0;
181 case 1: /* status pending */ 181 case 1: /* status pending */
182 case 2: /* busy */ 182 case 2: /* busy */
183 return -EBUSY; 183 return -EBUSY;
184 case 3: /* device/path not operational */ 184 case 3: /* device/path not operational */
185 return cio_start_handle_notoper(sch, lpm); 185 return cio_start_handle_notoper(sch, lpm);
186 default: 186 default:
187 return ccode; 187 return ccode;
188 } 188 }
189 } 189 }
190 190
191 int 191 int
192 cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) 192 cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
193 { 193 {
194 return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY); 194 return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
195 } 195 }
196 196
197 /* 197 /*
198 * resume suspended I/O operation 198 * resume suspended I/O operation
199 */ 199 */
200 int 200 int
201 cio_resume (struct subchannel *sch) 201 cio_resume (struct subchannel *sch)
202 { 202 {
203 char dbf_txt[15]; 203 char dbf_txt[15];
204 int ccode; 204 int ccode;
205 205
206 CIO_TRACE_EVENT (4, "resIO"); 206 CIO_TRACE_EVENT (4, "resIO");
207 CIO_TRACE_EVENT(4, dev_name(&sch->dev)); 207 CIO_TRACE_EVENT(4, dev_name(&sch->dev));
208 208
209 ccode = rsch (sch->schid); 209 ccode = rsch (sch->schid);
210 210
211 sprintf (dbf_txt, "ccode:%d", ccode); 211 sprintf (dbf_txt, "ccode:%d", ccode);
212 CIO_TRACE_EVENT (4, dbf_txt); 212 CIO_TRACE_EVENT (4, dbf_txt);
213 213
214 switch (ccode) { 214 switch (ccode) {
215 case 0: 215 case 0:
216 sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND; 216 sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
217 return 0; 217 return 0;
218 case 1: 218 case 1:
219 return -EBUSY; 219 return -EBUSY;
220 case 2: 220 case 2:
221 return -EINVAL; 221 return -EINVAL;
222 default: 222 default:
223 /* 223 /*
224 * useless to wait for request completion 224 * useless to wait for request completion
225 * as the device is no longer operational! 225 * as the device is no longer operational!
226 */ 226 */
227 return -ENODEV; 227 return -ENODEV;
228 } 228 }
229 } 229 }
230 230
231 /* 231 /*
232 * halt I/O operation 232 * halt I/O operation
233 */ 233 */
234 int 234 int
235 cio_halt(struct subchannel *sch) 235 cio_halt(struct subchannel *sch)
236 { 236 {
237 char dbf_txt[15]; 237 char dbf_txt[15];
238 int ccode; 238 int ccode;
239 239
240 if (!sch) 240 if (!sch)
241 return -ENODEV; 241 return -ENODEV;
242 242
243 CIO_TRACE_EVENT (2, "haltIO"); 243 CIO_TRACE_EVENT (2, "haltIO");
244 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 244 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
245 245
246 /* 246 /*
247 * Issue "Halt subchannel" and process condition code 247 * Issue "Halt subchannel" and process condition code
248 */ 248 */
249 ccode = hsch (sch->schid); 249 ccode = hsch (sch->schid);
250 250
251 sprintf (dbf_txt, "ccode:%d", ccode); 251 sprintf (dbf_txt, "ccode:%d", ccode);
252 CIO_TRACE_EVENT (2, dbf_txt); 252 CIO_TRACE_EVENT (2, dbf_txt);
253 253
254 switch (ccode) { 254 switch (ccode) {
255 case 0: 255 case 0:
256 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; 256 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
257 return 0; 257 return 0;
258 case 1: /* status pending */ 258 case 1: /* status pending */
259 case 2: /* busy */ 259 case 2: /* busy */
260 return -EBUSY; 260 return -EBUSY;
261 default: /* device not operational */ 261 default: /* device not operational */
262 return -ENODEV; 262 return -ENODEV;
263 } 263 }
264 } 264 }
265 265
266 /* 266 /*
267 * Clear I/O operation 267 * Clear I/O operation
268 */ 268 */
269 int 269 int
270 cio_clear(struct subchannel *sch) 270 cio_clear(struct subchannel *sch)
271 { 271 {
272 char dbf_txt[15]; 272 char dbf_txt[15];
273 int ccode; 273 int ccode;
274 274
275 if (!sch) 275 if (!sch)
276 return -ENODEV; 276 return -ENODEV;
277 277
278 CIO_TRACE_EVENT (2, "clearIO"); 278 CIO_TRACE_EVENT (2, "clearIO");
279 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 279 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
280 280
281 /* 281 /*
282 * Issue "Clear subchannel" and process condition code 282 * Issue "Clear subchannel" and process condition code
283 */ 283 */
284 ccode = csch (sch->schid); 284 ccode = csch (sch->schid);
285 285
286 sprintf (dbf_txt, "ccode:%d", ccode); 286 sprintf (dbf_txt, "ccode:%d", ccode);
287 CIO_TRACE_EVENT (2, dbf_txt); 287 CIO_TRACE_EVENT (2, dbf_txt);
288 288
289 switch (ccode) { 289 switch (ccode) {
290 case 0: 290 case 0:
291 sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND; 291 sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
292 return 0; 292 return 0;
293 default: /* device not operational */ 293 default: /* device not operational */
294 return -ENODEV; 294 return -ENODEV;
295 } 295 }
296 } 296 }
297 297
298 /* 298 /*
299 * Function: cio_cancel 299 * Function: cio_cancel
300 * Issues a "Cancel Subchannel" on the specified subchannel 300 * Issues a "Cancel Subchannel" on the specified subchannel
301 * Note: We don't need any fancy intparms and flags here 301 * Note: We don't need any fancy intparms and flags here
302 * since xsch is executed synchronously. 302 * since xsch is executed synchronously.
303 * Only for common I/O internal use for now. 303 * Only for common I/O internal use for now.
304 */ 304 */
305 int 305 int
306 cio_cancel (struct subchannel *sch) 306 cio_cancel (struct subchannel *sch)
307 { 307 {
308 char dbf_txt[15]; 308 char dbf_txt[15];
309 int ccode; 309 int ccode;
310 310
311 if (!sch) 311 if (!sch)
312 return -ENODEV; 312 return -ENODEV;
313 313
314 CIO_TRACE_EVENT (2, "cancelIO"); 314 CIO_TRACE_EVENT (2, "cancelIO");
315 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 315 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
316 316
317 ccode = xsch (sch->schid); 317 ccode = xsch (sch->schid);
318 318
319 sprintf (dbf_txt, "ccode:%d", ccode); 319 sprintf (dbf_txt, "ccode:%d", ccode);
320 CIO_TRACE_EVENT (2, dbf_txt); 320 CIO_TRACE_EVENT (2, dbf_txt);
321 321
322 switch (ccode) { 322 switch (ccode) {
323 case 0: /* success */ 323 case 0: /* success */
324 /* Update information in scsw. */ 324 /* Update information in scsw. */
325 if (cio_update_schib(sch)) 325 if (cio_update_schib(sch))
326 return -ENODEV; 326 return -ENODEV;
327 return 0; 327 return 0;
328 case 1: /* status pending */ 328 case 1: /* status pending */
329 return -EBUSY; 329 return -EBUSY;
330 case 2: /* not applicable */ 330 case 2: /* not applicable */
331 return -EINVAL; 331 return -EINVAL;
332 default: /* not oper */ 332 default: /* not oper */
333 return -ENODEV; 333 return -ENODEV;
334 } 334 }
335 } 335 }
336 336
337 337
338 static void cio_apply_config(struct subchannel *sch, struct schib *schib) 338 static void cio_apply_config(struct subchannel *sch, struct schib *schib)
339 { 339 {
340 schib->pmcw.intparm = sch->config.intparm; 340 schib->pmcw.intparm = sch->config.intparm;
341 schib->pmcw.mbi = sch->config.mbi; 341 schib->pmcw.mbi = sch->config.mbi;
342 schib->pmcw.isc = sch->config.isc; 342 schib->pmcw.isc = sch->config.isc;
343 schib->pmcw.ena = sch->config.ena; 343 schib->pmcw.ena = sch->config.ena;
344 schib->pmcw.mme = sch->config.mme; 344 schib->pmcw.mme = sch->config.mme;
345 schib->pmcw.mp = sch->config.mp; 345 schib->pmcw.mp = sch->config.mp;
346 schib->pmcw.csense = sch->config.csense; 346 schib->pmcw.csense = sch->config.csense;
347 schib->pmcw.mbfc = sch->config.mbfc; 347 schib->pmcw.mbfc = sch->config.mbfc;
348 if (sch->config.mbfc) 348 if (sch->config.mbfc)
349 schib->mba = sch->config.mba; 349 schib->mba = sch->config.mba;
350 } 350 }
351 351
352 static int cio_check_config(struct subchannel *sch, struct schib *schib) 352 static int cio_check_config(struct subchannel *sch, struct schib *schib)
353 { 353 {
354 return (schib->pmcw.intparm == sch->config.intparm) && 354 return (schib->pmcw.intparm == sch->config.intparm) &&
355 (schib->pmcw.mbi == sch->config.mbi) && 355 (schib->pmcw.mbi == sch->config.mbi) &&
356 (schib->pmcw.isc == sch->config.isc) && 356 (schib->pmcw.isc == sch->config.isc) &&
357 (schib->pmcw.ena == sch->config.ena) && 357 (schib->pmcw.ena == sch->config.ena) &&
358 (schib->pmcw.mme == sch->config.mme) && 358 (schib->pmcw.mme == sch->config.mme) &&
359 (schib->pmcw.mp == sch->config.mp) && 359 (schib->pmcw.mp == sch->config.mp) &&
360 (schib->pmcw.csense == sch->config.csense) && 360 (schib->pmcw.csense == sch->config.csense) &&
361 (schib->pmcw.mbfc == sch->config.mbfc) && 361 (schib->pmcw.mbfc == sch->config.mbfc) &&
362 (!sch->config.mbfc || (schib->mba == sch->config.mba)); 362 (!sch->config.mbfc || (schib->mba == sch->config.mba));
363 } 363 }
364 364
365 /* 365 /*
366 * cio_commit_config - apply configuration to the subchannel 366 * cio_commit_config - apply configuration to the subchannel
367 */ 367 */
368 int cio_commit_config(struct subchannel *sch) 368 int cio_commit_config(struct subchannel *sch)
369 { 369 {
370 struct schib schib; 370 struct schib schib;
371 int ccode, retry, ret = 0; 371 int ccode, retry, ret = 0;
372 372
373 if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) 373 if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
374 return -ENODEV; 374 return -ENODEV;
375 375
376 for (retry = 0; retry < 5; retry++) { 376 for (retry = 0; retry < 5; retry++) {
377 /* copy desired changes to local schib */ 377 /* copy desired changes to local schib */
378 cio_apply_config(sch, &schib); 378 cio_apply_config(sch, &schib);
379 ccode = msch_err(sch->schid, &schib); 379 ccode = msch_err(sch->schid, &schib);
380 if (ccode < 0) /* -EIO if msch gets a program check. */ 380 if (ccode < 0) /* -EIO if msch gets a program check. */
381 return ccode; 381 return ccode;
382 switch (ccode) { 382 switch (ccode) {
383 case 0: /* successful */ 383 case 0: /* successful */
384 if (stsch(sch->schid, &schib) || 384 if (stsch(sch->schid, &schib) ||
385 !css_sch_is_valid(&schib)) 385 !css_sch_is_valid(&schib))
386 return -ENODEV; 386 return -ENODEV;
387 if (cio_check_config(sch, &schib)) { 387 if (cio_check_config(sch, &schib)) {
388 /* commit changes from local schib */ 388 /* commit changes from local schib */
389 memcpy(&sch->schib, &schib, sizeof(schib)); 389 memcpy(&sch->schib, &schib, sizeof(schib));
390 return 0; 390 return 0;
391 } 391 }
392 ret = -EAGAIN; 392 ret = -EAGAIN;
393 break; 393 break;
394 case 1: /* status pending */ 394 case 1: /* status pending */
395 return -EBUSY; 395 return -EBUSY;
396 case 2: /* busy */ 396 case 2: /* busy */
397 udelay(100); /* allow for recovery */ 397 udelay(100); /* allow for recovery */
398 ret = -EBUSY; 398 ret = -EBUSY;
399 break; 399 break;
400 case 3: /* not operational */ 400 case 3: /* not operational */
401 return -ENODEV; 401 return -ENODEV;
402 } 402 }
403 } 403 }
404 return ret; 404 return ret;
405 } 405 }
406 406
407 /** 407 /**
408 * cio_update_schib - Perform stsch and update schib if subchannel is valid. 408 * cio_update_schib - Perform stsch and update schib if subchannel is valid.
409 * @sch: subchannel on which to perform stsch 409 * @sch: subchannel on which to perform stsch
410 * Return zero on success, -ENODEV otherwise. 410 * Return zero on success, -ENODEV otherwise.
411 */ 411 */
412 int cio_update_schib(struct subchannel *sch) 412 int cio_update_schib(struct subchannel *sch)
413 { 413 {
414 struct schib schib; 414 struct schib schib;
415 415
416 if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) 416 if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
417 return -ENODEV; 417 return -ENODEV;
418 418
419 memcpy(&sch->schib, &schib, sizeof(schib)); 419 memcpy(&sch->schib, &schib, sizeof(schib));
420 return 0; 420 return 0;
421 } 421 }
422 EXPORT_SYMBOL_GPL(cio_update_schib); 422 EXPORT_SYMBOL_GPL(cio_update_schib);
423 423
424 /** 424 /**
425 * cio_enable_subchannel - enable a subchannel. 425 * cio_enable_subchannel - enable a subchannel.
426 * @sch: subchannel to be enabled 426 * @sch: subchannel to be enabled
427 * @intparm: interruption parameter to set 427 * @intparm: interruption parameter to set
428 */ 428 */
429 int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 429 int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
430 { 430 {
431 char dbf_txt[15]; 431 char dbf_txt[15];
432 int retry; 432 int retry;
433 int ret; 433 int ret;
434 434
435 CIO_TRACE_EVENT (2, "ensch"); 435 CIO_TRACE_EVENT (2, "ensch");
436 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 436 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
437 437
438 if (sch_is_pseudo_sch(sch)) 438 if (sch_is_pseudo_sch(sch))
439 return -EINVAL; 439 return -EINVAL;
440 if (cio_update_schib(sch)) 440 if (cio_update_schib(sch))
441 return -ENODEV; 441 return -ENODEV;
442 442
443 sch->config.ena = 1; 443 sch->config.ena = 1;
444 sch->config.isc = sch->isc; 444 sch->config.isc = sch->isc;
445 sch->config.intparm = intparm; 445 sch->config.intparm = intparm;
446 446
447 for (retry = 0; retry < 3; retry++) { 447 for (retry = 0; retry < 3; retry++) {
448 ret = cio_commit_config(sch); 448 ret = cio_commit_config(sch);
449 if (ret == -EIO) { 449 if (ret == -EIO) {
450 /* 450 /*
451 * Got a program check in msch. Try without 451 * Got a program check in msch. Try without
452 * the concurrent sense bit the next time. 452 * the concurrent sense bit the next time.
453 */ 453 */
454 sch->config.csense = 0; 454 sch->config.csense = 0;
455 } else if (ret == -EBUSY) { 455 } else if (ret == -EBUSY) {
456 struct irb irb; 456 struct irb irb;
457 if (tsch(sch->schid, &irb) != 0) 457 if (tsch(sch->schid, &irb) != 0)
458 break; 458 break;
459 } else 459 } else
460 break; 460 break;
461 } 461 }
462 sprintf (dbf_txt, "ret:%d", ret); 462 sprintf (dbf_txt, "ret:%d", ret);
463 CIO_TRACE_EVENT (2, dbf_txt); 463 CIO_TRACE_EVENT (2, dbf_txt);
464 return ret; 464 return ret;
465 } 465 }
466 EXPORT_SYMBOL_GPL(cio_enable_subchannel); 466 EXPORT_SYMBOL_GPL(cio_enable_subchannel);
467 467
468 /** 468 /**
469 * cio_disable_subchannel - disable a subchannel. 469 * cio_disable_subchannel - disable a subchannel.
470 * @sch: subchannel to disable 470 * @sch: subchannel to disable
471 */ 471 */
472 int cio_disable_subchannel(struct subchannel *sch) 472 int cio_disable_subchannel(struct subchannel *sch)
473 { 473 {
474 char dbf_txt[15]; 474 char dbf_txt[15];
475 int retry; 475 int retry;
476 int ret; 476 int ret;
477 477
478 CIO_TRACE_EVENT (2, "dissch"); 478 CIO_TRACE_EVENT (2, "dissch");
479 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 479 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
480 480
481 if (sch_is_pseudo_sch(sch)) 481 if (sch_is_pseudo_sch(sch))
482 return 0; 482 return 0;
483 if (cio_update_schib(sch)) 483 if (cio_update_schib(sch))
484 return -ENODEV; 484 return -ENODEV;
485 485
486 sch->config.ena = 0; 486 sch->config.ena = 0;
487 487
488 for (retry = 0; retry < 3; retry++) { 488 for (retry = 0; retry < 3; retry++) {
489 ret = cio_commit_config(sch); 489 ret = cio_commit_config(sch);
490 if (ret == -EBUSY) { 490 if (ret == -EBUSY) {
491 struct irb irb; 491 struct irb irb;
492 if (tsch(sch->schid, &irb) != 0) 492 if (tsch(sch->schid, &irb) != 0)
493 break; 493 break;
494 } else 494 } else
495 break; 495 break;
496 } 496 }
497 sprintf (dbf_txt, "ret:%d", ret); 497 sprintf (dbf_txt, "ret:%d", ret);
498 CIO_TRACE_EVENT (2, dbf_txt); 498 CIO_TRACE_EVENT (2, dbf_txt);
499 return ret; 499 return ret;
500 } 500 }
501 EXPORT_SYMBOL_GPL(cio_disable_subchannel); 501 EXPORT_SYMBOL_GPL(cio_disable_subchannel);
502 502
503 int cio_create_sch_lock(struct subchannel *sch) 503 int cio_create_sch_lock(struct subchannel *sch)
504 { 504 {
505 sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); 505 sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
506 if (!sch->lock) 506 if (!sch->lock)
507 return -ENOMEM; 507 return -ENOMEM;
508 spin_lock_init(sch->lock); 508 spin_lock_init(sch->lock);
509 return 0; 509 return 0;
510 } 510 }
511 511
512 static int cio_check_devno_blacklisted(struct subchannel *sch) 512 static int cio_check_devno_blacklisted(struct subchannel *sch)
513 { 513 {
514 if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { 514 if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
515 /* 515 /*
516 * This device must not be known to Linux. So we simply 516 * This device must not be known to Linux. So we simply
517 * say that there is no device and return ENODEV. 517 * say that there is no device and return ENODEV.
518 */ 518 */
519 CIO_MSG_EVENT(6, "Blacklisted device detected " 519 CIO_MSG_EVENT(6, "Blacklisted device detected "
520 "at devno %04X, subchannel set %x\n", 520 "at devno %04X, subchannel set %x\n",
521 sch->schib.pmcw.dev, sch->schid.ssid); 521 sch->schib.pmcw.dev, sch->schid.ssid);
522 return -ENODEV; 522 return -ENODEV;
523 } 523 }
524 return 0; 524 return 0;
525 } 525 }
526 526
527 static int cio_validate_io_subchannel(struct subchannel *sch) 527 static int cio_validate_io_subchannel(struct subchannel *sch)
528 { 528 {
529 /* Initialization for io subchannels. */ 529 /* Initialization for io subchannels. */
530 if (!css_sch_is_valid(&sch->schib)) 530 if (!css_sch_is_valid(&sch->schib))
531 return -ENODEV; 531 return -ENODEV;
532 532
533 /* Devno is valid. */ 533 /* Devno is valid. */
534 return cio_check_devno_blacklisted(sch); 534 return cio_check_devno_blacklisted(sch);
535 } 535 }
536 536
537 static int cio_validate_msg_subchannel(struct subchannel *sch) 537 static int cio_validate_msg_subchannel(struct subchannel *sch)
538 { 538 {
539 /* Initialization for message subchannels. */ 539 /* Initialization for message subchannels. */
540 if (!css_sch_is_valid(&sch->schib)) 540 if (!css_sch_is_valid(&sch->schib))
541 return -ENODEV; 541 return -ENODEV;
542 542
543 /* Devno is valid. */ 543 /* Devno is valid. */
544 return cio_check_devno_blacklisted(sch); 544 return cio_check_devno_blacklisted(sch);
545 } 545 }
546 546
547 /** 547 /**
548 * cio_validate_subchannel - basic validation of subchannel 548 * cio_validate_subchannel - basic validation of subchannel
549 * @sch: subchannel structure to be filled out 549 * @sch: subchannel structure to be filled out
550 * @schid: subchannel id 550 * @schid: subchannel id
551 * 551 *
552 * Find out subchannel type and initialize struct subchannel. 552 * Find out subchannel type and initialize struct subchannel.
553 * Return codes: 553 * Return codes:
554 * 0 on success 554 * 0 on success
555 * -ENXIO for non-defined subchannels 555 * -ENXIO for non-defined subchannels
556 * -ENODEV for invalid subchannels or blacklisted devices 556 * -ENODEV for invalid subchannels or blacklisted devices
557 * -EIO for subchannels in an invalid subchannel set 557 * -EIO for subchannels in an invalid subchannel set
558 */ 558 */
559 int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) 559 int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
560 { 560 {
561 char dbf_txt[15]; 561 char dbf_txt[15];
562 int ccode; 562 int ccode;
563 int err; 563 int err;
564 564
565 sprintf(dbf_txt, "valsch%x", schid.sch_no); 565 sprintf(dbf_txt, "valsch%x", schid.sch_no);
566 CIO_TRACE_EVENT(4, dbf_txt); 566 CIO_TRACE_EVENT(4, dbf_txt);
567 567
568 /* Nuke all fields. */ 568 /* Nuke all fields. */
569 memset(sch, 0, sizeof(struct subchannel)); 569 memset(sch, 0, sizeof(struct subchannel));
570 570
571 sch->schid = schid; 571 sch->schid = schid;
572 if (cio_is_console(schid)) { 572 if (cio_is_console(schid)) {
573 sch->lock = cio_get_console_lock(); 573 sch->lock = cio_get_console_lock();
574 } else { 574 } else {
575 err = cio_create_sch_lock(sch); 575 err = cio_create_sch_lock(sch);
576 if (err) 576 if (err)
577 goto out; 577 goto out;
578 } 578 }
579 mutex_init(&sch->reg_mutex); 579 mutex_init(&sch->reg_mutex);
580 /* Set a name for the subchannel */ 580 /* Set a name for the subchannel */
581 if (cio_is_console(schid)) 581 if (cio_is_console(schid))
582 sch->dev.init_name = cio_get_console_sch_name(schid); 582 sch->dev.init_name = cio_get_console_sch_name(schid);
583 else 583 else
584 dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no); 584 dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);
585 585
586 /* 586 /*
587 * The first subchannel that is not-operational (ccode==3) 587 * The first subchannel that is not-operational (ccode==3)
588 * indicates that there aren't any more devices available. 588 * indicates that there aren't any more devices available.
589 * If stsch gets an exception, it means the current subchannel set 589 * If stsch gets an exception, it means the current subchannel set
590 * is not valid. 590 * is not valid.
591 */ 591 */
592 ccode = stsch_err (schid, &sch->schib); 592 ccode = stsch_err (schid, &sch->schib);
593 if (ccode) { 593 if (ccode) {
594 err = (ccode == 3) ? -ENXIO : ccode; 594 err = (ccode == 3) ? -ENXIO : ccode;
595 goto out; 595 goto out;
596 } 596 }
597 /* Copy subchannel type from path management control word. */ 597 /* Copy subchannel type from path management control word. */
598 sch->st = sch->schib.pmcw.st; 598 sch->st = sch->schib.pmcw.st;
599 599
600 switch (sch->st) { 600 switch (sch->st) {
601 case SUBCHANNEL_TYPE_IO: 601 case SUBCHANNEL_TYPE_IO:
602 err = cio_validate_io_subchannel(sch); 602 err = cio_validate_io_subchannel(sch);
603 break; 603 break;
604 case SUBCHANNEL_TYPE_MSG: 604 case SUBCHANNEL_TYPE_MSG:
605 err = cio_validate_msg_subchannel(sch); 605 err = cio_validate_msg_subchannel(sch);
606 break; 606 break;
607 default: 607 default:
608 err = 0; 608 err = 0;
609 } 609 }
610 if (err) 610 if (err)
611 goto out; 611 goto out;
612 612
613 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", 613 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
614 sch->schid.ssid, sch->schid.sch_no, sch->st); 614 sch->schid.ssid, sch->schid.sch_no, sch->st);
615 return 0; 615 return 0;
616 out: 616 out:
617 if (!cio_is_console(schid)) 617 if (!cio_is_console(schid))
618 kfree(sch->lock); 618 kfree(sch->lock);
619 sch->lock = NULL; 619 sch->lock = NULL;
620 return err; 620 return err;
621 } 621 }
622 622
623 /* 623 /*
624 * do_IRQ() handles all normal I/O device IRQs (the special 624 * do_IRQ() handles all normal I/O device IRQs (the special
625 * SMP cross-CPU interrupts have their own specific 625 * SMP cross-CPU interrupts have their own specific
626 * handlers). 626 * handlers).
627 * 627 *
628 */ 628 */
629 void 629 void
630 do_IRQ (struct pt_regs *regs) 630 do_IRQ (struct pt_regs *regs)
631 { 631 {
632 struct tpi_info *tpi_info; 632 struct tpi_info *tpi_info;
633 struct subchannel *sch; 633 struct subchannel *sch;
634 struct irb *irb; 634 struct irb *irb;
635 struct pt_regs *old_regs; 635 struct pt_regs *old_regs;
636 636
637 old_regs = set_irq_regs(regs); 637 old_regs = set_irq_regs(regs);
638 s390_idle_check(); 638 s390_idle_check();
639 irq_enter(); 639 irq_enter();
640 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 640 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
641 /* Serve timer interrupts first. */ 641 /* Serve timer interrupts first. */
642 clock_comparator_work(); 642 clock_comparator_work();
643 /* 643 /*
644 * Get interrupt information from lowcore 644 * Get interrupt information from lowcore
645 */ 645 */
646 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 646 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
647 irb = (struct irb *) __LC_IRB; 647 irb = (struct irb *) __LC_IRB;
648 do { 648 do {
649 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; 649 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
650 /* 650 /*
651 * Non I/O-subchannel thin interrupts are processed differently 651 * Non I/O-subchannel thin interrupts are processed differently
652 */ 652 */
653 if (tpi_info->adapter_IO == 1 && 653 if (tpi_info->adapter_IO == 1 &&
654 tpi_info->int_type == IO_INTERRUPT_TYPE) { 654 tpi_info->int_type == IO_INTERRUPT_TYPE) {
655 do_adapter_IO(tpi_info->isc); 655 do_adapter_IO(tpi_info->isc);
656 continue; 656 continue;
657 } 657 }
658 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 658 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
659 if (!sch) { 659 if (!sch) {
660 /* Clear pending interrupt condition. */ 660 /* Clear pending interrupt condition. */
661 tsch(tpi_info->schid, irb); 661 tsch(tpi_info->schid, irb);
662 continue; 662 continue;
663 } 663 }
664 spin_lock(sch->lock); 664 spin_lock(sch->lock);
665 /* Store interrupt response block to lowcore. */ 665 /* Store interrupt response block to lowcore. */
666 if (tsch(tpi_info->schid, irb) == 0) { 666 if (tsch(tpi_info->schid, irb) == 0) {
667 /* Keep subchannel information word up to date. */ 667 /* Keep subchannel information word up to date. */
668 memcpy (&sch->schib.scsw, &irb->scsw, 668 memcpy (&sch->schib.scsw, &irb->scsw,
669 sizeof (irb->scsw)); 669 sizeof (irb->scsw));
670 /* Call interrupt handler if there is one. */ 670 /* Call interrupt handler if there is one. */
671 if (sch->driver && sch->driver->irq) 671 if (sch->driver && sch->driver->irq)
672 sch->driver->irq(sch); 672 sch->driver->irq(sch);
673 } 673 }
674 spin_unlock(sch->lock); 674 spin_unlock(sch->lock);
675 /* 675 /*
676 * Are more interrupts pending? 676 * Are more interrupts pending?
677 * If so, the tpi instruction will update the lowcore 677 * If so, the tpi instruction will update the lowcore
678 * to hold the info for the next interrupt. 678 * to hold the info for the next interrupt.
679 * We don't do this for VM because a tpi drops the cpu 679 * We don't do this for VM because a tpi drops the cpu
680 * out of the sie which costs more cycles than it saves. 680 * out of the sie which costs more cycles than it saves.
681 */ 681 */
682 } while (!MACHINE_IS_VM && tpi (NULL) != 0); 682 } while (!MACHINE_IS_VM && tpi (NULL) != 0);
683 irq_exit(); 683 irq_exit();
684 set_irq_regs(old_regs); 684 set_irq_regs(old_regs);
685 } 685 }
686 686
687 #ifdef CONFIG_CCW_CONSOLE 687 #ifdef CONFIG_CCW_CONSOLE
688 static struct subchannel console_subchannel; 688 static struct subchannel console_subchannel;
689 static char console_sch_name[10] = "0.x.xxxx"; 689 static char console_sch_name[10] = "0.x.xxxx";
690 static struct io_subchannel_private console_priv; 690 static struct io_subchannel_private console_priv;
691 static int console_subchannel_in_use; 691 static int console_subchannel_in_use;
692 692
693 /* 693 /*
694 * Use tpi to get a pending interrupt, call the interrupt handler and 694 * Use tpi to get a pending interrupt, call the interrupt handler and
695 * return a pointer to the subchannel structure. 695 * return a pointer to the subchannel structure.
696 */ 696 */
697 static int cio_tpi(void) 697 static int cio_tpi(void)
698 { 698 {
699 struct tpi_info *tpi_info; 699 struct tpi_info *tpi_info;
700 struct subchannel *sch; 700 struct subchannel *sch;
701 struct irb *irb; 701 struct irb *irb;
702 int irq_context; 702 int irq_context;
703 703
704 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; 704 tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
705 if (tpi(NULL) != 1) 705 if (tpi(NULL) != 1)
706 return 0; 706 return 0;
707 irb = (struct irb *) __LC_IRB; 707 irb = (struct irb *) __LC_IRB;
708 /* Store interrupt response block to lowcore. */ 708 /* Store interrupt response block to lowcore. */
709 if (tsch(tpi_info->schid, irb) != 0) 709 if (tsch(tpi_info->schid, irb) != 0)
710 /* Not status pending or not operational. */ 710 /* Not status pending or not operational. */
711 return 1; 711 return 1;
712 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 712 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
713 if (!sch) 713 if (!sch)
714 return 1; 714 return 1;
715 irq_context = in_interrupt(); 715 irq_context = in_interrupt();
716 if (!irq_context) 716 if (!irq_context)
717 local_bh_disable(); 717 local_bh_disable();
718 irq_enter(); 718 irq_enter();
719 spin_lock(sch->lock); 719 spin_lock(sch->lock);
720 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); 720 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
721 if (sch->driver && sch->driver->irq) 721 if (sch->driver && sch->driver->irq)
722 sch->driver->irq(sch); 722 sch->driver->irq(sch);
723 spin_unlock(sch->lock); 723 spin_unlock(sch->lock);
724 irq_exit(); 724 irq_exit();
725 if (!irq_context) 725 if (!irq_context)
726 _local_bh_enable(); 726 _local_bh_enable();
727 return 1; 727 return 1;
728 } 728 }
729 729
730 void *cio_get_console_priv(void) 730 void *cio_get_console_priv(void)
731 { 731 {
732 return &console_priv; 732 return &console_priv;
733 } 733 }
734 734
735 /* 735 /*
736 * busy wait for the next interrupt on the console 736 * busy wait for the next interrupt on the console
737 */ 737 */
738 void wait_cons_dev(void) 738 void wait_cons_dev(void)
739 __releases(console_subchannel.lock) 739 __releases(console_subchannel.lock)
740 __acquires(console_subchannel.lock) 740 __acquires(console_subchannel.lock)
741 { 741 {
742 unsigned long cr6 __attribute__ ((aligned (8))); 742 unsigned long cr6 __attribute__ ((aligned (8)));
743 unsigned long save_cr6 __attribute__ ((aligned (8))); 743 unsigned long save_cr6 __attribute__ ((aligned (8)));
744 744
745 /* 745 /*
746 * before entering the spinlock we may already have 746 * before entering the spinlock we may already have
747 * processed the interrupt on a different CPU... 747 * processed the interrupt on a different CPU...
748 */ 748 */
749 if (!console_subchannel_in_use) 749 if (!console_subchannel_in_use)
750 return; 750 return;
751 751
752 /* disable all but the console isc */ 752 /* disable all but the console isc */
753 __ctl_store (save_cr6, 6, 6); 753 __ctl_store (save_cr6, 6, 6);
754 cr6 = 1UL << (31 - CONSOLE_ISC); 754 cr6 = 1UL << (31 - CONSOLE_ISC);
755 __ctl_load (cr6, 6, 6); 755 __ctl_load (cr6, 6, 6);
756 756
757 do { 757 do {
758 spin_unlock(console_subchannel.lock); 758 spin_unlock(console_subchannel.lock);
759 if (!cio_tpi()) 759 if (!cio_tpi())
760 cpu_relax(); 760 cpu_relax();
761 spin_lock(console_subchannel.lock); 761 spin_lock(console_subchannel.lock);
762 } while (console_subchannel.schib.scsw.cmd.actl != 0); 762 } while (console_subchannel.schib.scsw.cmd.actl != 0);
763 /* 763 /*
764 * restore previous isc value 764 * restore previous isc value
765 */ 765 */
766 __ctl_load (save_cr6, 6, 6); 766 __ctl_load (save_cr6, 6, 6);
767 } 767 }
768 768
769 static int 769 static int
770 cio_test_for_console(struct subchannel_id schid, void *data) 770 cio_test_for_console(struct subchannel_id schid, void *data)
771 { 771 {
772 if (stsch_err(schid, &console_subchannel.schib) != 0) 772 if (stsch_err(schid, &console_subchannel.schib) != 0)
773 return -ENXIO; 773 return -ENXIO;
774 if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) && 774 if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
775 console_subchannel.schib.pmcw.dnv && 775 console_subchannel.schib.pmcw.dnv &&
776 (console_subchannel.schib.pmcw.dev == console_devno)) { 776 (console_subchannel.schib.pmcw.dev == console_devno)) {
777 console_irq = schid.sch_no; 777 console_irq = schid.sch_no;
778 return 1; /* found */ 778 return 1; /* found */
779 } 779 }
780 return 0; 780 return 0;
781 } 781 }
782 782
783 783
784 static int 784 static int
785 cio_get_console_sch_no(void) 785 cio_get_console_sch_no(void)
786 { 786 {
787 struct subchannel_id schid; 787 struct subchannel_id schid;
788 788
789 init_subchannel_id(&schid); 789 init_subchannel_id(&schid);
790 if (console_irq != -1) { 790 if (console_irq != -1) {
791 /* VM provided us with the irq number of the console. */ 791 /* VM provided us with the irq number of the console. */
792 schid.sch_no = console_irq; 792 schid.sch_no = console_irq;
793 if (stsch(schid, &console_subchannel.schib) != 0 || 793 if (stsch(schid, &console_subchannel.schib) != 0 ||
794 (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || 794 (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
795 !console_subchannel.schib.pmcw.dnv) 795 !console_subchannel.schib.pmcw.dnv)
796 return -1; 796 return -1;
797 console_devno = console_subchannel.schib.pmcw.dev; 797 console_devno = console_subchannel.schib.pmcw.dev;
798 } else if (console_devno != -1) { 798 } else if (console_devno != -1) {
799 /* At least the console device number is known. */ 799 /* At least the console device number is known. */
800 for_each_subchannel(cio_test_for_console, NULL); 800 for_each_subchannel(cio_test_for_console, NULL);
801 if (console_irq == -1) 801 if (console_irq == -1)
802 return -1; 802 return -1;
803 } else { 803 } else {
804 /* unlike in 2.4, we cannot autoprobe here, since 804 /* unlike in 2.4, we cannot autoprobe here, since
805 * the channel subsystem is not fully initialized. 805 * the channel subsystem is not fully initialized.
806 * With some luck, the HWC console can take over */ 806 * With some luck, the HWC console can take over */
807 return -1; 807 return -1;
808 } 808 }
809 return console_irq; 809 return console_irq;
810 } 810 }
811 811
812 struct subchannel * 812 struct subchannel *
813 cio_probe_console(void) 813 cio_probe_console(void)
814 { 814 {
815 int sch_no, ret; 815 int sch_no, ret;
816 struct subchannel_id schid; 816 struct subchannel_id schid;
817 817
818 if (xchg(&console_subchannel_in_use, 1) != 0) 818 if (xchg(&console_subchannel_in_use, 1) != 0)
819 return ERR_PTR(-EBUSY); 819 return ERR_PTR(-EBUSY);
820 sch_no = cio_get_console_sch_no(); 820 sch_no = cio_get_console_sch_no();
821 if (sch_no == -1) { 821 if (sch_no == -1) {
822 console_subchannel_in_use = 0; 822 console_subchannel_in_use = 0;
823 pr_warning("No CCW console was found\n"); 823 pr_warning("No CCW console was found\n");
824 return ERR_PTR(-ENODEV); 824 return ERR_PTR(-ENODEV);
825 } 825 }
826 memset(&console_subchannel, 0, sizeof(struct subchannel)); 826 memset(&console_subchannel, 0, sizeof(struct subchannel));
827 init_subchannel_id(&schid); 827 init_subchannel_id(&schid);
828 schid.sch_no = sch_no; 828 schid.sch_no = sch_no;
829 ret = cio_validate_subchannel(&console_subchannel, schid); 829 ret = cio_validate_subchannel(&console_subchannel, schid);
830 if (ret) { 830 if (ret) {
831 console_subchannel_in_use = 0; 831 console_subchannel_in_use = 0;
832 return ERR_PTR(-ENODEV); 832 return ERR_PTR(-ENODEV);
833 } 833 }
834 834
835 /* 835 /*
836 * enable console I/O-interrupt subclass 836 * enable console I/O-interrupt subclass
837 */ 837 */
838 isc_register(CONSOLE_ISC); 838 isc_register(CONSOLE_ISC);
839 console_subchannel.config.isc = CONSOLE_ISC; 839 console_subchannel.config.isc = CONSOLE_ISC;
840 console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; 840 console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
841 ret = cio_commit_config(&console_subchannel); 841 ret = cio_commit_config(&console_subchannel);
842 if (ret) { 842 if (ret) {
843 isc_unregister(CONSOLE_ISC); 843 isc_unregister(CONSOLE_ISC);
844 console_subchannel_in_use = 0; 844 console_subchannel_in_use = 0;
845 return ERR_PTR(ret); 845 return ERR_PTR(ret);
846 } 846 }
847 return &console_subchannel; 847 return &console_subchannel;
848 } 848 }
849 849
850 void 850 void
851 cio_release_console(void) 851 cio_release_console(void)
852 { 852 {
853 console_subchannel.config.intparm = 0; 853 console_subchannel.config.intparm = 0;
854 cio_commit_config(&console_subchannel); 854 cio_commit_config(&console_subchannel);
855 isc_unregister(CONSOLE_ISC); 855 isc_unregister(CONSOLE_ISC);
856 console_subchannel_in_use = 0; 856 console_subchannel_in_use = 0;
857 } 857 }
858 858
859 /* Bah... hack to catch console special sausages. */ 859 /* Bah... hack to catch console special sausages. */
860 int 860 int
861 cio_is_console(struct subchannel_id schid) 861 cio_is_console(struct subchannel_id schid)
862 { 862 {
863 if (!console_subchannel_in_use) 863 if (!console_subchannel_in_use)
864 return 0; 864 return 0;
865 return schid_equal(&schid, &console_subchannel.schid); 865 return schid_equal(&schid, &console_subchannel.schid);
866 } 866 }
867 867
868 struct subchannel * 868 struct subchannel *
869 cio_get_console_subchannel(void) 869 cio_get_console_subchannel(void)
870 { 870 {
871 if (!console_subchannel_in_use) 871 if (!console_subchannel_in_use)
872 return NULL; 872 return NULL;
873 return &console_subchannel; 873 return &console_subchannel;
874 } 874 }
875 875
876 const char *cio_get_console_sch_name(struct subchannel_id schid) 876 const char *cio_get_console_sch_name(struct subchannel_id schid)
877 { 877 {
878 snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no); 878 snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no);
879 return (const char *)console_sch_name; 879 return (const char *)console_sch_name;
880 } 880 }
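
cio_get_console_sch_name() formats the console subchannel's bus id as "0.<ssid>.<sch_no>". The ssid is a two-bit field (one hex digit) and sch_no a 16-bit field printed with %04x, so the longest result is 8 characters plus the terminating NUL, which is why the 10-byte buffer suffices. A stand-alone illustration with made-up example values:

#include <stdio.h>

int main(void)
{
	char name[10];
	unsigned int ssid = 0, sch_no = 0x4711;	/* hypothetical values */

	snprintf(name, sizeof(name), "0.%x.%04x", ssid, sch_no);
	printf("%s\n", name);	/* prints 0.0.4711 */
	return 0;
}
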
881 881
882 #endif 882 #endif
883 static int 883 static int
884 __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 884 __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
885 { 885 {
886 int retry, cc; 886 int retry, cc;
887 887
888 cc = 0; 888 cc = 0;
889 for (retry=0;retry<3;retry++) { 889 for (retry=0;retry<3;retry++) {
890 schib->pmcw.ena = 0; 890 schib->pmcw.ena = 0;
891 cc = msch(schid, schib); 891 cc = msch(schid, schib);
892 if (cc) 892 if (cc)
893 return (cc==3?-ENODEV:-EBUSY); 893 return (cc==3?-ENODEV:-EBUSY);
894 if (stsch(schid, schib) || !css_sch_is_valid(schib)) 894 if (stsch(schid, schib) || !css_sch_is_valid(schib))
895 return -ENODEV; 895 return -ENODEV;
896 if (!schib->pmcw.ena) 896 if (!schib->pmcw.ena)
897 return 0; 897 return 0;
898 } 898 }
899 return -EBUSY; /* uhm... */ 899 return -EBUSY; /* uhm... */
900 } 900 }
901 901
902 static int 902 static int
903 __clear_io_subchannel_easy(struct subchannel_id schid) 903 __clear_io_subchannel_easy(struct subchannel_id schid)
904 { 904 {
905 int retry; 905 int retry;
906 906
907 if (csch(schid)) 907 if (csch(schid))
908 return -ENODEV; 908 return -ENODEV;
909 for (retry=0;retry<20;retry++) { 909 for (retry=0;retry<20;retry++) {
910 struct tpi_info ti; 910 struct tpi_info ti;
911 911
912 if (tpi(&ti)) { 912 if (tpi(&ti)) {
913 tsch(ti.schid, (struct irb *)__LC_IRB); 913 tsch(ti.schid, (struct irb *)__LC_IRB);
914 if (schid_equal(&ti.schid, &schid)) 914 if (schid_equal(&ti.schid, &schid))
915 return 0; 915 return 0;
916 } 916 }
917 udelay_simple(100); 917 udelay_simple(100);
918 } 918 }
919 return -EBUSY; 919 return -EBUSY;
920 } 920 }
921 921
922 static void __clear_chsc_subchannel_easy(void) 922 static void __clear_chsc_subchannel_easy(void)
923 { 923 {
924 /* It seems we can only wait for a bit here :/ */ 924 /* It seems we can only wait for a bit here :/ */
925 udelay_simple(100); 925 udelay_simple(100);
926 } 926 }
927 927
928 static int pgm_check_occured; 928 static int pgm_check_occured;
929 929
930 static void cio_reset_pgm_check_handler(void) 930 static void cio_reset_pgm_check_handler(void)
931 { 931 {
932 pgm_check_occured = 1; 932 pgm_check_occured = 1;
933 } 933 }
934 934
935 static int stsch_reset(struct subchannel_id schid, struct schib *addr) 935 static int stsch_reset(struct subchannel_id schid, struct schib *addr)
936 { 936 {
937 int rc; 937 int rc;
938 938
939 pgm_check_occured = 0; 939 pgm_check_occured = 0;
940 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; 940 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
941 rc = stsch(schid, addr); 941 rc = stsch(schid, addr);
942 s390_base_pgm_handler_fn = NULL; 942 s390_base_pgm_handler_fn = NULL;
943 943
944 /* The program check handler could have changed pgm_check_occured. */ 944 /* The program check handler could have changed pgm_check_occured. */
945 barrier(); 945 barrier();
946 946
947 if (pgm_check_occured) 947 if (pgm_check_occured)
948 return -EIO; 948 return -EIO;
949 else 949 else
950 return rc; 950 return rc;
951 } 951 }
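
stsch_reset() wraps the store-subchannel instruction in a temporary program-check handler so that a fault during system reset merely sets a flag instead of taking the machine down, and the barrier() stops the compiler from assuming the flag is still zero after the call. A minimal userspace sketch of this install-run-restore-test pattern, using hypothetical names in place of the s390 base handler hook:

#include <stdio.h>

static void (*pgm_handler_fn)(void);	/* stand-in for s390_base_pgm_handler_fn */
static int fault_occurred;

static void record_fault(void)
{
	fault_occurred = 1;
}

/* Pretend "instruction" that may call the handler instead of succeeding. */
static int risky_op(int should_fault)
{
	if (should_fault && pgm_handler_fn) {
		pgm_handler_fn();
		return 0;	/* result is meaningless after a fault */
	}
	return 42;
}

static int guarded_op(int should_fault, int *result)
{
	fault_occurred = 0;
	pgm_handler_fn = record_fault;		/* install temporary handler */
	*result = risky_op(should_fault);
	pgm_handler_fn = NULL;			/* restore */

	/* The handler may have changed fault_occurred behind our back. */
	__asm__ __volatile__("" ::: "memory");	/* barrier() equivalent */

	return fault_occurred ? -1 : 0;		/* -EIO in the kernel version */
}

int main(void)
{
	int val;

	printf("ok path:    rc=%d\n", guarded_op(0, &val));
	printf("fault path: rc=%d\n", guarded_op(1, &val));
	return 0;
}
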
952 952
953 static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) 953 static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
954 { 954 {
955 struct schib schib; 955 struct schib schib;
956 956
957 if (stsch_reset(schid, &schib)) 957 if (stsch_reset(schid, &schib))
958 return -ENXIO; 958 return -ENXIO;
959 if (!schib.pmcw.ena) 959 if (!schib.pmcw.ena)
960 return 0; 960 return 0;
961 switch(__disable_subchannel_easy(schid, &schib)) { 961 switch(__disable_subchannel_easy(schid, &schib)) {
962 case 0: 962 case 0:
963 case -ENODEV: 963 case -ENODEV:
964 break; 964 break;
965 default: /* -EBUSY */ 965 default: /* -EBUSY */
966 switch (schib.pmcw.st) { 966 switch (schib.pmcw.st) {
967 case SUBCHANNEL_TYPE_IO: 967 case SUBCHANNEL_TYPE_IO:
968 if (__clear_io_subchannel_easy(schid)) 968 if (__clear_io_subchannel_easy(schid))
969 goto out; /* give up... */ 969 goto out; /* give up... */
970 break; 970 break;
971 case SUBCHANNEL_TYPE_CHSC: 971 case SUBCHANNEL_TYPE_CHSC:
972 __clear_chsc_subchannel_easy(); 972 __clear_chsc_subchannel_easy();
973 break; 973 break;
974 default: 974 default:
975 /* No default clear strategy */ 975 /* No default clear strategy */
976 break; 976 break;
977 } 977 }
978 stsch(schid, &schib); 978 stsch(schid, &schib);
979 __disable_subchannel_easy(schid, &schib); 979 __disable_subchannel_easy(schid, &schib);
980 } 980 }
981 out: 981 out:
982 return 0; 982 return 0;
983 } 983 }
984 984
985 static atomic_t chpid_reset_count; 985 static atomic_t chpid_reset_count;
986 986
987 static void s390_reset_chpids_mcck_handler(void) 987 static void s390_reset_chpids_mcck_handler(void)
988 { 988 {
989 struct crw crw; 989 struct crw crw;
990 struct mci *mci; 990 struct mci *mci;
991 991
992 /* Check for pending channel report word. */ 992 /* Check for pending channel report word. */
993 mci = (struct mci *)&S390_lowcore.mcck_interruption_code; 993 mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
994 if (!mci->cp) 994 if (!mci->cp)
995 return; 995 return;
996 /* Process channel report words. */ 996 /* Process channel report words. */
997 while (stcrw(&crw) == 0) { 997 while (stcrw(&crw) == 0) {
998 /* Check for responses to RCHP. */ 998 /* Check for responses to RCHP. */
999 if (crw.slct && crw.rsc == CRW_RSC_CPATH) 999 if (crw.slct && crw.rsc == CRW_RSC_CPATH)
1000 atomic_dec(&chpid_reset_count); 1000 atomic_dec(&chpid_reset_count);
1001 } 1001 }
1002 } 1002 }
1003 1003
1004 #define RCHP_TIMEOUT (30 * USEC_PER_SEC) 1004 #define RCHP_TIMEOUT (30 * USEC_PER_SEC)
1005 static void css_reset(void) 1005 static void css_reset(void)
1006 { 1006 {
1007 int i, ret; 1007 int i, ret;
1008 unsigned long long timeout; 1008 unsigned long long timeout;
1009 struct chp_id chpid; 1009 struct chp_id chpid;
1010 1010
1011 /* Reset subchannels. */ 1011 /* Reset subchannels. */
1012 for_each_subchannel(__shutdown_subchannel_easy, NULL); 1012 for_each_subchannel(__shutdown_subchannel_easy, NULL);
1013 /* Reset channel paths. */ 1013 /* Reset channel paths. */
1014 s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; 1014 s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
1015 /* Enable channel report machine checks. */ 1015 /* Enable channel report machine checks. */
1016 __ctl_set_bit(14, 28); 1016 __ctl_set_bit(14, 28);
1017 /* Temporarily reenable machine checks. */ 1017 /* Temporarily reenable machine checks. */
1018 local_mcck_enable(); 1018 local_mcck_enable();
1019 chp_id_init(&chpid); 1019 chp_id_init(&chpid);
1020 for (i = 0; i <= __MAX_CHPID; i++) { 1020 for (i = 0; i <= __MAX_CHPID; i++) {
1021 chpid.id = i; 1021 chpid.id = i;
1022 ret = rchp(chpid); 1022 ret = rchp(chpid);
1023 if ((ret == 0) || (ret == 2)) 1023 if ((ret == 0) || (ret == 2))
1024 /* 1024 /*
1025 * rchp either succeeded, or another rchp is already 1025 * rchp either succeeded, or another rchp is already
1026 * in progress. In either case, we'll get a crw. 1026 * in progress. In either case, we'll get a crw.
1027 */ 1027 */
1028 atomic_inc(&chpid_reset_count); 1028 atomic_inc(&chpid_reset_count);
1029 } 1029 }
1030 /* Wait for machine check for all channel paths. */ 1030 /* Wait for machine check for all channel paths. */
1031 timeout = get_clock() + (RCHP_TIMEOUT << 12); 1031 timeout = get_clock() + (RCHP_TIMEOUT << 12);
1032 while (atomic_read(&chpid_reset_count) != 0) { 1032 while (atomic_read(&chpid_reset_count) != 0) {
1033 if (get_clock() > timeout) 1033 if (get_clock() > timeout)
1034 break; 1034 break;
1035 cpu_relax(); 1035 cpu_relax();
1036 } 1036 }
1037 /* Disable machine checks again. */ 1037 /* Disable machine checks again. */
1038 local_mcck_disable(); 1038 local_mcck_disable();
1039 /* Disable channel report machine checks. */ 1039 /* Disable channel report machine checks. */
1040 __ctl_clear_bit(14, 28); 1040 __ctl_clear_bit(14, 28);
1041 s390_base_mcck_handler_fn = NULL; 1041 s390_base_mcck_handler_fn = NULL;
1042 } 1042 }
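
css_reset() waits for one channel-report machine check per successfully issued rchp, bounded by RCHP_TIMEOUT. The timeout is given in microseconds but compared against get_clock(), which counts TOD-clock units of 2**-12 microseconds (the same resolution the cputime code in this series uses), so shifting left by 12 converts microseconds into clock units. A small arithmetic check of that conversion, under that assumption:

#include <stdio.h>

#define USEC_PER_SEC 1000000ULL
#define RCHP_TIMEOUT (30 * USEC_PER_SEC)	/* 30 seconds, in microseconds */

int main(void)
{
	unsigned long long tod_units = RCHP_TIMEOUT << 12;	/* 4096 units per microsecond */

	printf("timeout: %llu us = %llu TOD units\n",
	       (unsigned long long)RCHP_TIMEOUT, tod_units);
	printf("round trip: %llu us\n", tod_units >> 12);	/* recovers 30000000 */
	return 0;
}
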
1043 1043
1044 static struct reset_call css_reset_call = { 1044 static struct reset_call css_reset_call = {
1045 .fn = css_reset, 1045 .fn = css_reset,
1046 }; 1046 };
1047 1047
1048 static int __init init_css_reset_call(void) 1048 static int __init init_css_reset_call(void)
1049 { 1049 {
1050 atomic_set(&chpid_reset_count, 0); 1050 atomic_set(&chpid_reset_count, 0);
1051 register_reset_call(&css_reset_call); 1051 register_reset_call(&css_reset_call);
1052 return 0; 1052 return 0;
1053 } 1053 }
1054 1054
1055 arch_initcall(init_css_reset_call); 1055 arch_initcall(init_css_reset_call);
1056 1056
1057 struct sch_match_id { 1057 struct sch_match_id {
1058 struct subchannel_id schid; 1058 struct subchannel_id schid;
1059 struct ccw_dev_id devid; 1059 struct ccw_dev_id devid;
1060 int rc; 1060 int rc;
1061 }; 1061 };
1062 1062
1063 static int __reipl_subchannel_match(struct subchannel_id schid, void *data) 1063 static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
1064 { 1064 {
1065 struct schib schib; 1065 struct schib schib;
1066 struct sch_match_id *match_id = data; 1066 struct sch_match_id *match_id = data;
1067 1067
1068 if (stsch_reset(schid, &schib)) 1068 if (stsch_reset(schid, &schib))
1069 return -ENXIO; 1069 return -ENXIO;
1070 if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && 1070 if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
1071 (schib.pmcw.dev == match_id->devid.devno) && 1071 (schib.pmcw.dev == match_id->devid.devno) &&
1072 (schid.ssid == match_id->devid.ssid)) { 1072 (schid.ssid == match_id->devid.ssid)) {
1073 match_id->schid = schid; 1073 match_id->schid = schid;
1074 match_id->rc = 0; 1074 match_id->rc = 0;
1075 return 1; 1075 return 1;
1076 } 1076 }
1077 return 0; 1077 return 0;
1078 } 1078 }
1079 1079
1080 static int reipl_find_schid(struct ccw_dev_id *devid, 1080 static int reipl_find_schid(struct ccw_dev_id *devid,
1081 struct subchannel_id *schid) 1081 struct subchannel_id *schid)
1082 { 1082 {
1083 struct sch_match_id match_id; 1083 struct sch_match_id match_id;
1084 1084
1085 match_id.devid = *devid; 1085 match_id.devid = *devid;
1086 match_id.rc = -ENODEV; 1086 match_id.rc = -ENODEV;
1087 for_each_subchannel(__reipl_subchannel_match, &match_id); 1087 for_each_subchannel(__reipl_subchannel_match, &match_id);
1088 if (match_id.rc == 0) 1088 if (match_id.rc == 0)
1089 *schid = match_id.schid; 1089 *schid = match_id.schid;
1090 return match_id.rc; 1090 return match_id.rc;
1091 } 1091 }
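
reipl_find_schid() uses the usual for_each_subchannel() idiom: a context struct travels through the void * cookie, and the callback records the match and returns non-zero once it has seen the subchannel carrying the requested device number. A minimal stand-alone sketch of that callback-and-cookie pattern, with hypothetical names rather than the real CIO iterator:

#include <stdio.h>

struct match_ctx {
	int wanted;	/* what we are looking for */
	int found_at;	/* filled in by the callback */
	int rc;		/* stays negative until a match is found */
};

/* Returns 1 to stop the walk, 0 to keep going. */
static int match_cb(int item, void *data)
{
	struct match_ctx *ctx = data;

	if (item != ctx->wanted)
		return 0;
	ctx->found_at = item;
	ctx->rc = 0;
	return 1;
}

static void for_each_item(int (*fn)(int, void *), void *data)
{
	for (int i = 0; i < 16; i++)
		if (fn(i, data) != 0)
			break;
}

int main(void)
{
	struct match_ctx ctx = { .wanted = 7, .rc = -1 };

	for_each_item(match_cb, &ctx);
	printf("rc=%d found_at=%d\n", ctx.rc, ctx.found_at);
	return 0;
}
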
1092 1092
1093 extern void do_reipl_asm(__u32 schid); 1093 extern void do_reipl_asm(__u32 schid);
1094 1094
1095 /* Make sure all subchannels are quiet before we re-ipl an lpar. */ 1095 /* Make sure all subchannels are quiet before we re-ipl an lpar. */
1096 void reipl_ccw_dev(struct ccw_dev_id *devid) 1096 void reipl_ccw_dev(struct ccw_dev_id *devid)
1097 { 1097 {
1098 struct subchannel_id schid; 1098 struct subchannel_id schid;
1099 1099
1100 s390_reset_system(); 1100 s390_reset_system();
1101 if (reipl_find_schid(devid, &schid) != 0) 1101 if (reipl_find_schid(devid, &schid) != 0)
1102 panic("IPL Device not found\n"); 1102 panic("IPL Device not found\n");
1103 do_reipl_asm(*((__u32*)&schid)); 1103 do_reipl_asm(*((__u32*)&schid));
1104 } 1104 }
1105 1105
1106 int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) 1106 int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1107 { 1107 {
1108 struct subchannel_id schid; 1108 struct subchannel_id schid;
1109 struct schib schib; 1109 struct schib schib;
1110 1110
1111 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; 1111 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
1112 if (!schid.one) 1112 if (!schid.one)
1113 return -ENODEV; 1113 return -ENODEV;
1114 if (stsch(schid, &schib)) 1114 if (stsch(schid, &schib))
1115 return -ENODEV; 1115 return -ENODEV;
1116 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) 1116 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
1117 return -ENODEV; 1117 return -ENODEV;
1118 if (!schib.pmcw.dnv) 1118 if (!schib.pmcw.dnv)
1119 return -ENODEV; 1119 return -ENODEV;
1120 iplinfo->devno = schib.pmcw.dev; 1120 iplinfo->devno = schib.pmcw.dev;
1121 iplinfo->is_qdio = schib.pmcw.qf; 1121 iplinfo->is_qdio = schib.pmcw.qf;
1122 return 0; 1122 return 0;
1123 } 1123 }
1124 1124
1125 /** 1125 /**
1126 * cio_tm_start_key - perform start function 1126 * cio_tm_start_key - perform start function
1127 * @sch: subchannel on which to perform the start function 1127 * @sch: subchannel on which to perform the start function
1128 * @tcw: transport-command word to be started 1128 * @tcw: transport-command word to be started
1129 * @lpm: mask of paths to use 1129 * @lpm: mask of paths to use
1130 * @key: storage key to use for storage access 1130 * @key: storage key to use for storage access
1131 * 1131 *
1132 * Start the tcw on the given subchannel. Return zero on success, non-zero 1132 * Start the tcw on the given subchannel. Return zero on success, non-zero
1133 * otherwise. 1133 * otherwise.
1134 */ 1134 */
1135 int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) 1135 int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
1136 { 1136 {
1137 int cc; 1137 int cc;
1138 union orb *orb = &to_io_private(sch)->orb; 1138 union orb *orb = &to_io_private(sch)->orb;
1139 1139
1140 memset(orb, 0, sizeof(union orb)); 1140 memset(orb, 0, sizeof(union orb));
1141 orb->tm.intparm = (u32) (addr_t) sch; 1141 orb->tm.intparm = (u32) (addr_t) sch;
1142 orb->tm.key = key >> 4; 1142 orb->tm.key = key >> 4;
1143 orb->tm.b = 1; 1143 orb->tm.b = 1;
1144 orb->tm.lpm = lpm ? lpm : sch->lpm; 1144 orb->tm.lpm = lpm ? lpm : sch->lpm;
1145 orb->tm.tcw = (u32) (addr_t) tcw; 1145 orb->tm.tcw = (u32) (addr_t) tcw;
1146 cc = ssch(sch->schid, orb); 1146 cc = ssch(sch->schid, orb);
1147 switch (cc) { 1147 switch (cc) {
1148 case 0: 1148 case 0:
1149 return 0; 1149 return 0;
1150 case 1: 1150 case 1:
1151 case 2: 1151 case 2:
1152 return -EBUSY; 1152 return -EBUSY;
1153 default: 1153 default:
1154 return cio_start_handle_notoper(sch, lpm); 1154 return cio_start_handle_notoper(sch, lpm);
1155 } 1155 }
1156 } 1156 }
1157 1157
1158 /** 1158 /**
1159 * cio_tm_intrg - perform interrogate function 1159 * cio_tm_intrg - perform interrogate function
1160 * @sch: subchannel on which to perform the interrogate function 1160 * @sch: subchannel on which to perform the interrogate function
1161 * 1161 *
1162 * If the specified subchannel is running in transport-mode, perform the 1162 * If the specified subchannel is running in transport-mode, perform the
1163 * interrogate function. Return zero on success, non-zero otherwise. 1163 * interrogate function. Return zero on success, non-zero otherwise.
1164 */ 1164 */
1165 int cio_tm_intrg(struct subchannel *sch) 1165 int cio_tm_intrg(struct subchannel *sch)
1166 { 1166 {
1167 int cc; 1167 int cc;
1168 1168
1169 if (!to_io_private(sch)->orb.tm.b) 1169 if (!to_io_private(sch)->orb.tm.b)
1170 return -EINVAL; 1170 return -EINVAL;
1171 cc = xsch(sch->schid); 1171 cc = xsch(sch->schid);
1172 switch (cc) { 1172 switch (cc) {
1173 case 0: 1173 case 0:
1174 case 2: 1174 case 2:
1175 return 0; 1175 return 0;
1176 case 1: 1176 case 1:
1177 return -EBUSY; 1177 return -EBUSY;
1178 default: 1178 default:
1179 return -ENODEV; 1179 return -ENODEV;
1180 } 1180 }
1181 } 1181 }
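
Both transport-mode helpers above translate the condition code of the I/O instruction (ssch or xsch) into errno-style results. The sketch below replays the mapping done by cio_tm_start_key(), assuming the usual s390 convention that cc 0 means the function was initiated, cc 1 status pending, cc 2 busy, and cc 3 not operational; in the real code the cc 3 case is handed on to cio_start_handle_notoper().

#include <stdio.h>
#include <errno.h>

static int cc_to_errno_start(int cc)
{
	switch (cc) {
	case 0:
		return 0;		/* start function initiated */
	case 1:
	case 2:
		return -EBUSY;		/* status pending or device busy */
	default:
		return -ENODEV;		/* not operational; the driver calls
					 * cio_start_handle_notoper() here */
	}
}

int main(void)
{
	for (int cc = 0; cc <= 3; cc++)
		printf("cc=%d -> %d\n", cc, cc_to_errno_start(cc));
	return 0;
}
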
1182 1182