Commit 34aec07c170b972a29c954b37047184bd0f9f294
Committed by: Martin Schwidefsky
Parent: b730f3a933
Exists in master and in 7 other branches
[S390] chsc: initialization fixes
This patch fixes:
* kfree vs. free_page usage
* structure definition for determine_css_characteristics
* naming convention for the chsc init function
* deregistration of crw handlers in the cleanup path

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
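The kfree vs. free_page point is the core of the first fix: sei_page is allocated with get_zeroed_page(), i.e. from the page allocator, so releasing it with kfree() hands page-allocator memory to the slab allocator. A minimal sketch of the correct pairing (example_init/example_exit are illustrative names, not part of this commit):

#include <linux/gfp.h>
#include <linux/slab.h>

static void *sei_page;

static int example_init(void)
{
	/* Page allocator: get_zeroed_page() must be paired with free_page(). */
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		return -ENOMEM;
	return 0;
}

static void example_exit(void)
{
	free_page((unsigned long)sei_page);	/* correct release call */
	/* kfree(sei_page) would be wrong here: kfree() is only valid
	 * for memory obtained from kmalloc()/kzalloc(). */
}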
Showing 3 changed files with 17 additions and 17 deletions
drivers/s390/cio/chsc.c
1 | /* | 1 | /* |
2 | * drivers/s390/cio/chsc.c | 2 | * drivers/s390/cio/chsc.c |
3 | * S/390 common I/O routines -- channel subsystem call | 3 | * S/390 common I/O routines -- channel subsystem call |
4 | * | 4 | * |
5 | * Copyright IBM Corp. 1999,2008 | 5 | * Copyright IBM Corp. 1999,2010 |
6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | 6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) |
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
8 | * Arnd Bergmann (arndb@de.ibm.com) | 8 | * Arnd Bergmann (arndb@de.ibm.com) |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "cio" | 11 | #define KMSG_COMPONENT "cio" |
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | 18 | ||
19 | #include <asm/cio.h> | 19 | #include <asm/cio.h> |
20 | #include <asm/chpid.h> | 20 | #include <asm/chpid.h> |
21 | #include <asm/chsc.h> | 21 | #include <asm/chsc.h> |
22 | #include <asm/crw.h> | 22 | #include <asm/crw.h> |
23 | 23 | ||
24 | #include "css.h" | 24 | #include "css.h" |
25 | #include "cio.h" | 25 | #include "cio.h" |
26 | #include "cio_debug.h" | 26 | #include "cio_debug.h" |
27 | #include "ioasm.h" | 27 | #include "ioasm.h" |
28 | #include "chp.h" | 28 | #include "chp.h" |
29 | #include "chsc.h" | 29 | #include "chsc.h" |
30 | 30 | ||
31 | static void *sei_page; | 31 | static void *sei_page; |
32 | static DEFINE_SPINLOCK(siosl_lock); | 32 | static DEFINE_SPINLOCK(siosl_lock); |
33 | static DEFINE_SPINLOCK(sda_lock); | 33 | static DEFINE_SPINLOCK(sda_lock); |
34 | 34 | ||
35 | /** | 35 | /** |
36 | * chsc_error_from_response() - convert a chsc response to an error | 36 | * chsc_error_from_response() - convert a chsc response to an error |
37 | * @response: chsc response code | 37 | * @response: chsc response code |
38 | * | 38 | * |
39 | * Returns an appropriate Linux error code for @response. | 39 | * Returns an appropriate Linux error code for @response. |
40 | */ | 40 | */ |
41 | int chsc_error_from_response(int response) | 41 | int chsc_error_from_response(int response) |
42 | { | 42 | { |
43 | switch (response) { | 43 | switch (response) { |
44 | case 0x0001: | 44 | case 0x0001: |
45 | return 0; | 45 | return 0; |
46 | case 0x0002: | 46 | case 0x0002: |
47 | case 0x0003: | 47 | case 0x0003: |
48 | case 0x0006: | 48 | case 0x0006: |
49 | case 0x0007: | 49 | case 0x0007: |
50 | case 0x0008: | 50 | case 0x0008: |
51 | case 0x000a: | 51 | case 0x000a: |
52 | case 0x0104: | 52 | case 0x0104: |
53 | return -EINVAL; | 53 | return -EINVAL; |
54 | case 0x0004: | 54 | case 0x0004: |
55 | return -EOPNOTSUPP; | 55 | return -EOPNOTSUPP; |
56 | default: | 56 | default: |
57 | return -EIO; | 57 | return -EIO; |
58 | } | 58 | } |
59 | } | 59 | } |
60 | EXPORT_SYMBOL_GPL(chsc_error_from_response); | 60 | EXPORT_SYMBOL_GPL(chsc_error_from_response); |
61 | 61 | ||
62 | struct chsc_ssd_area { | 62 | struct chsc_ssd_area { |
63 | struct chsc_header request; | 63 | struct chsc_header request; |
64 | u16 :10; | 64 | u16 :10; |
65 | u16 ssid:2; | 65 | u16 ssid:2; |
66 | u16 :4; | 66 | u16 :4; |
67 | u16 f_sch; /* first subchannel */ | 67 | u16 f_sch; /* first subchannel */ |
68 | u16 :16; | 68 | u16 :16; |
69 | u16 l_sch; /* last subchannel */ | 69 | u16 l_sch; /* last subchannel */ |
70 | u32 :32; | 70 | u32 :32; |
71 | struct chsc_header response; | 71 | struct chsc_header response; |
72 | u32 :32; | 72 | u32 :32; |
73 | u8 sch_valid : 1; | 73 | u8 sch_valid : 1; |
74 | u8 dev_valid : 1; | 74 | u8 dev_valid : 1; |
75 | u8 st : 3; /* subchannel type */ | 75 | u8 st : 3; /* subchannel type */ |
76 | u8 zeroes : 3; | 76 | u8 zeroes : 3; |
77 | u8 unit_addr; /* unit address */ | 77 | u8 unit_addr; /* unit address */ |
78 | u16 devno; /* device number */ | 78 | u16 devno; /* device number */ |
79 | u8 path_mask; | 79 | u8 path_mask; |
80 | u8 fla_valid_mask; | 80 | u8 fla_valid_mask; |
81 | u16 sch; /* subchannel */ | 81 | u16 sch; /* subchannel */ |
82 | u8 chpid[8]; /* chpids 0-7 */ | 82 | u8 chpid[8]; /* chpids 0-7 */ |
83 | u16 fla[8]; /* full link addresses 0-7 */ | 83 | u16 fla[8]; /* full link addresses 0-7 */ |
84 | } __attribute__ ((packed)); | 84 | } __attribute__ ((packed)); |
85 | 85 | ||
86 | int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) | 86 | int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) |
87 | { | 87 | { |
88 | unsigned long page; | 88 | unsigned long page; |
89 | struct chsc_ssd_area *ssd_area; | 89 | struct chsc_ssd_area *ssd_area; |
90 | int ccode; | 90 | int ccode; |
91 | int ret; | 91 | int ret; |
92 | int i; | 92 | int i; |
93 | int mask; | 93 | int mask; |
94 | 94 | ||
95 | page = get_zeroed_page(GFP_KERNEL | GFP_DMA); | 95 | page = get_zeroed_page(GFP_KERNEL | GFP_DMA); |
96 | if (!page) | 96 | if (!page) |
97 | return -ENOMEM; | 97 | return -ENOMEM; |
98 | ssd_area = (struct chsc_ssd_area *) page; | 98 | ssd_area = (struct chsc_ssd_area *) page; |
99 | ssd_area->request.length = 0x0010; | 99 | ssd_area->request.length = 0x0010; |
100 | ssd_area->request.code = 0x0004; | 100 | ssd_area->request.code = 0x0004; |
101 | ssd_area->ssid = schid.ssid; | 101 | ssd_area->ssid = schid.ssid; |
102 | ssd_area->f_sch = schid.sch_no; | 102 | ssd_area->f_sch = schid.sch_no; |
103 | ssd_area->l_sch = schid.sch_no; | 103 | ssd_area->l_sch = schid.sch_no; |
104 | 104 | ||
105 | ccode = chsc(ssd_area); | 105 | ccode = chsc(ssd_area); |
106 | /* Check response. */ | 106 | /* Check response. */ |
107 | if (ccode > 0) { | 107 | if (ccode > 0) { |
108 | ret = (ccode == 3) ? -ENODEV : -EBUSY; | 108 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
109 | goto out_free; | 109 | goto out_free; |
110 | } | 110 | } |
111 | ret = chsc_error_from_response(ssd_area->response.code); | 111 | ret = chsc_error_from_response(ssd_area->response.code); |
112 | if (ret != 0) { | 112 | if (ret != 0) { |
113 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", | 113 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", |
114 | schid.ssid, schid.sch_no, | 114 | schid.ssid, schid.sch_no, |
115 | ssd_area->response.code); | 115 | ssd_area->response.code); |
116 | goto out_free; | 116 | goto out_free; |
117 | } | 117 | } |
118 | if (!ssd_area->sch_valid) { | 118 | if (!ssd_area->sch_valid) { |
119 | ret = -ENODEV; | 119 | ret = -ENODEV; |
120 | goto out_free; | 120 | goto out_free; |
121 | } | 121 | } |
122 | /* Copy data */ | 122 | /* Copy data */ |
123 | ret = 0; | 123 | ret = 0; |
124 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); | 124 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); |
125 | if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && | 125 | if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && |
126 | (ssd_area->st != SUBCHANNEL_TYPE_MSG)) | 126 | (ssd_area->st != SUBCHANNEL_TYPE_MSG)) |
127 | goto out_free; | 127 | goto out_free; |
128 | ssd->path_mask = ssd_area->path_mask; | 128 | ssd->path_mask = ssd_area->path_mask; |
129 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; | 129 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; |
130 | for (i = 0; i < 8; i++) { | 130 | for (i = 0; i < 8; i++) { |
131 | mask = 0x80 >> i; | 131 | mask = 0x80 >> i; |
132 | if (ssd_area->path_mask & mask) { | 132 | if (ssd_area->path_mask & mask) { |
133 | chp_id_init(&ssd->chpid[i]); | 133 | chp_id_init(&ssd->chpid[i]); |
134 | ssd->chpid[i].id = ssd_area->chpid[i]; | 134 | ssd->chpid[i].id = ssd_area->chpid[i]; |
135 | } | 135 | } |
136 | if (ssd_area->fla_valid_mask & mask) | 136 | if (ssd_area->fla_valid_mask & mask) |
137 | ssd->fla[i] = ssd_area->fla[i]; | 137 | ssd->fla[i] = ssd_area->fla[i]; |
138 | } | 138 | } |
139 | out_free: | 139 | out_free: |
140 | free_page(page); | 140 | free_page(page); |
141 | return ret; | 141 | return ret; |
142 | } | 142 | } |
143 | 143 | ||
144 | static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) | 144 | static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) |
145 | { | 145 | { |
146 | spin_lock_irq(sch->lock); | 146 | spin_lock_irq(sch->lock); |
147 | if (sch->driver && sch->driver->chp_event) | 147 | if (sch->driver && sch->driver->chp_event) |
148 | if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0) | 148 | if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0) |
149 | goto out_unreg; | 149 | goto out_unreg; |
150 | spin_unlock_irq(sch->lock); | 150 | spin_unlock_irq(sch->lock); |
151 | return 0; | 151 | return 0; |
152 | 152 | ||
153 | out_unreg: | 153 | out_unreg: |
154 | sch->lpm = 0; | 154 | sch->lpm = 0; |
155 | spin_unlock_irq(sch->lock); | 155 | spin_unlock_irq(sch->lock); |
156 | css_schedule_eval(sch->schid); | 156 | css_schedule_eval(sch->schid); |
157 | return 0; | 157 | return 0; |
158 | } | 158 | } |
159 | 159 | ||
160 | void chsc_chp_offline(struct chp_id chpid) | 160 | void chsc_chp_offline(struct chp_id chpid) |
161 | { | 161 | { |
162 | char dbf_txt[15]; | 162 | char dbf_txt[15]; |
163 | struct chp_link link; | 163 | struct chp_link link; |
164 | 164 | ||
165 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); | 165 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); |
166 | CIO_TRACE_EVENT(2, dbf_txt); | 166 | CIO_TRACE_EVENT(2, dbf_txt); |
167 | 167 | ||
168 | if (chp_get_status(chpid) <= 0) | 168 | if (chp_get_status(chpid) <= 0) |
169 | return; | 169 | return; |
170 | memset(&link, 0, sizeof(struct chp_link)); | 170 | memset(&link, 0, sizeof(struct chp_link)); |
171 | link.chpid = chpid; | 171 | link.chpid = chpid; |
172 | /* Wait until previous actions have settled. */ | 172 | /* Wait until previous actions have settled. */ |
173 | css_wait_for_slow_path(); | 173 | css_wait_for_slow_path(); |
174 | for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); | 174 | for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); |
175 | } | 175 | } |
176 | 176 | ||
177 | static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) | 177 | static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) |
178 | { | 178 | { |
179 | struct schib schib; | 179 | struct schib schib; |
180 | /* | 180 | /* |
181 | * We don't know the device yet, but since a path | 181 | * We don't know the device yet, but since a path |
182 | * may be available now to the device we'll have | 182 | * may be available now to the device we'll have |
183 | * to do recognition again. | 183 | * to do recognition again. |
184 | * Since we don't have any idea about which chpid | 184 | * Since we don't have any idea about which chpid |
185 | * that beast may be on we'll have to do a stsch | 185 | * that beast may be on we'll have to do a stsch |
186 | * on all devices, grr... | 186 | * on all devices, grr... |
187 | */ | 187 | */ |
188 | if (stsch_err(schid, &schib)) | 188 | if (stsch_err(schid, &schib)) |
189 | /* We're through */ | 189 | /* We're through */ |
190 | return -ENXIO; | 190 | return -ENXIO; |
191 | 191 | ||
192 | /* Put it on the slow path. */ | 192 | /* Put it on the slow path. */ |
193 | css_schedule_eval(schid); | 193 | css_schedule_eval(schid); |
194 | return 0; | 194 | return 0; |
195 | } | 195 | } |
196 | 196 | ||
197 | static int __s390_process_res_acc(struct subchannel *sch, void *data) | 197 | static int __s390_process_res_acc(struct subchannel *sch, void *data) |
198 | { | 198 | { |
199 | spin_lock_irq(sch->lock); | 199 | spin_lock_irq(sch->lock); |
200 | if (sch->driver && sch->driver->chp_event) | 200 | if (sch->driver && sch->driver->chp_event) |
201 | sch->driver->chp_event(sch, data, CHP_ONLINE); | 201 | sch->driver->chp_event(sch, data, CHP_ONLINE); |
202 | spin_unlock_irq(sch->lock); | 202 | spin_unlock_irq(sch->lock); |
203 | 203 | ||
204 | return 0; | 204 | return 0; |
205 | } | 205 | } |
206 | 206 | ||
207 | static void s390_process_res_acc(struct chp_link *link) | 207 | static void s390_process_res_acc(struct chp_link *link) |
208 | { | 208 | { |
209 | char dbf_txt[15]; | 209 | char dbf_txt[15]; |
210 | 210 | ||
211 | sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid, | 211 | sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid, |
212 | link->chpid.id); | 212 | link->chpid.id); |
213 | CIO_TRACE_EVENT( 2, dbf_txt); | 213 | CIO_TRACE_EVENT( 2, dbf_txt); |
214 | if (link->fla != 0) { | 214 | if (link->fla != 0) { |
215 | sprintf(dbf_txt, "fla%x", link->fla); | 215 | sprintf(dbf_txt, "fla%x", link->fla); |
216 | CIO_TRACE_EVENT( 2, dbf_txt); | 216 | CIO_TRACE_EVENT( 2, dbf_txt); |
217 | } | 217 | } |
218 | /* Wait until previous actions have settled. */ | 218 | /* Wait until previous actions have settled. */ |
219 | css_wait_for_slow_path(); | 219 | css_wait_for_slow_path(); |
220 | /* | 220 | /* |
221 | * I/O resources may have become accessible. | 221 | * I/O resources may have become accessible. |
222 | * Scan through all subchannels that may be concerned and | 222 | * Scan through all subchannels that may be concerned and |
223 | * do a validation on those. | 223 | * do a validation on those. |
224 | * The more information we have (info), the less scanning | 224 | * The more information we have (info), the less scanning |
225 | * will we have to do. | 225 | * will we have to do. |
226 | */ | 226 | */ |
227 | for_each_subchannel_staged(__s390_process_res_acc, | 227 | for_each_subchannel_staged(__s390_process_res_acc, |
228 | s390_process_res_acc_new_sch, link); | 228 | s390_process_res_acc_new_sch, link); |
229 | } | 229 | } |
230 | 230 | ||
231 | static int | 231 | static int |
232 | __get_chpid_from_lir(void *data) | 232 | __get_chpid_from_lir(void *data) |
233 | { | 233 | { |
234 | struct lir { | 234 | struct lir { |
235 | u8 iq; | 235 | u8 iq; |
236 | u8 ic; | 236 | u8 ic; |
237 | u16 sci; | 237 | u16 sci; |
238 | /* incident-node descriptor */ | 238 | /* incident-node descriptor */ |
239 | u32 indesc[28]; | 239 | u32 indesc[28]; |
240 | /* attached-node descriptor */ | 240 | /* attached-node descriptor */ |
241 | u32 andesc[28]; | 241 | u32 andesc[28]; |
242 | /* incident-specific information */ | 242 | /* incident-specific information */ |
243 | u32 isinfo[28]; | 243 | u32 isinfo[28]; |
244 | } __attribute__ ((packed)) *lir; | 244 | } __attribute__ ((packed)) *lir; |
245 | 245 | ||
246 | lir = data; | 246 | lir = data; |
247 | if (!(lir->iq&0x80)) | 247 | if (!(lir->iq&0x80)) |
248 | /* NULL link incident record */ | 248 | /* NULL link incident record */ |
249 | return -EINVAL; | 249 | return -EINVAL; |
250 | if (!(lir->indesc[0]&0xc0000000)) | 250 | if (!(lir->indesc[0]&0xc0000000)) |
251 | /* node descriptor not valid */ | 251 | /* node descriptor not valid */ |
252 | return -EINVAL; | 252 | return -EINVAL; |
253 | if (!(lir->indesc[0]&0x10000000)) | 253 | if (!(lir->indesc[0]&0x10000000)) |
254 | /* don't handle device-type nodes - FIXME */ | 254 | /* don't handle device-type nodes - FIXME */ |
255 | return -EINVAL; | 255 | return -EINVAL; |
256 | /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */ | 256 | /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */ |
257 | 257 | ||
258 | return (u16) (lir->indesc[0]&0x000000ff); | 258 | return (u16) (lir->indesc[0]&0x000000ff); |
259 | } | 259 | } |
260 | 260 | ||
261 | struct chsc_sei_area { | 261 | struct chsc_sei_area { |
262 | struct chsc_header request; | 262 | struct chsc_header request; |
263 | u32 reserved1; | 263 | u32 reserved1; |
264 | u32 reserved2; | 264 | u32 reserved2; |
265 | u32 reserved3; | 265 | u32 reserved3; |
266 | struct chsc_header response; | 266 | struct chsc_header response; |
267 | u32 reserved4; | 267 | u32 reserved4; |
268 | u8 flags; | 268 | u8 flags; |
269 | u8 vf; /* validity flags */ | 269 | u8 vf; /* validity flags */ |
270 | u8 rs; /* reporting source */ | 270 | u8 rs; /* reporting source */ |
271 | u8 cc; /* content code */ | 271 | u8 cc; /* content code */ |
272 | u16 fla; /* full link address */ | 272 | u16 fla; /* full link address */ |
273 | u16 rsid; /* reporting source id */ | 273 | u16 rsid; /* reporting source id */ |
274 | u32 reserved5; | 274 | u32 reserved5; |
275 | u32 reserved6; | 275 | u32 reserved6; |
276 | u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */ | 276 | u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */ |
277 | /* ccdf has to be big enough for a link-incident record */ | 277 | /* ccdf has to be big enough for a link-incident record */ |
278 | } __attribute__ ((packed)); | 278 | } __attribute__ ((packed)); |
279 | 279 | ||
280 | static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) | 280 | static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) |
281 | { | 281 | { |
282 | struct chp_id chpid; | 282 | struct chp_id chpid; |
283 | int id; | 283 | int id; |
284 | 284 | ||
285 | CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", | 285 | CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", |
286 | sei_area->rs, sei_area->rsid); | 286 | sei_area->rs, sei_area->rsid); |
287 | if (sei_area->rs != 4) | 287 | if (sei_area->rs != 4) |
288 | return; | 288 | return; |
289 | id = __get_chpid_from_lir(sei_area->ccdf); | 289 | id = __get_chpid_from_lir(sei_area->ccdf); |
290 | if (id < 0) | 290 | if (id < 0) |
291 | CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); | 291 | CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); |
292 | else { | 292 | else { |
293 | chp_id_init(&chpid); | 293 | chp_id_init(&chpid); |
294 | chpid.id = id; | 294 | chpid.id = id; |
295 | chsc_chp_offline(chpid); | 295 | chsc_chp_offline(chpid); |
296 | } | 296 | } |
297 | } | 297 | } |
298 | 298 | ||
299 | static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | 299 | static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) |
300 | { | 300 | { |
301 | struct chp_link link; | 301 | struct chp_link link; |
302 | struct chp_id chpid; | 302 | struct chp_id chpid; |
303 | int status; | 303 | int status; |
304 | 304 | ||
305 | CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " | 305 | CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " |
306 | "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); | 306 | "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); |
307 | if (sei_area->rs != 4) | 307 | if (sei_area->rs != 4) |
308 | return; | 308 | return; |
309 | chp_id_init(&chpid); | 309 | chp_id_init(&chpid); |
310 | chpid.id = sei_area->rsid; | 310 | chpid.id = sei_area->rsid; |
311 | /* allocate a new channel path structure, if needed */ | 311 | /* allocate a new channel path structure, if needed */ |
312 | status = chp_get_status(chpid); | 312 | status = chp_get_status(chpid); |
313 | if (status < 0) | 313 | if (status < 0) |
314 | chp_new(chpid); | 314 | chp_new(chpid); |
315 | else if (!status) | 315 | else if (!status) |
316 | return; | 316 | return; |
317 | memset(&link, 0, sizeof(struct chp_link)); | 317 | memset(&link, 0, sizeof(struct chp_link)); |
318 | link.chpid = chpid; | 318 | link.chpid = chpid; |
319 | if ((sei_area->vf & 0xc0) != 0) { | 319 | if ((sei_area->vf & 0xc0) != 0) { |
320 | link.fla = sei_area->fla; | 320 | link.fla = sei_area->fla; |
321 | if ((sei_area->vf & 0xc0) == 0xc0) | 321 | if ((sei_area->vf & 0xc0) == 0xc0) |
322 | /* full link address */ | 322 | /* full link address */ |
323 | link.fla_mask = 0xffff; | 323 | link.fla_mask = 0xffff; |
324 | else | 324 | else |
325 | /* link address */ | 325 | /* link address */ |
326 | link.fla_mask = 0xff00; | 326 | link.fla_mask = 0xff00; |
327 | } | 327 | } |
328 | s390_process_res_acc(&link); | 328 | s390_process_res_acc(&link); |
329 | } | 329 | } |
330 | 330 | ||
331 | struct chp_config_data { | 331 | struct chp_config_data { |
332 | u8 map[32]; | 332 | u8 map[32]; |
333 | u8 op; | 333 | u8 op; |
334 | u8 pc; | 334 | u8 pc; |
335 | }; | 335 | }; |
336 | 336 | ||
337 | static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) | 337 | static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) |
338 | { | 338 | { |
339 | struct chp_config_data *data; | 339 | struct chp_config_data *data; |
340 | struct chp_id chpid; | 340 | struct chp_id chpid; |
341 | int num; | 341 | int num; |
342 | char *events[3] = {"configure", "deconfigure", "cancel deconfigure"}; | 342 | char *events[3] = {"configure", "deconfigure", "cancel deconfigure"}; |
343 | 343 | ||
344 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); | 344 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); |
345 | if (sei_area->rs != 0) | 345 | if (sei_area->rs != 0) |
346 | return; | 346 | return; |
347 | data = (struct chp_config_data *) &(sei_area->ccdf); | 347 | data = (struct chp_config_data *) &(sei_area->ccdf); |
348 | chp_id_init(&chpid); | 348 | chp_id_init(&chpid); |
349 | for (num = 0; num <= __MAX_CHPID; num++) { | 349 | for (num = 0; num <= __MAX_CHPID; num++) { |
350 | if (!chp_test_bit(data->map, num)) | 350 | if (!chp_test_bit(data->map, num)) |
351 | continue; | 351 | continue; |
352 | chpid.id = num; | 352 | chpid.id = num; |
353 | pr_notice("Processing %s for channel path %x.%02x\n", | 353 | pr_notice("Processing %s for channel path %x.%02x\n", |
354 | events[data->op], chpid.cssid, chpid.id); | 354 | events[data->op], chpid.cssid, chpid.id); |
355 | switch (data->op) { | 355 | switch (data->op) { |
356 | case 0: | 356 | case 0: |
357 | chp_cfg_schedule(chpid, 1); | 357 | chp_cfg_schedule(chpid, 1); |
358 | break; | 358 | break; |
359 | case 1: | 359 | case 1: |
360 | chp_cfg_schedule(chpid, 0); | 360 | chp_cfg_schedule(chpid, 0); |
361 | break; | 361 | break; |
362 | case 2: | 362 | case 2: |
363 | chp_cfg_cancel_deconfigure(chpid); | 363 | chp_cfg_cancel_deconfigure(chpid); |
364 | break; | 364 | break; |
365 | } | 365 | } |
366 | } | 366 | } |
367 | } | 367 | } |
368 | 368 | ||
369 | static void chsc_process_sei(struct chsc_sei_area *sei_area) | 369 | static void chsc_process_sei(struct chsc_sei_area *sei_area) |
370 | { | 370 | { |
371 | /* Check if we might have lost some information. */ | 371 | /* Check if we might have lost some information. */ |
372 | if (sei_area->flags & 0x40) { | 372 | if (sei_area->flags & 0x40) { |
373 | CIO_CRW_EVENT(2, "chsc: event overflow\n"); | 373 | CIO_CRW_EVENT(2, "chsc: event overflow\n"); |
374 | css_schedule_eval_all(); | 374 | css_schedule_eval_all(); |
375 | } | 375 | } |
376 | /* which kind of information was stored? */ | 376 | /* which kind of information was stored? */ |
377 | switch (sei_area->cc) { | 377 | switch (sei_area->cc) { |
378 | case 1: /* link incident*/ | 378 | case 1: /* link incident*/ |
379 | chsc_process_sei_link_incident(sei_area); | 379 | chsc_process_sei_link_incident(sei_area); |
380 | break; | 380 | break; |
381 | case 2: /* i/o resource accessibiliy */ | 381 | case 2: /* i/o resource accessibiliy */ |
382 | chsc_process_sei_res_acc(sei_area); | 382 | chsc_process_sei_res_acc(sei_area); |
383 | break; | 383 | break; |
384 | case 8: /* channel-path-configuration notification */ | 384 | case 8: /* channel-path-configuration notification */ |
385 | chsc_process_sei_chp_config(sei_area); | 385 | chsc_process_sei_chp_config(sei_area); |
386 | break; | 386 | break; |
387 | default: /* other stuff */ | 387 | default: /* other stuff */ |
388 | CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", | 388 | CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", |
389 | sei_area->cc); | 389 | sei_area->cc); |
390 | break; | 390 | break; |
391 | } | 391 | } |
392 | } | 392 | } |
393 | 393 | ||
394 | static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) | 394 | static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) |
395 | { | 395 | { |
396 | struct chsc_sei_area *sei_area; | 396 | struct chsc_sei_area *sei_area; |
397 | 397 | ||
398 | if (overflow) { | 398 | if (overflow) { |
399 | css_schedule_eval_all(); | 399 | css_schedule_eval_all(); |
400 | return; | 400 | return; |
401 | } | 401 | } |
402 | CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " | 402 | CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " |
403 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | 403 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", |
404 | crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, | 404 | crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, |
405 | crw0->erc, crw0->rsid); | 405 | crw0->erc, crw0->rsid); |
406 | if (!sei_page) | 406 | if (!sei_page) |
407 | return; | 407 | return; |
408 | /* Access to sei_page is serialized through machine check handler | 408 | /* Access to sei_page is serialized through machine check handler |
409 | * thread, so no need for locking. */ | 409 | * thread, so no need for locking. */ |
410 | sei_area = sei_page; | 410 | sei_area = sei_page; |
411 | 411 | ||
412 | CIO_TRACE_EVENT(2, "prcss"); | 412 | CIO_TRACE_EVENT(2, "prcss"); |
413 | do { | 413 | do { |
414 | memset(sei_area, 0, sizeof(*sei_area)); | 414 | memset(sei_area, 0, sizeof(*sei_area)); |
415 | sei_area->request.length = 0x0010; | 415 | sei_area->request.length = 0x0010; |
416 | sei_area->request.code = 0x000e; | 416 | sei_area->request.code = 0x000e; |
417 | if (chsc(sei_area)) | 417 | if (chsc(sei_area)) |
418 | break; | 418 | break; |
419 | 419 | ||
420 | if (sei_area->response.code == 0x0001) { | 420 | if (sei_area->response.code == 0x0001) { |
421 | CIO_CRW_EVENT(4, "chsc: sei successful\n"); | 421 | CIO_CRW_EVENT(4, "chsc: sei successful\n"); |
422 | chsc_process_sei(sei_area); | 422 | chsc_process_sei(sei_area); |
423 | } else { | 423 | } else { |
424 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", | 424 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", |
425 | sei_area->response.code); | 425 | sei_area->response.code); |
426 | break; | 426 | break; |
427 | } | 427 | } |
428 | } while (sei_area->flags & 0x80); | 428 | } while (sei_area->flags & 0x80); |
429 | } | 429 | } |
430 | 430 | ||
431 | void chsc_chp_online(struct chp_id chpid) | 431 | void chsc_chp_online(struct chp_id chpid) |
432 | { | 432 | { |
433 | char dbf_txt[15]; | 433 | char dbf_txt[15]; |
434 | struct chp_link link; | 434 | struct chp_link link; |
435 | 435 | ||
436 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); | 436 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); |
437 | CIO_TRACE_EVENT(2, dbf_txt); | 437 | CIO_TRACE_EVENT(2, dbf_txt); |
438 | 438 | ||
439 | if (chp_get_status(chpid) != 0) { | 439 | if (chp_get_status(chpid) != 0) { |
440 | memset(&link, 0, sizeof(struct chp_link)); | 440 | memset(&link, 0, sizeof(struct chp_link)); |
441 | link.chpid = chpid; | 441 | link.chpid = chpid; |
442 | /* Wait until previous actions have settled. */ | 442 | /* Wait until previous actions have settled. */ |
443 | css_wait_for_slow_path(); | 443 | css_wait_for_slow_path(); |
444 | for_each_subchannel_staged(__s390_process_res_acc, NULL, | 444 | for_each_subchannel_staged(__s390_process_res_acc, NULL, |
445 | &link); | 445 | &link); |
446 | } | 446 | } |
447 | } | 447 | } |
448 | 448 | ||
449 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, | 449 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, |
450 | struct chp_id chpid, int on) | 450 | struct chp_id chpid, int on) |
451 | { | 451 | { |
452 | unsigned long flags; | 452 | unsigned long flags; |
453 | struct chp_link link; | 453 | struct chp_link link; |
454 | 454 | ||
455 | memset(&link, 0, sizeof(struct chp_link)); | 455 | memset(&link, 0, sizeof(struct chp_link)); |
456 | link.chpid = chpid; | 456 | link.chpid = chpid; |
457 | spin_lock_irqsave(sch->lock, flags); | 457 | spin_lock_irqsave(sch->lock, flags); |
458 | if (sch->driver && sch->driver->chp_event) | 458 | if (sch->driver && sch->driver->chp_event) |
459 | sch->driver->chp_event(sch, &link, | 459 | sch->driver->chp_event(sch, &link, |
460 | on ? CHP_VARY_ON : CHP_VARY_OFF); | 460 | on ? CHP_VARY_ON : CHP_VARY_OFF); |
461 | spin_unlock_irqrestore(sch->lock, flags); | 461 | spin_unlock_irqrestore(sch->lock, flags); |
462 | } | 462 | } |
463 | 463 | ||
464 | static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data) | 464 | static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data) |
465 | { | 465 | { |
466 | struct chp_id *chpid = data; | 466 | struct chp_id *chpid = data; |
467 | 467 | ||
468 | __s390_subchannel_vary_chpid(sch, *chpid, 0); | 468 | __s390_subchannel_vary_chpid(sch, *chpid, 0); |
469 | return 0; | 469 | return 0; |
470 | } | 470 | } |
471 | 471 | ||
472 | static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data) | 472 | static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data) |
473 | { | 473 | { |
474 | struct chp_id *chpid = data; | 474 | struct chp_id *chpid = data; |
475 | 475 | ||
476 | __s390_subchannel_vary_chpid(sch, *chpid, 1); | 476 | __s390_subchannel_vary_chpid(sch, *chpid, 1); |
477 | return 0; | 477 | return 0; |
478 | } | 478 | } |
479 | 479 | ||
480 | static int | 480 | static int |
481 | __s390_vary_chpid_on(struct subchannel_id schid, void *data) | 481 | __s390_vary_chpid_on(struct subchannel_id schid, void *data) |
482 | { | 482 | { |
483 | struct schib schib; | 483 | struct schib schib; |
484 | 484 | ||
485 | if (stsch_err(schid, &schib)) | 485 | if (stsch_err(schid, &schib)) |
486 | /* We're through */ | 486 | /* We're through */ |
487 | return -ENXIO; | 487 | return -ENXIO; |
488 | /* Put it on the slow path. */ | 488 | /* Put it on the slow path. */ |
489 | css_schedule_eval(schid); | 489 | css_schedule_eval(schid); |
490 | return 0; | 490 | return 0; |
491 | } | 491 | } |
492 | 492 | ||
493 | /** | 493 | /** |
494 | * chsc_chp_vary - propagate channel-path vary operation to subchannels | 494 | * chsc_chp_vary - propagate channel-path vary operation to subchannels |
495 | * @chpid: channl-path ID | 495 | * @chpid: channl-path ID |
496 | * @on: non-zero for vary online, zero for vary offline | 496 | * @on: non-zero for vary online, zero for vary offline |
497 | */ | 497 | */ |
498 | int chsc_chp_vary(struct chp_id chpid, int on) | 498 | int chsc_chp_vary(struct chp_id chpid, int on) |
499 | { | 499 | { |
500 | struct chp_link link; | 500 | struct chp_link link; |
501 | 501 | ||
502 | memset(&link, 0, sizeof(struct chp_link)); | 502 | memset(&link, 0, sizeof(struct chp_link)); |
503 | link.chpid = chpid; | 503 | link.chpid = chpid; |
504 | /* Wait until previous actions have settled. */ | 504 | /* Wait until previous actions have settled. */ |
505 | css_wait_for_slow_path(); | 505 | css_wait_for_slow_path(); |
506 | /* | 506 | /* |
507 | * Redo PathVerification on the devices the chpid connects to | 507 | * Redo PathVerification on the devices the chpid connects to |
508 | */ | 508 | */ |
509 | 509 | ||
510 | if (on) | 510 | if (on) |
511 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, | 511 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, |
512 | __s390_vary_chpid_on, &link); | 512 | __s390_vary_chpid_on, &link); |
513 | else | 513 | else |
514 | for_each_subchannel_staged(s390_subchannel_vary_chpid_off, | 514 | for_each_subchannel_staged(s390_subchannel_vary_chpid_off, |
515 | NULL, &link); | 515 | NULL, &link); |
516 | 516 | ||
517 | return 0; | 517 | return 0; |
518 | } | 518 | } |
519 | 519 | ||
520 | static void | 520 | static void |
521 | chsc_remove_cmg_attr(struct channel_subsystem *css) | 521 | chsc_remove_cmg_attr(struct channel_subsystem *css) |
522 | { | 522 | { |
523 | int i; | 523 | int i; |
524 | 524 | ||
525 | for (i = 0; i <= __MAX_CHPID; i++) { | 525 | for (i = 0; i <= __MAX_CHPID; i++) { |
526 | if (!css->chps[i]) | 526 | if (!css->chps[i]) |
527 | continue; | 527 | continue; |
528 | chp_remove_cmg_attr(css->chps[i]); | 528 | chp_remove_cmg_attr(css->chps[i]); |
529 | } | 529 | } |
530 | } | 530 | } |
531 | 531 | ||
532 | static int | 532 | static int |
533 | chsc_add_cmg_attr(struct channel_subsystem *css) | 533 | chsc_add_cmg_attr(struct channel_subsystem *css) |
534 | { | 534 | { |
535 | int i, ret; | 535 | int i, ret; |
536 | 536 | ||
537 | ret = 0; | 537 | ret = 0; |
538 | for (i = 0; i <= __MAX_CHPID; i++) { | 538 | for (i = 0; i <= __MAX_CHPID; i++) { |
539 | if (!css->chps[i]) | 539 | if (!css->chps[i]) |
540 | continue; | 540 | continue; |
541 | ret = chp_add_cmg_attr(css->chps[i]); | 541 | ret = chp_add_cmg_attr(css->chps[i]); |
542 | if (ret) | 542 | if (ret) |
543 | goto cleanup; | 543 | goto cleanup; |
544 | } | 544 | } |
545 | return ret; | 545 | return ret; |
546 | cleanup: | 546 | cleanup: |
547 | for (--i; i >= 0; i--) { | 547 | for (--i; i >= 0; i--) { |
548 | if (!css->chps[i]) | 548 | if (!css->chps[i]) |
549 | continue; | 549 | continue; |
550 | chp_remove_cmg_attr(css->chps[i]); | 550 | chp_remove_cmg_attr(css->chps[i]); |
551 | } | 551 | } |
552 | return ret; | 552 | return ret; |
553 | } | 553 | } |
554 | 554 | ||
555 | int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | 555 | int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) |
556 | { | 556 | { |
557 | struct { | 557 | struct { |
558 | struct chsc_header request; | 558 | struct chsc_header request; |
559 | u32 operation_code : 2; | 559 | u32 operation_code : 2; |
560 | u32 : 30; | 560 | u32 : 30; |
561 | u32 key : 4; | 561 | u32 key : 4; |
562 | u32 : 28; | 562 | u32 : 28; |
563 | u32 zeroes1; | 563 | u32 zeroes1; |
564 | u32 cub_addr1; | 564 | u32 cub_addr1; |
565 | u32 zeroes2; | 565 | u32 zeroes2; |
566 | u32 cub_addr2; | 566 | u32 cub_addr2; |
567 | u32 reserved[13]; | 567 | u32 reserved[13]; |
568 | struct chsc_header response; | 568 | struct chsc_header response; |
569 | u32 status : 8; | 569 | u32 status : 8; |
570 | u32 : 4; | 570 | u32 : 4; |
571 | u32 fmt : 4; | 571 | u32 fmt : 4; |
572 | u32 : 16; | 572 | u32 : 16; |
573 | } __attribute__ ((packed)) *secm_area; | 573 | } __attribute__ ((packed)) *secm_area; |
574 | int ret, ccode; | 574 | int ret, ccode; |
575 | 575 | ||
576 | secm_area = page; | 576 | secm_area = page; |
577 | secm_area->request.length = 0x0050; | 577 | secm_area->request.length = 0x0050; |
578 | secm_area->request.code = 0x0016; | 578 | secm_area->request.code = 0x0016; |
579 | 579 | ||
580 | secm_area->key = PAGE_DEFAULT_KEY >> 4; | 580 | secm_area->key = PAGE_DEFAULT_KEY >> 4; |
581 | secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; | 581 | secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; |
582 | secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; | 582 | secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; |
583 | 583 | ||
584 | secm_area->operation_code = enable ? 0 : 1; | 584 | secm_area->operation_code = enable ? 0 : 1; |
585 | 585 | ||
586 | ccode = chsc(secm_area); | 586 | ccode = chsc(secm_area); |
587 | if (ccode > 0) | 587 | if (ccode > 0) |
588 | return (ccode == 3) ? -ENODEV : -EBUSY; | 588 | return (ccode == 3) ? -ENODEV : -EBUSY; |
589 | 589 | ||
590 | switch (secm_area->response.code) { | 590 | switch (secm_area->response.code) { |
591 | case 0x0102: | 591 | case 0x0102: |
592 | case 0x0103: | 592 | case 0x0103: |
593 | ret = -EINVAL; | 593 | ret = -EINVAL; |
594 | break; | 594 | break; |
595 | default: | 595 | default: |
596 | ret = chsc_error_from_response(secm_area->response.code); | 596 | ret = chsc_error_from_response(secm_area->response.code); |
597 | } | 597 | } |
598 | if (ret != 0) | 598 | if (ret != 0) |
599 | CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", | 599 | CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", |
600 | secm_area->response.code); | 600 | secm_area->response.code); |
601 | return ret; | 601 | return ret; |
602 | } | 602 | } |
603 | 603 | ||
604 | int | 604 | int |
605 | chsc_secm(struct channel_subsystem *css, int enable) | 605 | chsc_secm(struct channel_subsystem *css, int enable) |
606 | { | 606 | { |
607 | void *secm_area; | 607 | void *secm_area; |
608 | int ret; | 608 | int ret; |
609 | 609 | ||
610 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 610 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
611 | if (!secm_area) | 611 | if (!secm_area) |
612 | return -ENOMEM; | 612 | return -ENOMEM; |
613 | 613 | ||
614 | if (enable && !css->cm_enabled) { | 614 | if (enable && !css->cm_enabled) { |
615 | css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 615 | css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
616 | css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 616 | css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
617 | if (!css->cub_addr1 || !css->cub_addr2) { | 617 | if (!css->cub_addr1 || !css->cub_addr2) { |
618 | free_page((unsigned long)css->cub_addr1); | 618 | free_page((unsigned long)css->cub_addr1); |
619 | free_page((unsigned long)css->cub_addr2); | 619 | free_page((unsigned long)css->cub_addr2); |
620 | free_page((unsigned long)secm_area); | 620 | free_page((unsigned long)secm_area); |
621 | return -ENOMEM; | 621 | return -ENOMEM; |
622 | } | 622 | } |
623 | } | 623 | } |
624 | ret = __chsc_do_secm(css, enable, secm_area); | 624 | ret = __chsc_do_secm(css, enable, secm_area); |
625 | if (!ret) { | 625 | if (!ret) { |
626 | css->cm_enabled = enable; | 626 | css->cm_enabled = enable; |
627 | if (css->cm_enabled) { | 627 | if (css->cm_enabled) { |
628 | ret = chsc_add_cmg_attr(css); | 628 | ret = chsc_add_cmg_attr(css); |
629 | if (ret) { | 629 | if (ret) { |
630 | memset(secm_area, 0, PAGE_SIZE); | 630 | memset(secm_area, 0, PAGE_SIZE); |
631 | __chsc_do_secm(css, 0, secm_area); | 631 | __chsc_do_secm(css, 0, secm_area); |
632 | css->cm_enabled = 0; | 632 | css->cm_enabled = 0; |
633 | } | 633 | } |
634 | } else | 634 | } else |
635 | chsc_remove_cmg_attr(css); | 635 | chsc_remove_cmg_attr(css); |
636 | } | 636 | } |
637 | if (!css->cm_enabled) { | 637 | if (!css->cm_enabled) { |
638 | free_page((unsigned long)css->cub_addr1); | 638 | free_page((unsigned long)css->cub_addr1); |
639 | free_page((unsigned long)css->cub_addr2); | 639 | free_page((unsigned long)css->cub_addr2); |
640 | } | 640 | } |
641 | free_page((unsigned long)secm_area); | 641 | free_page((unsigned long)secm_area); |
642 | return ret; | 642 | return ret; |
643 | } | 643 | } |
644 | 644 | ||
645 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, | 645 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, |
646 | int c, int m, | 646 | int c, int m, |
647 | struct chsc_response_struct *resp) | 647 | struct chsc_response_struct *resp) |
648 | { | 648 | { |
649 | int ccode, ret; | 649 | int ccode, ret; |
650 | 650 | ||
651 | struct { | 651 | struct { |
652 | struct chsc_header request; | 652 | struct chsc_header request; |
653 | u32 : 2; | 653 | u32 : 2; |
654 | u32 m : 1; | 654 | u32 m : 1; |
655 | u32 c : 1; | 655 | u32 c : 1; |
656 | u32 fmt : 4; | 656 | u32 fmt : 4; |
657 | u32 cssid : 8; | 657 | u32 cssid : 8; |
658 | u32 : 4; | 658 | u32 : 4; |
659 | u32 rfmt : 4; | 659 | u32 rfmt : 4; |
660 | u32 first_chpid : 8; | 660 | u32 first_chpid : 8; |
661 | u32 : 24; | 661 | u32 : 24; |
662 | u32 last_chpid : 8; | 662 | u32 last_chpid : 8; |
663 | u32 zeroes1; | 663 | u32 zeroes1; |
664 | struct chsc_header response; | 664 | struct chsc_header response; |
665 | u8 data[PAGE_SIZE - 20]; | 665 | u8 data[PAGE_SIZE - 20]; |
666 | } __attribute__ ((packed)) *scpd_area; | 666 | } __attribute__ ((packed)) *scpd_area; |
667 | 667 | ||
668 | if ((rfmt == 1) && !css_general_characteristics.fcs) | 668 | if ((rfmt == 1) && !css_general_characteristics.fcs) |
669 | return -EINVAL; | 669 | return -EINVAL; |
670 | if ((rfmt == 2) && !css_general_characteristics.cib) | 670 | if ((rfmt == 2) && !css_general_characteristics.cib) |
671 | return -EINVAL; | 671 | return -EINVAL; |
672 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 672 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
673 | if (!scpd_area) | 673 | if (!scpd_area) |
674 | return -ENOMEM; | 674 | return -ENOMEM; |
675 | 675 | ||
676 | scpd_area->request.length = 0x0010; | 676 | scpd_area->request.length = 0x0010; |
677 | scpd_area->request.code = 0x0002; | 677 | scpd_area->request.code = 0x0002; |
678 | 678 | ||
679 | scpd_area->cssid = chpid.cssid; | 679 | scpd_area->cssid = chpid.cssid; |
680 | scpd_area->first_chpid = chpid.id; | 680 | scpd_area->first_chpid = chpid.id; |
681 | scpd_area->last_chpid = chpid.id; | 681 | scpd_area->last_chpid = chpid.id; |
682 | scpd_area->m = m; | 682 | scpd_area->m = m; |
683 | scpd_area->c = c; | 683 | scpd_area->c = c; |
684 | scpd_area->fmt = fmt; | 684 | scpd_area->fmt = fmt; |
685 | scpd_area->rfmt = rfmt; | 685 | scpd_area->rfmt = rfmt; |
686 | 686 | ||
687 | ccode = chsc(scpd_area); | 687 | ccode = chsc(scpd_area); |
688 | if (ccode > 0) { | 688 | if (ccode > 0) { |
689 | ret = (ccode == 3) ? -ENODEV : -EBUSY; | 689 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
690 | goto out; | 690 | goto out; |
691 | } | 691 | } |
692 | 692 | ||
693 | ret = chsc_error_from_response(scpd_area->response.code); | 693 | ret = chsc_error_from_response(scpd_area->response.code); |
694 | if (ret == 0) | 694 | if (ret == 0) |
695 | /* Success. */ | 695 | /* Success. */ |
696 | memcpy(resp, &scpd_area->response, scpd_area->response.length); | 696 | memcpy(resp, &scpd_area->response, scpd_area->response.length); |
697 | else | 697 | else |
698 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", | 698 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", |
699 | scpd_area->response.code); | 699 | scpd_area->response.code); |
700 | out: | 700 | out: |
701 | free_page((unsigned long)scpd_area); | 701 | free_page((unsigned long)scpd_area); |
702 | return ret; | 702 | return ret; |
703 | } | 703 | } |
704 | EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); | 704 | EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); |
705 | 705 | ||
706 | int chsc_determine_base_channel_path_desc(struct chp_id chpid, | 706 | int chsc_determine_base_channel_path_desc(struct chp_id chpid, |
707 | struct channel_path_desc *desc) | 707 | struct channel_path_desc *desc) |
708 | { | 708 | { |
709 | struct chsc_response_struct *chsc_resp; | 709 | struct chsc_response_struct *chsc_resp; |
710 | int ret; | 710 | int ret; |
711 | 711 | ||
712 | chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL); | 712 | chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL); |
713 | if (!chsc_resp) | 713 | if (!chsc_resp) |
714 | return -ENOMEM; | 714 | return -ENOMEM; |
715 | ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp); | 715 | ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp); |
716 | if (ret) | 716 | if (ret) |
717 | goto out_free; | 717 | goto out_free; |
718 | memcpy(desc, &chsc_resp->data, sizeof(*desc)); | 718 | memcpy(desc, &chsc_resp->data, sizeof(*desc)); |
719 | out_free: | 719 | out_free: |
720 | kfree(chsc_resp); | 720 | kfree(chsc_resp); |
721 | return ret; | 721 | return ret; |
722 | } | 722 | } |
723 | 723 | ||
724 | static void | 724 | static void |
725 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, | 725 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, |
726 | struct cmg_chars *chars) | 726 | struct cmg_chars *chars) |
727 | { | 727 | { |
728 | switch (chp->cmg) { | 728 | switch (chp->cmg) { |
729 | case 2: | 729 | case 2: |
730 | case 3: | 730 | case 3: |
731 | chp->cmg_chars = kmalloc(sizeof(struct cmg_chars), | 731 | chp->cmg_chars = kmalloc(sizeof(struct cmg_chars), |
732 | GFP_KERNEL); | 732 | GFP_KERNEL); |
733 | if (chp->cmg_chars) { | 733 | if (chp->cmg_chars) { |
734 | int i, mask; | 734 | int i, mask; |
735 | struct cmg_chars *cmg_chars; | 735 | struct cmg_chars *cmg_chars; |
736 | 736 | ||
737 | cmg_chars = chp->cmg_chars; | 737 | cmg_chars = chp->cmg_chars; |
738 | for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { | 738 | for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { |
739 | mask = 0x80 >> (i + 3); | 739 | mask = 0x80 >> (i + 3); |
740 | if (cmcv & mask) | 740 | if (cmcv & mask) |
741 | cmg_chars->values[i] = chars->values[i]; | 741 | cmg_chars->values[i] = chars->values[i]; |
742 | else | 742 | else |
743 | cmg_chars->values[i] = 0; | 743 | cmg_chars->values[i] = 0; |
744 | } | 744 | } |
745 | } | 745 | } |
746 | break; | 746 | break; |
747 | default: | 747 | default: |
748 | /* No cmg-dependent data. */ | 748 | /* No cmg-dependent data. */ |
749 | break; | 749 | break; |
750 | } | 750 | } |
751 | } | 751 | } |
752 | 752 | ||
753 | int chsc_get_channel_measurement_chars(struct channel_path *chp) | 753 | int chsc_get_channel_measurement_chars(struct channel_path *chp) |
754 | { | 754 | { |
755 | int ccode, ret; | 755 | int ccode, ret; |
756 | 756 | ||
757 | struct { | 757 | struct { |
758 | struct chsc_header request; | 758 | struct chsc_header request; |
759 | u32 : 24; | 759 | u32 : 24; |
760 | u32 first_chpid : 8; | 760 | u32 first_chpid : 8; |
761 | u32 : 24; | 761 | u32 : 24; |
762 | u32 last_chpid : 8; | 762 | u32 last_chpid : 8; |
763 | u32 zeroes1; | 763 | u32 zeroes1; |
764 | struct chsc_header response; | 764 | struct chsc_header response; |
765 | u32 zeroes2; | 765 | u32 zeroes2; |
766 | u32 not_valid : 1; | 766 | u32 not_valid : 1; |
767 | u32 shared : 1; | 767 | u32 shared : 1; |
768 | u32 : 22; | 768 | u32 : 22; |
769 | u32 chpid : 8; | 769 | u32 chpid : 8; |
770 | u32 cmcv : 5; | 770 | u32 cmcv : 5; |
771 | u32 : 11; | 771 | u32 : 11; |
772 | u32 cmgq : 8; | 772 | u32 cmgq : 8; |
773 | u32 cmg : 8; | 773 | u32 cmg : 8; |
774 | u32 zeroes3; | 774 | u32 zeroes3; |
775 | u32 data[NR_MEASUREMENT_CHARS]; | 775 | u32 data[NR_MEASUREMENT_CHARS]; |
776 | } __attribute__ ((packed)) *scmc_area; | 776 | } __attribute__ ((packed)) *scmc_area; |
777 | 777 | ||
778 | scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 778 | scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
779 | if (!scmc_area) | 779 | if (!scmc_area) |
780 | return -ENOMEM; | 780 | return -ENOMEM; |
781 | 781 | ||
782 | scmc_area->request.length = 0x0010; | 782 | scmc_area->request.length = 0x0010; |
783 | scmc_area->request.code = 0x0022; | 783 | scmc_area->request.code = 0x0022; |
784 | 784 | ||
785 | scmc_area->first_chpid = chp->chpid.id; | 785 | scmc_area->first_chpid = chp->chpid.id; |
786 | scmc_area->last_chpid = chp->chpid.id; | 786 | scmc_area->last_chpid = chp->chpid.id; |
787 | 787 | ||
788 | ccode = chsc(scmc_area); | 788 | ccode = chsc(scmc_area); |
789 | if (ccode > 0) { | 789 | if (ccode > 0) { |
790 | ret = (ccode == 3) ? -ENODEV : -EBUSY; | 790 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
791 | goto out; | 791 | goto out; |
792 | } | 792 | } |
793 | 793 | ||
794 | ret = chsc_error_from_response(scmc_area->response.code); | 794 | ret = chsc_error_from_response(scmc_area->response.code); |
795 | if (ret == 0) { | 795 | if (ret == 0) { |
796 | /* Success. */ | 796 | /* Success. */ |
797 | if (!scmc_area->not_valid) { | 797 | if (!scmc_area->not_valid) { |
798 | chp->cmg = scmc_area->cmg; | 798 | chp->cmg = scmc_area->cmg; |
799 | chp->shared = scmc_area->shared; | 799 | chp->shared = scmc_area->shared; |
800 | chsc_initialize_cmg_chars(chp, scmc_area->cmcv, | 800 | chsc_initialize_cmg_chars(chp, scmc_area->cmcv, |
801 | (struct cmg_chars *) | 801 | (struct cmg_chars *) |
802 | &scmc_area->data); | 802 | &scmc_area->data); |
803 | } else { | 803 | } else { |
804 | chp->cmg = -1; | 804 | chp->cmg = -1; |
805 | chp->shared = -1; | 805 | chp->shared = -1; |
806 | } | 806 | } |
807 | } else { | 807 | } else { |
808 | CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", | 808 | CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", |
809 | scmc_area->response.code); | 809 | scmc_area->response.code); |
810 | } | 810 | } |
811 | out: | 811 | out: |
812 | free_page((unsigned long)scmc_area); | 812 | free_page((unsigned long)scmc_area); |
813 | return ret; | 813 | return ret; |
814 | } | 814 | } |
815 | 815 | ||
816 | int __init chsc_alloc_sei_area(void) | 816 | int __init chsc_init(void) |
817 | { | 817 | { |
818 | int ret; | 818 | int ret; |
819 | 819 | ||
820 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 820 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
821 | if (!sei_page) { | 821 | if (!sei_page) { |
822 | CIO_MSG_EVENT(0, "Can't allocate page for processing of " | 822 | CIO_MSG_EVENT(0, "Can't allocate page for processing of " |
823 | "chsc machine checks!\n"); | 823 | "chsc machine checks!\n"); |
824 | return -ENOMEM; | 824 | return -ENOMEM; |
825 | } | 825 | } |
826 | ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); | 826 | ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); |
827 | if (ret) | 827 | if (ret) |
828 | kfree(sei_page); | 828 | free_page((unsigned long)sei_page); |
829 | return ret; | 829 | return ret; |
830 | } | 830 | } |
831 | 831 | ||
832 | void __init chsc_free_sei_area(void) | 832 | void __init chsc_init_cleanup(void) |
833 | { | 833 | { |
834 | crw_unregister_handler(CRW_RSC_CSS); | 834 | crw_unregister_handler(CRW_RSC_CSS); |
835 | kfree(sei_page); | 835 | free_page((unsigned long)sei_page); |
836 | } | 836 | } |
837 | 837 | ||
838 | int chsc_enable_facility(int operation_code) | 838 | int chsc_enable_facility(int operation_code) |
839 | { | 839 | { |
840 | int ret; | 840 | int ret; |
841 | static struct { | 841 | static struct { |
842 | struct chsc_header request; | 842 | struct chsc_header request; |
843 | u8 reserved1:4; | 843 | u8 reserved1:4; |
844 | u8 format:4; | 844 | u8 format:4; |
845 | u8 reserved2; | 845 | u8 reserved2; |
846 | u16 operation_code; | 846 | u16 operation_code; |
847 | u32 reserved3; | 847 | u32 reserved3; |
848 | u32 reserved4; | 848 | u32 reserved4; |
849 | u32 operation_data_area[252]; | 849 | u32 operation_data_area[252]; |
850 | struct chsc_header response; | 850 | struct chsc_header response; |
851 | u32 reserved5:4; | 851 | u32 reserved5:4; |
852 | u32 format2:4; | 852 | u32 format2:4; |
853 | u32 reserved6:24; | 853 | u32 reserved6:24; |
854 | } __attribute__ ((packed, aligned(4096))) sda_area; | 854 | } __attribute__ ((packed, aligned(4096))) sda_area; |
855 | 855 | ||
856 | spin_lock(&sda_lock); | 856 | spin_lock(&sda_lock); |
857 | memset(&sda_area, 0, sizeof(sda_area)); | 857 | memset(&sda_area, 0, sizeof(sda_area)); |
858 | sda_area.request.length = 0x0400; | 858 | sda_area.request.length = 0x0400; |
859 | sda_area.request.code = 0x0031; | 859 | sda_area.request.code = 0x0031; |
860 | sda_area.operation_code = operation_code; | 860 | sda_area.operation_code = operation_code; |
861 | 861 | ||
862 | ret = chsc(&sda_area); | 862 | ret = chsc(&sda_area); |
863 | if (ret > 0) { | 863 | if (ret > 0) { |
864 | ret = (ret == 3) ? -ENODEV : -EBUSY; | 864 | ret = (ret == 3) ? -ENODEV : -EBUSY; |
865 | goto out; | 865 | goto out; |
866 | } | 866 | } |
867 | 867 | ||
868 | switch (sda_area.response.code) { | 868 | switch (sda_area.response.code) { |
869 | case 0x0101: | 869 | case 0x0101: |
870 | ret = -EOPNOTSUPP; | 870 | ret = -EOPNOTSUPP; |
871 | break; | 871 | break; |
872 | default: | 872 | default: |
873 | ret = chsc_error_from_response(sda_area.response.code); | 873 | ret = chsc_error_from_response(sda_area.response.code); |
874 | } | 874 | } |
875 | if (ret != 0) | 875 | if (ret != 0) |
876 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", | 876 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", |
877 | operation_code, sda_area.response.code); | 877 | operation_code, sda_area.response.code); |
878 | out: | 878 | out: |
879 | spin_unlock(&sda_lock); | 879 | spin_unlock(&sda_lock); |
880 | return ret; | 880 | return ret; |
881 | } | 881 | } |
882 | 882 | ||
883 | struct css_general_char css_general_characteristics; | 883 | struct css_general_char css_general_characteristics; |
884 | struct css_chsc_char css_chsc_characteristics; | 884 | struct css_chsc_char css_chsc_characteristics; |
885 | 885 | ||
886 | int __init | 886 | int __init |
887 | chsc_determine_css_characteristics(void) | 887 | chsc_determine_css_characteristics(void) |
888 | { | 888 | { |
889 | int result; | 889 | int result; |
890 | struct { | 890 | struct { |
891 | struct chsc_header request; | 891 | struct chsc_header request; |
892 | u32 reserved1; | 892 | u32 reserved1; |
893 | u32 reserved2; | 893 | u32 reserved2; |
894 | u32 reserved3; | 894 | u32 reserved3; |
895 | struct chsc_header response; | 895 | struct chsc_header response; |
896 | u32 reserved4; | 896 | u32 reserved4; |
897 | u32 general_char[510]; | 897 | u32 general_char[510]; |
898 | u32 chsc_char[518]; | 898 | u32 chsc_char[508]; |
899 | } __attribute__ ((packed)) *scsc_area; | 899 | } __attribute__ ((packed)) *scsc_area; |
900 | 900 | ||
901 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 901 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
902 | if (!scsc_area) | 902 | if (!scsc_area) |
903 | return -ENOMEM; | 903 | return -ENOMEM; |
904 | 904 | ||
905 | scsc_area->request.length = 0x0010; | 905 | scsc_area->request.length = 0x0010; |
906 | scsc_area->request.code = 0x0010; | 906 | scsc_area->request.code = 0x0010; |
907 | 907 | ||
908 | result = chsc(scsc_area); | 908 | result = chsc(scsc_area); |
909 | if (result) { | 909 | if (result) { |
910 | result = (result == 3) ? -ENODEV : -EBUSY; | 910 | result = (result == 3) ? -ENODEV : -EBUSY; |
911 | goto exit; | 911 | goto exit; |
912 | } | 912 | } |
913 | 913 | ||
914 | result = chsc_error_from_response(scsc_area->response.code); | 914 | result = chsc_error_from_response(scsc_area->response.code); |
915 | if (result == 0) { | 915 | if (result == 0) { |
916 | memcpy(&css_general_characteristics, scsc_area->general_char, | 916 | memcpy(&css_general_characteristics, scsc_area->general_char, |
917 | sizeof(css_general_characteristics)); | 917 | sizeof(css_general_characteristics)); |
918 | memcpy(&css_chsc_characteristics, scsc_area->chsc_char, | 918 | memcpy(&css_chsc_characteristics, scsc_area->chsc_char, |
919 | sizeof(css_chsc_characteristics)); | 919 | sizeof(css_chsc_characteristics)); |
920 | } else | 920 | } else |
921 | CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", | 921 | CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", |
922 | scsc_area->response.code); | 922 | scsc_area->response.code); |
923 | exit: | 923 | exit: |
924 | free_page ((unsigned long) scsc_area); | 924 | free_page ((unsigned long) scsc_area); |
925 | return result; | 925 | return result; |
926 | } | 926 | } |
927 | 927 | ||
928 | EXPORT_SYMBOL_GPL(css_general_characteristics); | 928 | EXPORT_SYMBOL_GPL(css_general_characteristics); |
929 | EXPORT_SYMBOL_GPL(css_chsc_characteristics); | 929 | EXPORT_SYMBOL_GPL(css_chsc_characteristics); |
930 | 930 | ||
931 | int chsc_sstpc(void *page, unsigned int op, u16 ctrl) | 931 | int chsc_sstpc(void *page, unsigned int op, u16 ctrl) |
932 | { | 932 | { |
933 | struct { | 933 | struct { |
934 | struct chsc_header request; | 934 | struct chsc_header request; |
935 | unsigned int rsvd0; | 935 | unsigned int rsvd0; |
936 | unsigned int op : 8; | 936 | unsigned int op : 8; |
937 | unsigned int rsvd1 : 8; | 937 | unsigned int rsvd1 : 8; |
938 | unsigned int ctrl : 16; | 938 | unsigned int ctrl : 16; |
939 | unsigned int rsvd2[5]; | 939 | unsigned int rsvd2[5]; |
940 | struct chsc_header response; | 940 | struct chsc_header response; |
941 | unsigned int rsvd3[7]; | 941 | unsigned int rsvd3[7]; |
942 | } __attribute__ ((packed)) *rr; | 942 | } __attribute__ ((packed)) *rr; |
943 | int rc; | 943 | int rc; |
944 | 944 | ||
945 | memset(page, 0, PAGE_SIZE); | 945 | memset(page, 0, PAGE_SIZE); |
946 | rr = page; | 946 | rr = page; |
947 | rr->request.length = 0x0020; | 947 | rr->request.length = 0x0020; |
948 | rr->request.code = 0x0033; | 948 | rr->request.code = 0x0033; |
949 | rr->op = op; | 949 | rr->op = op; |
950 | rr->ctrl = ctrl; | 950 | rr->ctrl = ctrl; |
951 | rc = chsc(rr); | 951 | rc = chsc(rr); |
952 | if (rc) | 952 | if (rc) |
953 | return -EIO; | 953 | return -EIO; |
954 | rc = (rr->response.code == 0x0001) ? 0 : -EIO; | 954 | rc = (rr->response.code == 0x0001) ? 0 : -EIO; |
955 | return rc; | 955 | return rc; |
956 | } | 956 | } |
957 | 957 | ||
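The request length 0x0020 stored in the header covers exactly the bytes that precede the response header. A standalone userspace sketch, re-declaring the layout with <stdint.h> types, confirms the arithmetic (the struct name is invented for the demo):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct chsc_header {
        uint16_t length;
        uint16_t code;
} __attribute__((packed));

struct sstpc_block {                    /* mirrors the on-page struct above */
        struct chsc_header request;
        uint32_t rsvd0;
        uint32_t op : 8;
        uint32_t rsvd1 : 8;
        uint32_t ctrl : 16;
        uint32_t rsvd2[5];
        struct chsc_header response;
        uint32_t rsvd3[7];
} __attribute__((packed));

int main(void)
{
        /* 4 (request) + 4 (rsvd0) + 4 (bitfields) + 20 (rsvd2) = 32 = 0x20 */
        assert(offsetof(struct sstpc_block, response) == 0x20);
        return 0;
}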
958 | int chsc_sstpi(void *page, void *result, size_t size) | 958 | int chsc_sstpi(void *page, void *result, size_t size) |
959 | { | 959 | { |
960 | struct { | 960 | struct { |
961 | struct chsc_header request; | 961 | struct chsc_header request; |
962 | unsigned int rsvd0[3]; | 962 | unsigned int rsvd0[3]; |
963 | struct chsc_header response; | 963 | struct chsc_header response; |
964 | char data[size]; | 964 | char data[size]; |
965 | } __attribute__ ((packed)) *rr; | 965 | } __attribute__ ((packed)) *rr; |
966 | int rc; | 966 | int rc; |
967 | 967 | ||
968 | memset(page, 0, PAGE_SIZE); | 968 | memset(page, 0, PAGE_SIZE); |
969 | rr = page; | 969 | rr = page; |
970 | rr->request.length = 0x0010; | 970 | rr->request.length = 0x0010; |
971 | rr->request.code = 0x0038; | 971 | rr->request.code = 0x0038; |
972 | rc = chsc(rr); | 972 | rc = chsc(rr); |
973 | if (rc) | 973 | if (rc) |
974 | return -EIO; | 974 | return -EIO; |
975 | memcpy(result, &rr->data, size); | 975 | memcpy(result, &rr->data, size); |
976 | return (rr->response.code == 0x0001) ? 0 : -EIO; | 976 | return (rr->response.code == 0x0001) ? 0 : -EIO; |
977 | } | 977 | } |
978 | 978 | ||
979 | static struct { | 979 | static struct { |
980 | struct chsc_header request; | 980 | struct chsc_header request; |
981 | u32 word1; | 981 | u32 word1; |
982 | struct subchannel_id sid; | 982 | struct subchannel_id sid; |
983 | u32 word3; | 983 | u32 word3; |
984 | struct chsc_header response; | 984 | struct chsc_header response; |
985 | u32 word[11]; | 985 | u32 word[11]; |
986 | } __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE))); | 986 | } __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE))); |
987 | 987 | ||
988 | int chsc_siosl(struct subchannel_id schid) | 988 | int chsc_siosl(struct subchannel_id schid) |
989 | { | 989 | { |
990 | unsigned long flags; | 990 | unsigned long flags; |
991 | int ccode; | 991 | int ccode; |
992 | int rc; | 992 | int rc; |
993 | 993 | ||
994 | spin_lock_irqsave(&siosl_lock, flags); | 994 | spin_lock_irqsave(&siosl_lock, flags); |
995 | memset(&siosl_area, 0, sizeof(siosl_area)); | 995 | memset(&siosl_area, 0, sizeof(siosl_area)); |
996 | siosl_area.request.length = 0x0010; | 996 | siosl_area.request.length = 0x0010; |
997 | siosl_area.request.code = 0x0046; | 997 | siosl_area.request.code = 0x0046; |
998 | siosl_area.word1 = 0x80000000; | 998 | siosl_area.word1 = 0x80000000; |
999 | siosl_area.sid = schid; | 999 | siosl_area.sid = schid; |
1000 | 1000 | ||
1001 | ccode = chsc(&siosl_area); | 1001 | ccode = chsc(&siosl_area); |
1002 | if (ccode > 0) { | 1002 | if (ccode > 0) { |
1003 | if (ccode == 3) | 1003 | if (ccode == 3) |
1004 | rc = -ENODEV; | 1004 | rc = -ENODEV; |
1005 | else | 1005 | else |
1006 | rc = -EBUSY; | 1006 | rc = -EBUSY; |
1007 | CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n", | 1007 | CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n", |
1008 | schid.ssid, schid.sch_no, ccode); | 1008 | schid.ssid, schid.sch_no, ccode); |
1009 | goto out; | 1009 | goto out; |
1010 | } | 1010 | } |
1011 | rc = chsc_error_from_response(siosl_area.response.code); | 1011 | rc = chsc_error_from_response(siosl_area.response.code); |
1012 | if (rc) | 1012 | if (rc) |
1013 | CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", | 1013 | CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", |
1014 | schid.ssid, schid.sch_no, | 1014 | schid.ssid, schid.sch_no, |
1015 | siosl_area.response.code); | 1015 | siosl_area.response.code); |
1016 | else | 1016 | else |
1017 | CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", | 1017 | CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", |
1018 | schid.ssid, schid.sch_no); | 1018 | schid.ssid, schid.sch_no); |
1019 | out: | 1019 | out: |
1020 | spin_unlock_irqrestore(&siosl_lock, flags); | 1020 | spin_unlock_irqrestore(&siosl_lock, flags); |
1021 | 1021 | ||
1022 | return rc; | 1022 | return rc; |
1023 | } | 1023 | } |
1024 | EXPORT_SYMBOL_GPL(chsc_siosl); | 1024 | EXPORT_SYMBOL_GPL(chsc_siosl); |
1025 | 1025 |
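Unlike the routines above, chsc_siosl() issues its command from a statically allocated, page-aligned block instead of allocating per call, which is why siosl_lock must serialize all users. A generic sketch of that pattern (demo_* names are hypothetical, kernel context assumed):

static struct {
        char buf[64];           /* stands in for the command/response words */
} demo_area __attribute__((__aligned__(PAGE_SIZE)));
static DEFINE_SPINLOCK(demo_lock);

static int demo_issue(void)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&demo_lock, flags);   /* one caller at a time */
        memset(&demo_area, 0, sizeof(demo_area));
        /* ... build the request in demo_area, issue it, map the response to rc ... */
        spin_unlock_irqrestore(&demo_lock, flags);
        return rc;
}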
drivers/s390/cio/chsc.h
1 | #ifndef S390_CHSC_H | 1 | #ifndef S390_CHSC_H |
2 | #define S390_CHSC_H | 2 | #define S390_CHSC_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <asm/chpid.h> | 6 | #include <asm/chpid.h> |
7 | #include <asm/chsc.h> | 7 | #include <asm/chsc.h> |
8 | #include <asm/schid.h> | 8 | #include <asm/schid.h> |
9 | 9 | ||
10 | #define CHSC_SDA_OC_MSS 0x2 | 10 | #define CHSC_SDA_OC_MSS 0x2 |
11 | 11 | ||
12 | struct chsc_header { | 12 | struct chsc_header { |
13 | u16 length; | 13 | u16 length; |
14 | u16 code; | 14 | u16 code; |
15 | } __attribute__ ((packed)); | 15 | } __attribute__ ((packed)); |
16 | 16 | ||
17 | #define NR_MEASUREMENT_CHARS 5 | 17 | #define NR_MEASUREMENT_CHARS 5 |
18 | struct cmg_chars { | 18 | struct cmg_chars { |
19 | u32 values[NR_MEASUREMENT_CHARS]; | 19 | u32 values[NR_MEASUREMENT_CHARS]; |
20 | } __attribute__ ((packed)); | 20 | } __attribute__ ((packed)); |
21 | 21 | ||
22 | #define NR_MEASUREMENT_ENTRIES 8 | 22 | #define NR_MEASUREMENT_ENTRIES 8 |
23 | struct cmg_entry { | 23 | struct cmg_entry { |
24 | u32 values[NR_MEASUREMENT_ENTRIES]; | 24 | u32 values[NR_MEASUREMENT_ENTRIES]; |
25 | } __attribute__ ((packed)); | 25 | } __attribute__ ((packed)); |
26 | 26 | ||
27 | struct channel_path_desc { | 27 | struct channel_path_desc { |
28 | u8 flags; | 28 | u8 flags; |
29 | u8 lsn; | 29 | u8 lsn; |
30 | u8 desc; | 30 | u8 desc; |
31 | u8 chpid; | 31 | u8 chpid; |
32 | u8 swla; | 32 | u8 swla; |
33 | u8 zeroes; | 33 | u8 zeroes; |
34 | u8 chla; | 34 | u8 chla; |
35 | u8 chpp; | 35 | u8 chpp; |
36 | } __attribute__ ((packed)); | 36 | } __attribute__ ((packed)); |
37 | 37 | ||
38 | struct channel_path; | 38 | struct channel_path; |
39 | 39 | ||
40 | struct css_chsc_char { | 40 | struct css_chsc_char { |
41 | u64 res; | 41 | u64 res; |
42 | u64 : 20; | 42 | u64 : 20; |
43 | u32 secm : 1; /* bit 84 */ | 43 | u32 secm : 1; /* bit 84 */ |
44 | u32 : 1; | 44 | u32 : 1; |
45 | u32 scmc : 1; /* bit 86 */ | 45 | u32 scmc : 1; /* bit 86 */ |
46 | u32 : 20; | 46 | u32 : 20; |
47 | u32 scssc : 1; /* bit 107 */ | 47 | u32 scssc : 1; /* bit 107 */ |
48 | u32 scsscf : 1; /* bit 108 */ | 48 | u32 scsscf : 1; /* bit 108 */ |
49 | u32 : 19; | 49 | u32 : 19; |
50 | }__attribute__((packed)); | 50 | }__attribute__((packed)); |
51 | 51 | ||
52 | extern struct css_chsc_char css_chsc_characteristics; | 52 | extern struct css_chsc_char css_chsc_characteristics; |
53 | 53 | ||
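The bit numbers in the comments above count from the start of the whole characteristics block: the u64 res covers bits 0-63 and the 20-bit pad covers bits 64-83, which places secm at bit 84. A runnable userspace model of that MSB-first numbering (the helper name is invented for the demo):

#include <stdint.h>
#include <stdio.h>

/* Test absolute bit N, where bit 0 is the MSB of word 0, matching the
 * numbering used in the css_chsc_char comments. */
static int chsc_bit(const uint32_t *words, unsigned int bit)
{
        return (words[bit / 32] >> (31 - bit % 32)) & 1;
}

int main(void)
{
        uint32_t chars[16] = { 0 };

        chars[84 / 32] |= 1u << (31 - 84 % 32);         /* set "secm" (bit 84) */
        printf("secm=%d scmc=%d\n", chsc_bit(chars, 84), chsc_bit(chars, 86));
        return 0;                                       /* prints secm=1 scmc=0 */
}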
54 | struct chsc_ssd_info { | 54 | struct chsc_ssd_info { |
55 | u8 path_mask; | 55 | u8 path_mask; |
56 | u8 fla_valid_mask; | 56 | u8 fla_valid_mask; |
57 | struct chp_id chpid[8]; | 57 | struct chp_id chpid[8]; |
58 | u16 fla[8]; | 58 | u16 fla[8]; |
59 | }; | 59 | }; |
60 | extern int chsc_get_ssd_info(struct subchannel_id schid, | 60 | extern int chsc_get_ssd_info(struct subchannel_id schid, |
61 | struct chsc_ssd_info *ssd); | 61 | struct chsc_ssd_info *ssd); |
62 | extern int chsc_determine_css_characteristics(void); | 62 | extern int chsc_determine_css_characteristics(void); |
63 | extern int chsc_alloc_sei_area(void); | 63 | extern int chsc_init(void); |
64 | extern void chsc_free_sei_area(void); | 64 | extern void chsc_init_cleanup(void); |
65 | 65 | ||
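The header now pairs chsc_init() with chsc_init_cleanup(), so callers can unwind initialization on error paths. A hypothetical caller sketch (demo_* names invented; the real call sites are not shown in this hunk):

static int demo_later_step(void)
{
        return 0;       /* stand-in for a follow-on init step */
}

static int __init demo_css_setup(void)
{
        int ret;

        ret = chsc_init();
        if (ret)
                return ret;
        ret = demo_later_step();
        if (ret)
                goto out_chsc;
        return 0;
out_chsc:
        chsc_init_cleanup();    /* unwind chsc_init() in reverse order */
        return ret;
}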
66 | extern int chsc_enable_facility(int); | 66 | extern int chsc_enable_facility(int); |
67 | struct channel_subsystem; | 67 | struct channel_subsystem; |
68 | extern int chsc_secm(struct channel_subsystem *, int); | 68 | extern int chsc_secm(struct channel_subsystem *, int); |
69 | int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page); | 69 | int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page); |
70 | 70 | ||
71 | int chsc_chp_vary(struct chp_id chpid, int on); | 71 | int chsc_chp_vary(struct chp_id chpid, int on); |
72 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, | 72 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, |
73 | int c, int m, | 73 | int c, int m, |
74 | struct chsc_response_struct *resp); | 74 | struct chsc_response_struct *resp); |
75 | int chsc_determine_base_channel_path_desc(struct chp_id chpid, | 75 | int chsc_determine_base_channel_path_desc(struct chp_id chpid, |
76 | struct channel_path_desc *desc); | 76 | struct channel_path_desc *desc); |
77 | void chsc_chp_online(struct chp_id chpid); | 77 | void chsc_chp_online(struct chp_id chpid); |
78 | void chsc_chp_offline(struct chp_id chpid); | 78 | void chsc_chp_offline(struct chp_id chpid); |
79 | int chsc_get_channel_measurement_chars(struct channel_path *chp); | 79 | int chsc_get_channel_measurement_chars(struct channel_path *chp); |
80 | 80 | ||
81 | int chsc_error_from_response(int response); | 81 | int chsc_error_from_response(int response); |
82 | 82 | ||
83 | int chsc_siosl(struct subchannel_id schid); | 83 | int chsc_siosl(struct subchannel_id schid); |
84 | 84 | ||
85 | #endif | 85 | #endif |
86 | 86 |
drivers/s390/cio/css.c
1 | /* | 1 | /* |
2 | * driver for channel subsystem | 2 | * driver for channel subsystem |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2002, 2009 | 4 | * Copyright IBM Corp. 2002, 2010 |
5 | * | 5 | * |
6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "cio" | 10 | #define KMSG_COMPONENT "cio" |
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
19 | #include <linux/reboot.h> | 19 | #include <linux/reboot.h> |
20 | #include <linux/suspend.h> | 20 | #include <linux/suspend.h> |
21 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
22 | #include <asm/isc.h> | 22 | #include <asm/isc.h> |
23 | #include <asm/crw.h> | 23 | #include <asm/crw.h> |
24 | 24 | ||
25 | #include "css.h" | 25 | #include "css.h" |
26 | #include "cio.h" | 26 | #include "cio.h" |
27 | #include "cio_debug.h" | 27 | #include "cio_debug.h" |
28 | #include "ioasm.h" | 28 | #include "ioasm.h" |
29 | #include "chsc.h" | 29 | #include "chsc.h" |
30 | #include "device.h" | 30 | #include "device.h" |
31 | #include "idset.h" | 31 | #include "idset.h" |
32 | #include "chp.h" | 32 | #include "chp.h" |
33 | 33 | ||
34 | int css_init_done = 0; | 34 | int css_init_done = 0; |
35 | int max_ssid; | 35 | int max_ssid; |
36 | 36 | ||
37 | struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; | 37 | struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; |
38 | 38 | ||
39 | int | 39 | int |
40 | for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) | 40 | for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) |
41 | { | 41 | { |
42 | struct subchannel_id schid; | 42 | struct subchannel_id schid; |
43 | int ret; | 43 | int ret; |
44 | 44 | ||
45 | init_subchannel_id(&schid); | 45 | init_subchannel_id(&schid); |
46 | ret = -ENODEV; | 46 | ret = -ENODEV; |
47 | do { | 47 | do { |
48 | do { | 48 | do { |
49 | ret = fn(schid, data); | 49 | ret = fn(schid, data); |
50 | if (ret) | 50 | if (ret) |
51 | break; | 51 | break; |
52 | } while (schid.sch_no++ < __MAX_SUBCHANNEL); | 52 | } while (schid.sch_no++ < __MAX_SUBCHANNEL); |
53 | schid.sch_no = 0; | 53 | schid.sch_no = 0; |
54 | } while (schid.ssid++ < max_ssid); | 54 | } while (schid.ssid++ < max_ssid); |
55 | return ret; | 55 | return ret; |
56 | } | 56 | } |
57 | 57 | ||
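Both do/while loops above test with a post-increment, so the upper bounds are inclusive: sch_no runs 0..__MAX_SUBCHANNEL and ssid runs 0..max_ssid. A runnable userspace model with small stand-in limits:

#include <stdio.h>

#define MAX_SSID        1       /* stand-in for max_ssid */
#define MAX_SCH_NO      3       /* stand-in for __MAX_SUBCHANNEL */

int main(void)
{
        unsigned int ssid = 0, sch_no, visits = 0;

        do {
                sch_no = 0;
                do {
                        visits++;               /* fn(schid, data) would run here */
                } while (sch_no++ < MAX_SCH_NO);
        } while (ssid++ < MAX_SSID);
        /* post-increment test: both bounds are inclusive */
        printf("visited %u ids (expect %u)\n",
               visits, (MAX_SCH_NO + 1) * (MAX_SSID + 1));
        return 0;
}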
58 | struct cb_data { | 58 | struct cb_data { |
59 | void *data; | 59 | void *data; |
60 | struct idset *set; | 60 | struct idset *set; |
61 | int (*fn_known_sch)(struct subchannel *, void *); | 61 | int (*fn_known_sch)(struct subchannel *, void *); |
62 | int (*fn_unknown_sch)(struct subchannel_id, void *); | 62 | int (*fn_unknown_sch)(struct subchannel_id, void *); |
63 | }; | 63 | }; |
64 | 64 | ||
65 | static int call_fn_known_sch(struct device *dev, void *data) | 65 | static int call_fn_known_sch(struct device *dev, void *data) |
66 | { | 66 | { |
67 | struct subchannel *sch = to_subchannel(dev); | 67 | struct subchannel *sch = to_subchannel(dev); |
68 | struct cb_data *cb = data; | 68 | struct cb_data *cb = data; |
69 | int rc = 0; | 69 | int rc = 0; |
70 | 70 | ||
71 | idset_sch_del(cb->set, sch->schid); | 71 | idset_sch_del(cb->set, sch->schid); |
72 | if (cb->fn_known_sch) | 72 | if (cb->fn_known_sch) |
73 | rc = cb->fn_known_sch(sch, cb->data); | 73 | rc = cb->fn_known_sch(sch, cb->data); |
74 | return rc; | 74 | return rc; |
75 | } | 75 | } |
76 | 76 | ||
77 | static int call_fn_unknown_sch(struct subchannel_id schid, void *data) | 77 | static int call_fn_unknown_sch(struct subchannel_id schid, void *data) |
78 | { | 78 | { |
79 | struct cb_data *cb = data; | 79 | struct cb_data *cb = data; |
80 | int rc = 0; | 80 | int rc = 0; |
81 | 81 | ||
82 | if (idset_sch_contains(cb->set, schid)) | 82 | if (idset_sch_contains(cb->set, schid)) |
83 | rc = cb->fn_unknown_sch(schid, cb->data); | 83 | rc = cb->fn_unknown_sch(schid, cb->data); |
84 | return rc; | 84 | return rc; |
85 | } | 85 | } |
86 | 86 | ||
87 | static int call_fn_all_sch(struct subchannel_id schid, void *data) | 87 | static int call_fn_all_sch(struct subchannel_id schid, void *data) |
88 | { | 88 | { |
89 | struct cb_data *cb = data; | 89 | struct cb_data *cb = data; |
90 | struct subchannel *sch; | 90 | struct subchannel *sch; |
91 | int rc = 0; | 91 | int rc = 0; |
92 | 92 | ||
93 | sch = get_subchannel_by_schid(schid); | 93 | sch = get_subchannel_by_schid(schid); |
94 | if (sch) { | 94 | if (sch) { |
95 | if (cb->fn_known_sch) | 95 | if (cb->fn_known_sch) |
96 | rc = cb->fn_known_sch(sch, cb->data); | 96 | rc = cb->fn_known_sch(sch, cb->data); |
97 | put_device(&sch->dev); | 97 | put_device(&sch->dev); |
98 | } else { | 98 | } else { |
99 | if (cb->fn_unknown_sch) | 99 | if (cb->fn_unknown_sch) |
100 | rc = cb->fn_unknown_sch(schid, cb->data); | 100 | rc = cb->fn_unknown_sch(schid, cb->data); |
101 | } | 101 | } |
102 | 102 | ||
103 | return rc; | 103 | return rc; |
104 | } | 104 | } |
105 | 105 | ||
106 | int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), | 106 | int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), |
107 | int (*fn_unknown)(struct subchannel_id, | 107 | int (*fn_unknown)(struct subchannel_id, |
108 | void *), void *data) | 108 | void *), void *data) |
109 | { | 109 | { |
110 | struct cb_data cb; | 110 | struct cb_data cb; |
111 | int rc; | 111 | int rc; |
112 | 112 | ||
113 | cb.data = data; | 113 | cb.data = data; |
114 | cb.fn_known_sch = fn_known; | 114 | cb.fn_known_sch = fn_known; |
115 | cb.fn_unknown_sch = fn_unknown; | 115 | cb.fn_unknown_sch = fn_unknown; |
116 | 116 | ||
117 | cb.set = idset_sch_new(); | 117 | cb.set = idset_sch_new(); |
118 | if (!cb.set) | 118 | if (!cb.set) |
119 | /* fall back to brute force scanning in case of oom */ | 119 | /* fall back to brute force scanning in case of oom */ |
120 | return for_each_subchannel(call_fn_all_sch, &cb); | 120 | return for_each_subchannel(call_fn_all_sch, &cb); |
121 | 121 | ||
122 | idset_fill(cb.set); | 122 | idset_fill(cb.set); |
123 | 123 | ||
124 | /* Process registered subchannels. */ | 124 | /* Process registered subchannels. */ |
125 | rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch); | 125 | rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch); |
126 | if (rc) | 126 | if (rc) |
127 | goto out; | 127 | goto out; |
128 | /* Process unregistered subchannels. */ | 128 | /* Process unregistered subchannels. */ |
129 | if (fn_unknown) | 129 | if (fn_unknown) |
130 | rc = for_each_subchannel(call_fn_unknown_sch, &cb); | 130 | rc = for_each_subchannel(call_fn_unknown_sch, &cb); |
131 | out: | 131 | out: |
132 | idset_free(cb.set); | 132 | idset_free(cb.set); |
133 | 133 | ||
134 | return rc; | 134 | return rc; |
135 | } | 135 | } |
136 | 136 | ||
137 | static void css_sch_todo(struct work_struct *work); | 137 | static void css_sch_todo(struct work_struct *work); |
138 | 138 | ||
139 | static struct subchannel * | 139 | static struct subchannel * |
140 | css_alloc_subchannel(struct subchannel_id schid) | 140 | css_alloc_subchannel(struct subchannel_id schid) |
141 | { | 141 | { |
142 | struct subchannel *sch; | 142 | struct subchannel *sch; |
143 | int ret; | 143 | int ret; |
144 | 144 | ||
145 | sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); | 145 | sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); |
146 | if (sch == NULL) | 146 | if (sch == NULL) |
147 | return ERR_PTR(-ENOMEM); | 147 | return ERR_PTR(-ENOMEM); |
148 | ret = cio_validate_subchannel (sch, schid); | 148 | ret = cio_validate_subchannel (sch, schid); |
149 | if (ret < 0) { | 149 | if (ret < 0) { |
150 | kfree(sch); | 150 | kfree(sch); |
151 | return ERR_PTR(ret); | 151 | return ERR_PTR(ret); |
152 | } | 152 | } |
153 | INIT_WORK(&sch->todo_work, css_sch_todo); | 153 | INIT_WORK(&sch->todo_work, css_sch_todo); |
154 | return sch; | 154 | return sch; |
155 | } | 155 | } |
156 | 156 | ||
157 | static void | 157 | static void |
158 | css_subchannel_release(struct device *dev) | 158 | css_subchannel_release(struct device *dev) |
159 | { | 159 | { |
160 | struct subchannel *sch; | 160 | struct subchannel *sch; |
161 | 161 | ||
162 | sch = to_subchannel(dev); | 162 | sch = to_subchannel(dev); |
163 | if (!cio_is_console(sch->schid)) { | 163 | if (!cio_is_console(sch->schid)) { |
164 | /* Reset intparm to zeroes. */ | 164 | /* Reset intparm to zeroes. */ |
165 | sch->config.intparm = 0; | 165 | sch->config.intparm = 0; |
166 | cio_commit_config(sch); | 166 | cio_commit_config(sch); |
167 | kfree(sch->lock); | 167 | kfree(sch->lock); |
168 | kfree(sch); | 168 | kfree(sch); |
169 | } | 169 | } |
170 | } | 170 | } |
171 | 171 | ||
172 | static int css_sch_device_register(struct subchannel *sch) | 172 | static int css_sch_device_register(struct subchannel *sch) |
173 | { | 173 | { |
174 | int ret; | 174 | int ret; |
175 | 175 | ||
176 | mutex_lock(&sch->reg_mutex); | 176 | mutex_lock(&sch->reg_mutex); |
177 | dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid, | 177 | dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid, |
178 | sch->schid.sch_no); | 178 | sch->schid.sch_no); |
179 | ret = device_register(&sch->dev); | 179 | ret = device_register(&sch->dev); |
180 | mutex_unlock(&sch->reg_mutex); | 180 | mutex_unlock(&sch->reg_mutex); |
181 | return ret; | 181 | return ret; |
182 | } | 182 | } |
183 | 183 | ||
184 | /** | 184 | /** |
185 | * css_sch_device_unregister - unregister a subchannel | 185 | * css_sch_device_unregister - unregister a subchannel |
186 | * @sch: subchannel to be unregistered | 186 | * @sch: subchannel to be unregistered |
187 | */ | 187 | */ |
188 | void css_sch_device_unregister(struct subchannel *sch) | 188 | void css_sch_device_unregister(struct subchannel *sch) |
189 | { | 189 | { |
190 | mutex_lock(&sch->reg_mutex); | 190 | mutex_lock(&sch->reg_mutex); |
191 | if (device_is_registered(&sch->dev)) | 191 | if (device_is_registered(&sch->dev)) |
192 | device_unregister(&sch->dev); | 192 | device_unregister(&sch->dev); |
193 | mutex_unlock(&sch->reg_mutex); | 193 | mutex_unlock(&sch->reg_mutex); |
194 | } | 194 | } |
195 | EXPORT_SYMBOL_GPL(css_sch_device_unregister); | 195 | EXPORT_SYMBOL_GPL(css_sch_device_unregister); |
196 | 196 | ||
197 | static void css_sch_todo(struct work_struct *work) | 197 | static void css_sch_todo(struct work_struct *work) |
198 | { | 198 | { |
199 | struct subchannel *sch; | 199 | struct subchannel *sch; |
200 | enum sch_todo todo; | 200 | enum sch_todo todo; |
201 | 201 | ||
202 | sch = container_of(work, struct subchannel, todo_work); | 202 | sch = container_of(work, struct subchannel, todo_work); |
203 | /* Find out todo. */ | 203 | /* Find out todo. */ |
204 | spin_lock_irq(sch->lock); | 204 | spin_lock_irq(sch->lock); |
205 | todo = sch->todo; | 205 | todo = sch->todo; |
206 | CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, | 206 | CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, |
207 | sch->schid.sch_no, todo); | 207 | sch->schid.sch_no, todo); |
208 | sch->todo = SCH_TODO_NOTHING; | 208 | sch->todo = SCH_TODO_NOTHING; |
209 | spin_unlock_irq(sch->lock); | 209 | spin_unlock_irq(sch->lock); |
210 | /* Perform todo. */ | 210 | /* Perform todo. */ |
211 | if (todo == SCH_TODO_UNREG) | 211 | if (todo == SCH_TODO_UNREG) |
212 | css_sch_device_unregister(sch); | 212 | css_sch_device_unregister(sch); |
213 | /* Release workqueue ref. */ | 213 | /* Release workqueue ref. */ |
214 | put_device(&sch->dev); | 214 | put_device(&sch->dev); |
215 | } | 215 | } |
216 | 216 | ||
217 | /** | 217 | /** |
218 | * css_sched_sch_todo - schedule a subchannel operation | 218 | * css_sched_sch_todo - schedule a subchannel operation |
219 | * @sch: subchannel | 219 | * @sch: subchannel |
220 | * @todo: todo | 220 | * @todo: todo |
221 | * | 221 | * |
222 | * Schedule the operation identified by @todo to be performed on the slow path | 222 | * Schedule the operation identified by @todo to be performed on the slow path |
223 | * workqueue. Do nothing if another operation with higher priority is already | 223 | * workqueue. Do nothing if another operation with higher priority is already |
224 | * scheduled. Needs to be called with subchannel lock held. | 224 | * scheduled. Needs to be called with subchannel lock held. |
225 | */ | 225 | */ |
226 | void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) | 226 | void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) |
227 | { | 227 | { |
228 | CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", | 228 | CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", |
229 | sch->schid.ssid, sch->schid.sch_no, todo); | 229 | sch->schid.ssid, sch->schid.sch_no, todo); |
230 | if (sch->todo >= todo) | 230 | if (sch->todo >= todo) |
231 | return; | 231 | return; |
232 | /* Get workqueue ref. */ | 232 | /* Get workqueue ref. */ |
233 | if (!get_device(&sch->dev)) | 233 | if (!get_device(&sch->dev)) |
234 | return; | 234 | return; |
235 | sch->todo = todo; | 235 | sch->todo = todo; |
236 | if (!queue_work(cio_work_q, &sch->todo_work)) { | 236 | if (!queue_work(cio_work_q, &sch->todo_work)) { |
237 | /* Already queued, release workqueue ref. */ | 237 | /* Already queued, release workqueue ref. */ |
238 | put_device(&sch->dev); | 238 | put_device(&sch->dev); |
239 | } | 239 | } |
240 | } | 240 | } |
241 | 241 | ||
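css_sched_sch_todo() pins the subchannel with get_device() before queuing and drops that reference immediately if the work was already pending; the worker drops it once the todo has run. A generic sketch of this reference-counting pattern (kernel context assumed, names hypothetical):

static void demo_schedule(struct device *dev, struct work_struct *work,
                          struct workqueue_struct *wq)
{
        if (!get_device(dev))           /* pin the object for the worker */
                return;
        if (!queue_work(wq, work))
                put_device(dev);        /* already queued: drop the extra ref */
        /* otherwise the work function calls put_device() when it finishes */
}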
242 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) | 242 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) |
243 | { | 243 | { |
244 | int i; | 244 | int i; |
245 | int mask; | 245 | int mask; |
246 | 246 | ||
247 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); | 247 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); |
248 | ssd->path_mask = pmcw->pim; | 248 | ssd->path_mask = pmcw->pim; |
249 | for (i = 0; i < 8; i++) { | 249 | for (i = 0; i < 8; i++) { |
250 | mask = 0x80 >> i; | 250 | mask = 0x80 >> i; |
251 | if (pmcw->pim & mask) { | 251 | if (pmcw->pim & mask) { |
252 | chp_id_init(&ssd->chpid[i]); | 252 | chp_id_init(&ssd->chpid[i]); |
253 | ssd->chpid[i].id = pmcw->chpid[i]; | 253 | ssd->chpid[i].id = pmcw->chpid[i]; |
254 | } | 254 | } |
255 | } | 255 | } |
256 | } | 256 | } |
257 | 257 | ||
258 | static void ssd_register_chpids(struct chsc_ssd_info *ssd) | 258 | static void ssd_register_chpids(struct chsc_ssd_info *ssd) |
259 | { | 259 | { |
260 | int i; | 260 | int i; |
261 | int mask; | 261 | int mask; |
262 | 262 | ||
263 | for (i = 0; i < 8; i++) { | 263 | for (i = 0; i < 8; i++) { |
264 | mask = 0x80 >> i; | 264 | mask = 0x80 >> i; |
265 | if (ssd->path_mask & mask) | 265 | if (ssd->path_mask & mask) |
266 | if (!chp_is_registered(ssd->chpid[i])) | 266 | if (!chp_is_registered(ssd->chpid[i])) |
267 | chp_new(ssd->chpid[i]); | 267 | chp_new(ssd->chpid[i]); |
268 | } | 268 | } |
269 | } | 269 | } |
270 | 270 | ||
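Both helpers above walk the installed paths MSB-first: mask = 0x80 >> i makes path 0 the high-order bit of the 8-bit pim/path_mask. A runnable demo of that convention (the mask value is an example):

#include <stdio.h>

int main(void)
{
        unsigned char path_mask = 0xA0; /* example value: paths 0 and 2 installed */
        int i;

        for (i = 0; i < 8; i++)
                if (path_mask & (0x80 >> i))    /* MSB-first, matching pim layout */
                        printf("path %d present\n", i);
        return 0;                               /* prints paths 0 and 2 */
}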
271 | void css_update_ssd_info(struct subchannel *sch) | 271 | void css_update_ssd_info(struct subchannel *sch) |
272 | { | 272 | { |
273 | int ret; | 273 | int ret; |
274 | 274 | ||
275 | if (cio_is_console(sch->schid)) { | 275 | if (cio_is_console(sch->schid)) { |
276 | /* Console is initialized too early for functions requiring | 276 | /* Console is initialized too early for functions requiring |
277 | * memory allocation. */ | 277 | * memory allocation. */ |
278 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); | 278 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); |
279 | } else { | 279 | } else { |
280 | ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); | 280 | ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); |
281 | if (ret) | 281 | if (ret) |
282 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); | 282 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); |
283 | ssd_register_chpids(&sch->ssd_info); | 283 | ssd_register_chpids(&sch->ssd_info); |
284 | } | 284 | } |
285 | } | 285 | } |
286 | 286 | ||
287 | static ssize_t type_show(struct device *dev, struct device_attribute *attr, | 287 | static ssize_t type_show(struct device *dev, struct device_attribute *attr, |
288 | char *buf) | 288 | char *buf) |
289 | { | 289 | { |
290 | struct subchannel *sch = to_subchannel(dev); | 290 | struct subchannel *sch = to_subchannel(dev); |
291 | 291 | ||
292 | return sprintf(buf, "%01x\n", sch->st); | 292 | return sprintf(buf, "%01x\n", sch->st); |
293 | } | 293 | } |
294 | 294 | ||
295 | static DEVICE_ATTR(type, 0444, type_show, NULL); | 295 | static DEVICE_ATTR(type, 0444, type_show, NULL); |
296 | 296 | ||
297 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | 297 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, |
298 | char *buf) | 298 | char *buf) |
299 | { | 299 | { |
300 | struct subchannel *sch = to_subchannel(dev); | 300 | struct subchannel *sch = to_subchannel(dev); |
301 | 301 | ||
302 | return sprintf(buf, "css:t%01X\n", sch->st); | 302 | return sprintf(buf, "css:t%01X\n", sch->st); |
303 | } | 303 | } |
304 | 304 | ||
305 | static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); | 305 | static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); |
306 | 306 | ||
307 | static struct attribute *subch_attrs[] = { | 307 | static struct attribute *subch_attrs[] = { |
308 | &dev_attr_type.attr, | 308 | &dev_attr_type.attr, |
309 | &dev_attr_modalias.attr, | 309 | &dev_attr_modalias.attr, |
310 | NULL, | 310 | NULL, |
311 | }; | 311 | }; |
312 | 312 | ||
313 | static struct attribute_group subch_attr_group = { | 313 | static struct attribute_group subch_attr_group = { |
314 | .attrs = subch_attrs, | 314 | .attrs = subch_attrs, |
315 | }; | 315 | }; |
316 | 316 | ||
317 | static const struct attribute_group *default_subch_attr_groups[] = { | 317 | static const struct attribute_group *default_subch_attr_groups[] = { |
318 | &subch_attr_group, | 318 | &subch_attr_group, |
319 | NULL, | 319 | NULL, |
320 | }; | 320 | }; |
321 | 321 | ||
322 | static int css_register_subchannel(struct subchannel *sch) | 322 | static int css_register_subchannel(struct subchannel *sch) |
323 | { | 323 | { |
324 | int ret; | 324 | int ret; |
325 | 325 | ||
326 | /* Initialize the subchannel structure */ | 326 | /* Initialize the subchannel structure */ |
327 | sch->dev.parent = &channel_subsystems[0]->device; | 327 | sch->dev.parent = &channel_subsystems[0]->device; |
328 | sch->dev.bus = &css_bus_type; | 328 | sch->dev.bus = &css_bus_type; |
329 | sch->dev.release = &css_subchannel_release; | 329 | sch->dev.release = &css_subchannel_release; |
330 | sch->dev.groups = default_subch_attr_groups; | 330 | sch->dev.groups = default_subch_attr_groups; |
331 | /* | 331 | /* |
332 | * We don't want to generate uevents for I/O subchannels that don't | 332 | * We don't want to generate uevents for I/O subchannels that don't |
333 | * have a working ccw device behind them since they will be | 333 | * have a working ccw device behind them since they will be |
334 | * unregistered before they can be used anyway, so we delay the add | 334 | * unregistered before they can be used anyway, so we delay the add |
335 | * uevent until after device recognition was successful. | 335 | * uevent until after device recognition was successful. |
336 | * Note that we suppress the uevent for all subchannel types; | 336 | * Note that we suppress the uevent for all subchannel types; |
337 | * the subchannel driver can decide itself when it wants to inform | 337 | * the subchannel driver can decide itself when it wants to inform |
338 | * userspace of its existence. | 338 | * userspace of its existence. |
339 | */ | 339 | */ |
340 | dev_set_uevent_suppress(&sch->dev, 1); | 340 | dev_set_uevent_suppress(&sch->dev, 1); |
341 | css_update_ssd_info(sch); | 341 | css_update_ssd_info(sch); |
342 | /* make it known to the system */ | 342 | /* make it known to the system */ |
343 | ret = css_sch_device_register(sch); | 343 | ret = css_sch_device_register(sch); |
344 | if (ret) { | 344 | if (ret) { |
345 | CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n", | 345 | CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n", |
346 | sch->schid.ssid, sch->schid.sch_no, ret); | 346 | sch->schid.ssid, sch->schid.sch_no, ret); |
347 | return ret; | 347 | return ret; |
348 | } | 348 | } |
349 | if (!sch->driver) { | 349 | if (!sch->driver) { |
350 | /* | 350 | /* |
351 | * No driver matched. Generate the uevent now so that | 351 | * No driver matched. Generate the uevent now so that |
352 | * a fitting driver module may be loaded based on the | 352 | * a fitting driver module may be loaded based on the |
353 | * modalias. | 353 | * modalias. |
354 | */ | 354 | */ |
355 | dev_set_uevent_suppress(&sch->dev, 0); | 355 | dev_set_uevent_suppress(&sch->dev, 0); |
356 | kobject_uevent(&sch->dev.kobj, KOBJ_ADD); | 356 | kobject_uevent(&sch->dev.kobj, KOBJ_ADD); |
357 | } | 357 | } |
358 | return ret; | 358 | return ret; |
359 | } | 359 | } |
360 | 360 | ||
361 | int css_probe_device(struct subchannel_id schid) | 361 | int css_probe_device(struct subchannel_id schid) |
362 | { | 362 | { |
363 | int ret; | 363 | int ret; |
364 | struct subchannel *sch; | 364 | struct subchannel *sch; |
365 | 365 | ||
366 | if (cio_is_console(schid)) | 366 | if (cio_is_console(schid)) |
367 | sch = cio_get_console_subchannel(); | 367 | sch = cio_get_console_subchannel(); |
368 | else { | 368 | else { |
369 | sch = css_alloc_subchannel(schid); | 369 | sch = css_alloc_subchannel(schid); |
370 | if (IS_ERR(sch)) | 370 | if (IS_ERR(sch)) |
371 | return PTR_ERR(sch); | 371 | return PTR_ERR(sch); |
372 | } | 372 | } |
373 | ret = css_register_subchannel(sch); | 373 | ret = css_register_subchannel(sch); |
374 | if (ret) { | 374 | if (ret) { |
375 | if (!cio_is_console(schid)) | 375 | if (!cio_is_console(schid)) |
376 | put_device(&sch->dev); | 376 | put_device(&sch->dev); |
377 | } | 377 | } |
378 | return ret; | 378 | return ret; |
379 | } | 379 | } |
380 | 380 | ||
381 | static int | 381 | static int |
382 | check_subchannel(struct device * dev, void * data) | 382 | check_subchannel(struct device * dev, void * data) |
383 | { | 383 | { |
384 | struct subchannel *sch; | 384 | struct subchannel *sch; |
385 | struct subchannel_id *schid = data; | 385 | struct subchannel_id *schid = data; |
386 | 386 | ||
387 | sch = to_subchannel(dev); | 387 | sch = to_subchannel(dev); |
388 | return schid_equal(&sch->schid, schid); | 388 | return schid_equal(&sch->schid, schid); |
389 | } | 389 | } |
390 | 390 | ||
391 | struct subchannel * | 391 | struct subchannel * |
392 | get_subchannel_by_schid(struct subchannel_id schid) | 392 | get_subchannel_by_schid(struct subchannel_id schid) |
393 | { | 393 | { |
394 | struct device *dev; | 394 | struct device *dev; |
395 | 395 | ||
396 | dev = bus_find_device(&css_bus_type, NULL, | 396 | dev = bus_find_device(&css_bus_type, NULL, |
397 | &schid, check_subchannel); | 397 | &schid, check_subchannel); |
398 | 398 | ||
399 | return dev ? to_subchannel(dev) : NULL; | 399 | return dev ? to_subchannel(dev) : NULL; |
400 | } | 400 | } |
401 | 401 | ||
402 | /** | 402 | /** |
403 | * css_sch_is_valid() - check if a subchannel is valid | 403 | * css_sch_is_valid() - check if a subchannel is valid |
404 | * @schib: subchannel information block for the subchannel | 404 | * @schib: subchannel information block for the subchannel |
405 | */ | 405 | */ |
406 | int css_sch_is_valid(struct schib *schib) | 406 | int css_sch_is_valid(struct schib *schib) |
407 | { | 407 | { |
408 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) | 408 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) |
409 | return 0; | 409 | return 0; |
410 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w) | 410 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w) |
411 | return 0; | 411 | return 0; |
412 | return 1; | 412 | return 1; |
413 | } | 413 | } |
414 | EXPORT_SYMBOL_GPL(css_sch_is_valid); | 414 | EXPORT_SYMBOL_GPL(css_sch_is_valid); |
415 | 415 | ||
416 | static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | 416 | static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) |
417 | { | 417 | { |
418 | struct schib schib; | 418 | struct schib schib; |
419 | 419 | ||
420 | if (!slow) { | 420 | if (!slow) { |
421 | /* Will be done on the slow path. */ | 421 | /* Will be done on the slow path. */ |
422 | return -EAGAIN; | 422 | return -EAGAIN; |
423 | } | 423 | } |
424 | if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) { | 424 | if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) { |
425 | /* Unusable - ignore. */ | 425 | /* Unusable - ignore. */ |
426 | return 0; | 426 | return 0; |
427 | } | 427 | } |
428 | CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid, | 428 | CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid, |
429 | schid.sch_no); | 429 | schid.sch_no); |
430 | 430 | ||
431 | return css_probe_device(schid); | 431 | return css_probe_device(schid); |
432 | } | 432 | } |
433 | 433 | ||
434 | static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) | 434 | static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) |
435 | { | 435 | { |
436 | int ret = 0; | 436 | int ret = 0; |
437 | 437 | ||
438 | if (sch->driver) { | 438 | if (sch->driver) { |
439 | if (sch->driver->sch_event) | 439 | if (sch->driver->sch_event) |
440 | ret = sch->driver->sch_event(sch, slow); | 440 | ret = sch->driver->sch_event(sch, slow); |
441 | else | 441 | else |
442 | dev_dbg(&sch->dev, | 442 | dev_dbg(&sch->dev, |
443 | "Got subchannel machine check but " | 443 | "Got subchannel machine check but " |
444 | "no sch_event handler provided.\n"); | 444 | "no sch_event handler provided.\n"); |
445 | } | 445 | } |
446 | if (ret != 0 && ret != -EAGAIN) { | 446 | if (ret != 0 && ret != -EAGAIN) { |
447 | CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n", | 447 | CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n", |
448 | sch->schid.ssid, sch->schid.sch_no, ret); | 448 | sch->schid.ssid, sch->schid.sch_no, ret); |
449 | } | 449 | } |
450 | return ret; | 450 | return ret; |
451 | } | 451 | } |
452 | 452 | ||
453 | static void css_evaluate_subchannel(struct subchannel_id schid, int slow) | 453 | static void css_evaluate_subchannel(struct subchannel_id schid, int slow) |
454 | { | 454 | { |
455 | struct subchannel *sch; | 455 | struct subchannel *sch; |
456 | int ret; | 456 | int ret; |
457 | 457 | ||
458 | sch = get_subchannel_by_schid(schid); | 458 | sch = get_subchannel_by_schid(schid); |
459 | if (sch) { | 459 | if (sch) { |
460 | ret = css_evaluate_known_subchannel(sch, slow); | 460 | ret = css_evaluate_known_subchannel(sch, slow); |
461 | put_device(&sch->dev); | 461 | put_device(&sch->dev); |
462 | } else | 462 | } else |
463 | ret = css_evaluate_new_subchannel(schid, slow); | 463 | ret = css_evaluate_new_subchannel(schid, slow); |
464 | if (ret == -EAGAIN) | 464 | if (ret == -EAGAIN) |
465 | css_schedule_eval(schid); | 465 | css_schedule_eval(schid); |
466 | } | 466 | } |
467 | 467 | ||
468 | static struct idset *slow_subchannel_set; | 468 | static struct idset *slow_subchannel_set; |
469 | static spinlock_t slow_subchannel_lock; | 469 | static spinlock_t slow_subchannel_lock; |
470 | static wait_queue_head_t css_eval_wq; | 470 | static wait_queue_head_t css_eval_wq; |
471 | static atomic_t css_eval_scheduled; | 471 | static atomic_t css_eval_scheduled; |
472 | 472 | ||
473 | static int __init slow_subchannel_init(void) | 473 | static int __init slow_subchannel_init(void) |
474 | { | 474 | { |
475 | spin_lock_init(&slow_subchannel_lock); | 475 | spin_lock_init(&slow_subchannel_lock); |
476 | atomic_set(&css_eval_scheduled, 0); | 476 | atomic_set(&css_eval_scheduled, 0); |
477 | init_waitqueue_head(&css_eval_wq); | 477 | init_waitqueue_head(&css_eval_wq); |
478 | slow_subchannel_set = idset_sch_new(); | 478 | slow_subchannel_set = idset_sch_new(); |
479 | if (!slow_subchannel_set) { | 479 | if (!slow_subchannel_set) { |
480 | CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n"); | 480 | CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n"); |
481 | return -ENOMEM; | 481 | return -ENOMEM; |
482 | } | 482 | } |
483 | return 0; | 483 | return 0; |
484 | } | 484 | } |
485 | 485 | ||
486 | static int slow_eval_known_fn(struct subchannel *sch, void *data) | 486 | static int slow_eval_known_fn(struct subchannel *sch, void *data) |
487 | { | 487 | { |
488 | int eval; | 488 | int eval; |
489 | int rc; | 489 | int rc; |
490 | 490 | ||
491 | spin_lock_irq(&slow_subchannel_lock); | 491 | spin_lock_irq(&slow_subchannel_lock); |
492 | eval = idset_sch_contains(slow_subchannel_set, sch->schid); | 492 | eval = idset_sch_contains(slow_subchannel_set, sch->schid); |
493 | idset_sch_del(slow_subchannel_set, sch->schid); | 493 | idset_sch_del(slow_subchannel_set, sch->schid); |
494 | spin_unlock_irq(&slow_subchannel_lock); | 494 | spin_unlock_irq(&slow_subchannel_lock); |
495 | if (eval) { | 495 | if (eval) { |
496 | rc = css_evaluate_known_subchannel(sch, 1); | 496 | rc = css_evaluate_known_subchannel(sch, 1); |
497 | if (rc == -EAGAIN) | 497 | if (rc == -EAGAIN) |
498 | css_schedule_eval(sch->schid); | 498 | css_schedule_eval(sch->schid); |
499 | } | 499 | } |
500 | return 0; | 500 | return 0; |
501 | } | 501 | } |
502 | 502 | ||
503 | static int slow_eval_unknown_fn(struct subchannel_id schid, void *data) | 503 | static int slow_eval_unknown_fn(struct subchannel_id schid, void *data) |
504 | { | 504 | { |
505 | int eval; | 505 | int eval; |
506 | int rc = 0; | 506 | int rc = 0; |
507 | 507 | ||
508 | spin_lock_irq(&slow_subchannel_lock); | 508 | spin_lock_irq(&slow_subchannel_lock); |
509 | eval = idset_sch_contains(slow_subchannel_set, schid); | 509 | eval = idset_sch_contains(slow_subchannel_set, schid); |
510 | idset_sch_del(slow_subchannel_set, schid); | 510 | idset_sch_del(slow_subchannel_set, schid); |
511 | spin_unlock_irq(&slow_subchannel_lock); | 511 | spin_unlock_irq(&slow_subchannel_lock); |
512 | if (eval) { | 512 | if (eval) { |
513 | rc = css_evaluate_new_subchannel(schid, 1); | 513 | rc = css_evaluate_new_subchannel(schid, 1); |
514 | switch (rc) { | 514 | switch (rc) { |
515 | case -EAGAIN: | 515 | case -EAGAIN: |
516 | css_schedule_eval(schid); | 516 | css_schedule_eval(schid); |
517 | rc = 0; | 517 | rc = 0; |
518 | break; | 518 | break; |
519 | case -ENXIO: | 519 | case -ENXIO: |
520 | case -ENOMEM: | 520 | case -ENOMEM: |
521 | case -EIO: | 521 | case -EIO: |
522 | /* These should abort looping */ | 522 | /* These should abort looping */ |
523 | break; | 523 | break; |
524 | default: | 524 | default: |
525 | rc = 0; | 525 | rc = 0; |
526 | } | 526 | } |
527 | } | 527 | } |
528 | return rc; | 528 | return rc; |
529 | } | 529 | } |
530 | 530 | ||
531 | static void css_slow_path_func(struct work_struct *unused) | 531 | static void css_slow_path_func(struct work_struct *unused) |
532 | { | 532 | { |
533 | unsigned long flags; | 533 | unsigned long flags; |
534 | 534 | ||
535 | CIO_TRACE_EVENT(4, "slowpath"); | 535 | CIO_TRACE_EVENT(4, "slowpath"); |
536 | for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn, | 536 | for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn, |
537 | NULL); | 537 | NULL); |
538 | spin_lock_irqsave(&slow_subchannel_lock, flags); | 538 | spin_lock_irqsave(&slow_subchannel_lock, flags); |
539 | if (idset_is_empty(slow_subchannel_set)) { | 539 | if (idset_is_empty(slow_subchannel_set)) { |
540 | atomic_set(&css_eval_scheduled, 0); | 540 | atomic_set(&css_eval_scheduled, 0); |
541 | wake_up(&css_eval_wq); | 541 | wake_up(&css_eval_wq); |
542 | } | 542 | } |
543 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | 543 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); |
544 | } | 544 | } |
545 | 545 | ||
546 | static DECLARE_WORK(slow_path_work, css_slow_path_func); | 546 | static DECLARE_WORK(slow_path_work, css_slow_path_func); |
547 | struct workqueue_struct *cio_work_q; | 547 | struct workqueue_struct *cio_work_q; |
548 | 548 | ||
549 | void css_schedule_eval(struct subchannel_id schid) | 549 | void css_schedule_eval(struct subchannel_id schid) |
550 | { | 550 | { |
551 | unsigned long flags; | 551 | unsigned long flags; |
552 | 552 | ||
553 | spin_lock_irqsave(&slow_subchannel_lock, flags); | 553 | spin_lock_irqsave(&slow_subchannel_lock, flags); |
554 | idset_sch_add(slow_subchannel_set, schid); | 554 | idset_sch_add(slow_subchannel_set, schid); |
555 | atomic_set(&css_eval_scheduled, 1); | 555 | atomic_set(&css_eval_scheduled, 1); |
556 | queue_work(cio_work_q, &slow_path_work); | 556 | queue_work(cio_work_q, &slow_path_work); |
557 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | 557 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); |
558 | } | 558 | } |
559 | 559 | ||
560 | void css_schedule_eval_all(void) | 560 | void css_schedule_eval_all(void) |
561 | { | 561 | { |
562 | unsigned long flags; | 562 | unsigned long flags; |
563 | 563 | ||
564 | spin_lock_irqsave(&slow_subchannel_lock, flags); | 564 | spin_lock_irqsave(&slow_subchannel_lock, flags); |
565 | idset_fill(slow_subchannel_set); | 565 | idset_fill(slow_subchannel_set); |
566 | atomic_set(&css_eval_scheduled, 1); | 566 | atomic_set(&css_eval_scheduled, 1); |
567 | queue_work(cio_work_q, &slow_path_work); | 567 | queue_work(cio_work_q, &slow_path_work); |
568 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | 568 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); |
569 | } | 569 | } |
570 | 570 | ||
571 | static int __unset_registered(struct device *dev, void *data) | 571 | static int __unset_registered(struct device *dev, void *data) |
572 | { | 572 | { |
573 | struct idset *set = data; | 573 | struct idset *set = data; |
574 | struct subchannel *sch = to_subchannel(dev); | 574 | struct subchannel *sch = to_subchannel(dev); |
575 | 575 | ||
576 | idset_sch_del(set, sch->schid); | 576 | idset_sch_del(set, sch->schid); |
577 | return 0; | 577 | return 0; |
578 | } | 578 | } |
579 | 579 | ||
580 | static void css_schedule_eval_all_unreg(void) | 580 | static void css_schedule_eval_all_unreg(void) |
581 | { | 581 | { |
582 | unsigned long flags; | 582 | unsigned long flags; |
583 | struct idset *unreg_set; | 583 | struct idset *unreg_set; |
584 | 584 | ||
585 | /* Find unregistered subchannels. */ | 585 | /* Find unregistered subchannels. */ |
586 | unreg_set = idset_sch_new(); | 586 | unreg_set = idset_sch_new(); |
587 | if (!unreg_set) { | 587 | if (!unreg_set) { |
588 | /* Fallback. */ | 588 | /* Fallback. */ |
589 | css_schedule_eval_all(); | 589 | css_schedule_eval_all(); |
590 | return; | 590 | return; |
591 | } | 591 | } |
592 | idset_fill(unreg_set); | 592 | idset_fill(unreg_set); |
593 | bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered); | 593 | bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered); |
594 | /* Apply to slow_subchannel_set. */ | 594 | /* Apply to slow_subchannel_set. */ |
595 | spin_lock_irqsave(&slow_subchannel_lock, flags); | 595 | spin_lock_irqsave(&slow_subchannel_lock, flags); |
596 | idset_add_set(slow_subchannel_set, unreg_set); | 596 | idset_add_set(slow_subchannel_set, unreg_set); |
597 | atomic_set(&css_eval_scheduled, 1); | 597 | atomic_set(&css_eval_scheduled, 1); |
598 | queue_work(cio_work_q, &slow_path_work); | 598 | queue_work(cio_work_q, &slow_path_work); |
599 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | 599 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); |
600 | idset_free(unreg_set); | 600 | idset_free(unreg_set); |
601 | } | 601 | } |
602 | 602 | ||
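css_schedule_eval_all_unreg() computes "all subchannels minus registered ones" by filling an idset and then deleting every ID found on the bus. A runnable bitmask model of that set subtraction (an 8-bit mask stands in for the idset, values are examples):

#include <stdio.h>

int main(void)
{
        unsigned int all = 0xFF;                /* idset_fill(): every id */
        unsigned int registered = 0x5A;         /* ids seen by bus_for_each_dev() */
        unsigned int unreg = all & ~registered; /* __unset_registered() per device */

        printf("unregistered ids: 0x%02X\n", unreg);    /* prints 0xA5 */
        return 0;
}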
603 | void css_wait_for_slow_path(void) | 603 | void css_wait_for_slow_path(void) |
604 | { | 604 | { |
605 | flush_workqueue(cio_work_q); | 605 | flush_workqueue(cio_work_q); |
606 | } | 606 | } |
607 | 607 | ||
608 | /* Schedule reprobing of all unregistered subchannels. */ | 608 | /* Schedule reprobing of all unregistered subchannels. */ |
609 | void css_schedule_reprobe(void) | 609 | void css_schedule_reprobe(void) |
610 | { | 610 | { |
611 | css_schedule_eval_all_unreg(); | 611 | css_schedule_eval_all_unreg(); |
612 | } | 612 | } |
613 | EXPORT_SYMBOL_GPL(css_schedule_reprobe); | 613 | EXPORT_SYMBOL_GPL(css_schedule_reprobe); |
614 | 614 | ||
615 | /* | 615 | /* |
616 | * Called from the machine check handler for subchannel report words. | 616 | * Called from the machine check handler for subchannel report words. |
617 | */ | 617 | */ |
618 | static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) | 618 | static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) |
619 | { | 619 | { |
620 | struct subchannel_id mchk_schid; | 620 | struct subchannel_id mchk_schid; |
621 | 621 | ||
622 | if (overflow) { | 622 | if (overflow) { |
623 | css_schedule_eval_all(); | 623 | css_schedule_eval_all(); |
624 | return; | 624 | return; |
625 | } | 625 | } |
626 | CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, " | 626 | CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, " |
627 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | 627 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", |
628 | crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, | 628 | crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, |
629 | crw0->erc, crw0->rsid); | 629 | crw0->erc, crw0->rsid); |
630 | if (crw1) | 630 | if (crw1) |
631 | CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, " | 631 | CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, " |
632 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | 632 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", |
633 | crw1->slct, crw1->oflw, crw1->chn, crw1->rsc, | 633 | crw1->slct, crw1->oflw, crw1->chn, crw1->rsc, |
634 | crw1->anc, crw1->erc, crw1->rsid); | 634 | crw1->anc, crw1->erc, crw1->rsid); |
635 | init_subchannel_id(&mchk_schid); | 635 | init_subchannel_id(&mchk_schid); |
636 | mchk_schid.sch_no = crw0->rsid; | 636 | mchk_schid.sch_no = crw0->rsid; |
637 | if (crw1) | 637 | if (crw1) |
638 | mchk_schid.ssid = (crw1->rsid >> 8) & 3; | 638 | mchk_schid.ssid = (crw1->rsid >> 8) & 3; |
639 | 639 | ||
640 | /* | 640 | /* |
641 | * Since we are always presented with IPI in the CRW, we have to | 641 | * Since we are always presented with IPI in the CRW, we have to |
642 | * use stsch() to find out if the subchannel in question has come | 642 | * use stsch() to find out if the subchannel in question has come |
643 | * or gone. | 643 | * or gone. |
644 | */ | 644 | */ |
645 | css_evaluate_subchannel(mchk_schid, 0); | 645 | css_evaluate_subchannel(mchk_schid, 0); |
646 | } | 646 | } |
647 | 647 | ||
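When a second CRW is chained, its rsid carries the ssid above the low byte, which the handler above extracts with (rsid >> 8) & 3. A runnable demo of that extraction (the rsid values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t rsid0 = 0x4711;        /* hypothetical CRW0 rsid: subchannel no. */
        uint16_t rsid1 = 0x0200;        /* hypothetical CRW1 rsid: ssid bits */
        unsigned int sch_no = rsid0;
        unsigned int ssid = (rsid1 >> 8) & 3;   /* same extraction as above */

        printf("schid 0.%x.%04x\n", ssid, sch_no);      /* prints 0.2.4711 */
        return 0;
}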
648 | static void __init | 648 | static void __init |
649 | css_generate_pgid(struct channel_subsystem *css, u32 tod_high) | 649 | css_generate_pgid(struct channel_subsystem *css, u32 tod_high) |
650 | { | 650 | { |
651 | struct cpuid cpu_id; | 651 | struct cpuid cpu_id; |
652 | 652 | ||
653 | if (css_general_characteristics.mcss) { | 653 | if (css_general_characteristics.mcss) { |
654 | css->global_pgid.pgid_high.ext_cssid.version = 0x80; | 654 | css->global_pgid.pgid_high.ext_cssid.version = 0x80; |
655 | css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; | 655 | css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; |
656 | } else { | 656 | } else { |
657 | #ifdef CONFIG_SMP | 657 | #ifdef CONFIG_SMP |
658 | css->global_pgid.pgid_high.cpu_addr = stap(); | 658 | css->global_pgid.pgid_high.cpu_addr = stap(); |
659 | #else | 659 | #else |
660 | css->global_pgid.pgid_high.cpu_addr = 0; | 660 | css->global_pgid.pgid_high.cpu_addr = 0; |
661 | #endif | 661 | #endif |
662 | } | 662 | } |
663 | get_cpu_id(&cpu_id); | 663 | get_cpu_id(&cpu_id); |
664 | css->global_pgid.cpu_id = cpu_id.ident; | 664 | css->global_pgid.cpu_id = cpu_id.ident; |
665 | css->global_pgid.cpu_model = cpu_id.machine; | 665 | css->global_pgid.cpu_model = cpu_id.machine; |
666 | css->global_pgid.tod_high = tod_high; | 666 | css->global_pgid.tod_high = tod_high; |
667 | 667 | ||
668 | } | 668 | } |
669 | 669 | ||
670 | static void | 670 | static void |
671 | channel_subsystem_release(struct device *dev) | 671 | channel_subsystem_release(struct device *dev) |
672 | { | 672 | { |
673 | struct channel_subsystem *css; | 673 | struct channel_subsystem *css; |
674 | 674 | ||
675 | css = to_css(dev); | 675 | css = to_css(dev); |
676 | mutex_destroy(&css->mutex); | 676 | mutex_destroy(&css->mutex); |
677 | if (css->pseudo_subchannel) { | 677 | if (css->pseudo_subchannel) { |
678 | /* Implies that it has been generated but never registered. */ | 678 | /* Implies that it has been generated but never registered. */ |
679 | css_subchannel_release(&css->pseudo_subchannel->dev); | 679 | css_subchannel_release(&css->pseudo_subchannel->dev); |
680 | css->pseudo_subchannel = NULL; | 680 | css->pseudo_subchannel = NULL; |
681 | } | 681 | } |
682 | kfree(css); | 682 | kfree(css); |
683 | } | 683 | } |
684 | 684 | ||
685 | static ssize_t | 685 | static ssize_t |
686 | css_cm_enable_show(struct device *dev, struct device_attribute *attr, | 686 | css_cm_enable_show(struct device *dev, struct device_attribute *attr, |
687 | char *buf) | 687 | char *buf) |
688 | { | 688 | { |
689 | struct channel_subsystem *css = to_css(dev); | 689 | struct channel_subsystem *css = to_css(dev); |
690 | int ret; | 690 | int ret; |
691 | 691 | ||
692 | if (!css) | 692 | if (!css) |
693 | return 0; | 693 | return 0; |
694 | mutex_lock(&css->mutex); | 694 | mutex_lock(&css->mutex); |
695 | ret = sprintf(buf, "%x\n", css->cm_enabled); | 695 | ret = sprintf(buf, "%x\n", css->cm_enabled); |
696 | mutex_unlock(&css->mutex); | 696 | mutex_unlock(&css->mutex); |
697 | return ret; | 697 | return ret; |
698 | } | 698 | } |
699 | 699 | ||
700 | static ssize_t | 700 | static ssize_t |
701 | css_cm_enable_store(struct device *dev, struct device_attribute *attr, | 701 | css_cm_enable_store(struct device *dev, struct device_attribute *attr, |
702 | const char *buf, size_t count) | 702 | const char *buf, size_t count) |
703 | { | 703 | { |
704 | struct channel_subsystem *css = to_css(dev); | 704 | struct channel_subsystem *css = to_css(dev); |
705 | int ret; | 705 | int ret; |
706 | unsigned long val; | 706 | unsigned long val; |
707 | 707 | ||
708 | ret = strict_strtoul(buf, 16, &val); | 708 | ret = strict_strtoul(buf, 16, &val); |
709 | if (ret) | 709 | if (ret) |
710 | return ret; | 710 | return ret; |
711 | mutex_lock(&css->mutex); | 711 | mutex_lock(&css->mutex); |
712 | switch (val) { | 712 | switch (val) { |
713 | case 0: | 713 | case 0: |
714 | ret = css->cm_enabled ? chsc_secm(css, 0) : 0; | 714 | ret = css->cm_enabled ? chsc_secm(css, 0) : 0; |
715 | break; | 715 | break; |
716 | case 1: | 716 | case 1: |
717 | ret = css->cm_enabled ? 0 : chsc_secm(css, 1); | 717 | ret = css->cm_enabled ? 0 : chsc_secm(css, 1); |
718 | break; | 718 | break; |
719 | default: | 719 | default: |
720 | ret = -EINVAL; | 720 | ret = -EINVAL; |
721 | } | 721 | } |
722 | mutex_unlock(&css->mutex); | 722 | mutex_unlock(&css->mutex); |
723 | return ret < 0 ? ret : count; | 723 | return ret < 0 ? ret : count; |
724 | } | 724 | } |
725 | 725 | ||
726 | static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); | 726 | static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); |
727 | 727 | ||
728 | static int __init setup_css(int nr) | 728 | static int __init setup_css(int nr) |
729 | { | 729 | { |
730 | u32 tod_high; | 730 | u32 tod_high; |
731 | int ret; | 731 | int ret; |
732 | struct channel_subsystem *css; | 732 | struct channel_subsystem *css; |
733 | 733 | ||
734 | css = channel_subsystems[nr]; | 734 | css = channel_subsystems[nr]; |
735 | memset(css, 0, sizeof(struct channel_subsystem)); | 735 | memset(css, 0, sizeof(struct channel_subsystem)); |
736 | css->pseudo_subchannel = | 736 | css->pseudo_subchannel = |
737 | kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL); | 737 | kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL); |
738 | if (!css->pseudo_subchannel) | 738 | if (!css->pseudo_subchannel) |
739 | return -ENOMEM; | 739 | return -ENOMEM; |
740 | css->pseudo_subchannel->dev.parent = &css->device; | 740 | css->pseudo_subchannel->dev.parent = &css->device; |
741 | css->pseudo_subchannel->dev.release = css_subchannel_release; | 741 | css->pseudo_subchannel->dev.release = css_subchannel_release; |
742 | dev_set_name(&css->pseudo_subchannel->dev, "defunct"); | 742 | dev_set_name(&css->pseudo_subchannel->dev, "defunct"); |
743 | mutex_init(&css->pseudo_subchannel->reg_mutex); | 743 | mutex_init(&css->pseudo_subchannel->reg_mutex); |
744 | ret = cio_create_sch_lock(css->pseudo_subchannel); | 744 | ret = cio_create_sch_lock(css->pseudo_subchannel); |
745 | if (ret) { | 745 | if (ret) { |
746 | kfree(css->pseudo_subchannel); | 746 | kfree(css->pseudo_subchannel); |
747 | return ret; | 747 | return ret; |
748 | } | 748 | } |
749 | mutex_init(&css->mutex); | 749 | mutex_init(&css->mutex); |
750 | css->valid = 1; | 750 | css->valid = 1; |
751 | css->cssid = nr; | 751 | css->cssid = nr; |
752 | dev_set_name(&css->device, "css%x", nr); | 752 | dev_set_name(&css->device, "css%x", nr); |
753 | css->device.release = channel_subsystem_release; | 753 | css->device.release = channel_subsystem_release; |
754 | tod_high = (u32) (get_clock() >> 32); | 754 | tod_high = (u32) (get_clock() >> 32); |
755 | css_generate_pgid(css, tod_high); | 755 | css_generate_pgid(css, tod_high); |
756 | return 0; | 756 | return 0; |
757 | } | 757 | } |
758 | 758 | ||
759 | static int css_reboot_event(struct notifier_block *this, | 759 | static int css_reboot_event(struct notifier_block *this, |
760 | unsigned long event, | 760 | unsigned long event, |
761 | void *ptr) | 761 | void *ptr) |
762 | { | 762 | { |
763 | int ret, i; | 763 | int ret, i; |
764 | 764 | ||
765 | ret = NOTIFY_DONE; | 765 | ret = NOTIFY_DONE; |
766 | for (i = 0; i <= __MAX_CSSID; i++) { | 766 | for (i = 0; i <= __MAX_CSSID; i++) { |
767 | struct channel_subsystem *css; | 767 | struct channel_subsystem *css; |
768 | 768 | ||
769 | css = channel_subsystems[i]; | 769 | css = channel_subsystems[i]; |
770 | mutex_lock(&css->mutex); | 770 | mutex_lock(&css->mutex); |
771 | if (css->cm_enabled) | 771 | if (css->cm_enabled) |
772 | if (chsc_secm(css, 0)) | 772 | if (chsc_secm(css, 0)) |
773 | ret = NOTIFY_BAD; | 773 | ret = NOTIFY_BAD; |
774 | mutex_unlock(&css->mutex); | 774 | mutex_unlock(&css->mutex); |
775 | } | 775 | } |
776 | 776 | ||
777 | return ret; | 777 | return ret; |
778 | } | 778 | } |
779 | 779 | ||
780 | static struct notifier_block css_reboot_notifier = { | 780 | static struct notifier_block css_reboot_notifier = { |
781 | .notifier_call = css_reboot_event, | 781 | .notifier_call = css_reboot_event, |
782 | }; | 782 | }; |
783 | 783 | ||
784 | /* | 784 | /* |
785 | * Since the css devices are not on a bus, have no class, and | 785 | * Since the css devices are not on a bus, have no class, and |
786 | * no special device type, we cannot stop/restart channel | 786 | * no special device type, we cannot stop/restart channel |
787 | * path measurements via the normal suspend/resume callbacks, but have | 787 | * path measurements via the normal suspend/resume callbacks, but have |
788 | * to use notifiers. | 788 | * to use notifiers. |
789 | */ | 789 | */ |
790 | static int css_power_event(struct notifier_block *this, unsigned long event, | 790 | static int css_power_event(struct notifier_block *this, unsigned long event, |
791 | void *ptr) | 791 | void *ptr) |
792 | { | 792 | { |
793 | void *secm_area; | 793 | void *secm_area; |
794 | int ret, i; | 794 | int ret, i; |
795 | 795 | ||
796 | switch (event) { | 796 | switch (event) { |
797 | case PM_HIBERNATION_PREPARE: | 797 | case PM_HIBERNATION_PREPARE: |
798 | case PM_SUSPEND_PREPARE: | 798 | case PM_SUSPEND_PREPARE: |
799 | ret = NOTIFY_DONE; | 799 | ret = NOTIFY_DONE; |
800 | for (i = 0; i <= __MAX_CSSID; i++) { | 800 | for (i = 0; i <= __MAX_CSSID; i++) { |
801 | struct channel_subsystem *css; | 801 | struct channel_subsystem *css; |
802 | 802 | ||
803 | css = channel_subsystems[i]; | 803 | css = channel_subsystems[i]; |
804 | mutex_lock(&css->mutex); | 804 | mutex_lock(&css->mutex); |
805 | if (!css->cm_enabled) { | 805 | if (!css->cm_enabled) { |
806 | mutex_unlock(&css->mutex); | 806 | mutex_unlock(&css->mutex); |
807 | continue; | 807 | continue; |
808 | } | 808 | } |
809 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | | 809 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | |
810 | GFP_DMA); | 810 | GFP_DMA); |
811 | if (secm_area) { | 811 | if (secm_area) { |
812 | if (__chsc_do_secm(css, 0, secm_area)) | 812 | if (__chsc_do_secm(css, 0, secm_area)) |
813 | ret = NOTIFY_BAD; | 813 | ret = NOTIFY_BAD; |
814 | free_page((unsigned long)secm_area); | 814 | free_page((unsigned long)secm_area); |
815 | } else | 815 | } else |
816 | ret = NOTIFY_BAD; | 816 | ret = NOTIFY_BAD; |
817 | 817 | ||
818 | mutex_unlock(&css->mutex); | 818 | mutex_unlock(&css->mutex); |
819 | } | 819 | } |
820 | break; | 820 | break; |
821 | case PM_POST_HIBERNATION: | 821 | case PM_POST_HIBERNATION: |
822 | case PM_POST_SUSPEND: | 822 | case PM_POST_SUSPEND: |
823 | ret = NOTIFY_DONE; | 823 | ret = NOTIFY_DONE; |
824 | for (i = 0; i <= __MAX_CSSID; i++) { | 824 | for (i = 0; i <= __MAX_CSSID; i++) { |
825 | struct channel_subsystem *css; | 825 | struct channel_subsystem *css; |
826 | 826 | ||
827 | css = channel_subsystems[i]; | 827 | css = channel_subsystems[i]; |
828 | mutex_lock(&css->mutex); | 828 | mutex_lock(&css->mutex); |
829 | if (!css->cm_enabled) { | 829 | if (!css->cm_enabled) { |
830 | mutex_unlock(&css->mutex); | 830 | mutex_unlock(&css->mutex); |
831 | continue; | 831 | continue; |
832 | } | 832 | } |
833 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | | 833 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | |
834 | GFP_DMA); | 834 | GFP_DMA); |
835 | if (secm_area) { | 835 | if (secm_area) { |
836 | if (__chsc_do_secm(css, 1, secm_area)) | 836 | if (__chsc_do_secm(css, 1, secm_area)) |
837 | ret = NOTIFY_BAD; | 837 | ret = NOTIFY_BAD; |
838 | free_page((unsigned long)secm_area); | 838 | free_page((unsigned long)secm_area); |
839 | } else | 839 | } else |
840 | ret = NOTIFY_BAD; | 840 | ret = NOTIFY_BAD; |
841 | 841 | ||
842 | mutex_unlock(&css->mutex); | 842 | mutex_unlock(&css->mutex); |
843 | } | 843 | } |
844 | /* search for subchannels that appeared during hibernation */ | 844 | /* search for subchannels that appeared during hibernation */ |
845 | css_schedule_reprobe(); | 845 | css_schedule_reprobe(); |
846 | break; | 846 | break; |
847 | default: | 847 | default: |
848 | ret = NOTIFY_DONE; | 848 | ret = NOTIFY_DONE; |
849 | } | 849 | } |
850 | return ret; | 850 | return ret; |
851 | 851 | ||
852 | } | 852 | } |
853 | static struct notifier_block css_power_notifier = { | 853 | static struct notifier_block css_power_notifier = { |
854 | .notifier_call = css_power_event, | 854 | .notifier_call = css_power_event, |
855 | }; | 855 | }; |
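css_power_event() follows the standard PM-notifier shape: disable channel-path measurements on the *_PREPARE events, re-enable them on the PM_POST_* events, and treat everything else as NOTIFY_DONE. A hedged sketch of that shape, assuming hypothetical my_* names:

	#include <linux/suspend.h>
	#include <linux/notifier.h>

	static int my_power_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
	{
		switch (event) {
		case PM_HIBERNATION_PREPARE:
		case PM_SUSPEND_PREPARE:
			/* disable measurements, like __chsc_do_secm(css, 0, ...) */
			return NOTIFY_DONE;
		case PM_POST_HIBERNATION:
		case PM_POST_SUSPEND:
			/* re-enable measurements, like __chsc_do_secm(css, 1, ...) */
			return NOTIFY_DONE;
		default:
			return NOTIFY_DONE;
		}
	}

	static struct notifier_block my_power_notifier = {
		.notifier_call = my_power_event,
	};
	/* registered with register_pm_notifier(&my_power_notifier); */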
856 | 856 | ||
857 | /* | 857 | /* |
858 | * Now that the driver core is running, we can set up our channel | 858 | * Now that the driver core is running, we can set up our channel |
859 | * subsystem. The struct subchannel instances are created during probing | 859 | * subsystem. The struct subchannel instances are created during probing |
860 | * (except for the static console subchannel). | 860 | * (except for the static console subchannel). |
861 | */ | 861 | */ |
862 | static int __init css_bus_init(void) | 862 | static int __init css_bus_init(void) |
863 | { | 863 | { |
864 | int ret, i; | 864 | int ret, i; |
865 | 865 | ||
866 | ret = chsc_init(); | ||
867 | if (ret) | ||
868 | return ret; | ||
869 | |||
866 | ret = chsc_determine_css_characteristics(); | 870 | ret = chsc_determine_css_characteristics(); |
867 | if (ret == -ENOMEM) | 871 | if (ret == -ENOMEM) |
868 | goto out; | 872 | goto out; |
869 | 873 | ||
870 | ret = chsc_alloc_sei_area(); | ||
871 | if (ret) | ||
872 | goto out; | ||
873 | |||
874 | /* Try to enable MSS. */ | 874 | /* Try to enable MSS. */ |
875 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); | 875 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); |
876 | if (ret) | 876 | if (ret) |
877 | max_ssid = 0; | 877 | max_ssid = 0; |
878 | else /* Success. */ | 878 | else /* Success. */ |
879 | max_ssid = __MAX_SSID; | 879 | max_ssid = __MAX_SSID; |
880 | 880 | ||
881 | ret = slow_subchannel_init(); | 881 | ret = slow_subchannel_init(); |
882 | if (ret) | 882 | if (ret) |
883 | goto out; | 883 | goto out; |
884 | 884 | ||
885 | ret = crw_register_handler(CRW_RSC_SCH, css_process_crw); | 885 | ret = crw_register_handler(CRW_RSC_SCH, css_process_crw); |
886 | if (ret) | 886 | if (ret) |
887 | goto out; | 887 | goto out; |
888 | 888 | ||
889 | if ((ret = bus_register(&css_bus_type))) | 889 | if ((ret = bus_register(&css_bus_type))) |
890 | goto out; | 890 | goto out; |
891 | 891 | ||
892 | /* Setup css structure. */ | 892 | /* Setup css structure. */ |
893 | for (i = 0; i <= __MAX_CSSID; i++) { | 893 | for (i = 0; i <= __MAX_CSSID; i++) { |
894 | struct channel_subsystem *css; | 894 | struct channel_subsystem *css; |
895 | 895 | ||
896 | css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL); | 896 | css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL); |
897 | if (!css) { | 897 | if (!css) { |
898 | ret = -ENOMEM; | 898 | ret = -ENOMEM; |
899 | goto out_unregister; | 899 | goto out_unregister; |
900 | } | 900 | } |
901 | channel_subsystems[i] = css; | 901 | channel_subsystems[i] = css; |
902 | ret = setup_css(i); | 902 | ret = setup_css(i); |
903 | if (ret) { | 903 | if (ret) { |
904 | kfree(channel_subsystems[i]); | 904 | kfree(channel_subsystems[i]); |
905 | goto out_unregister; | 905 | goto out_unregister; |
906 | } | 906 | } |
907 | ret = device_register(&css->device); | 907 | ret = device_register(&css->device); |
908 | if (ret) { | 908 | if (ret) { |
909 | put_device(&css->device); | 909 | put_device(&css->device); |
910 | goto out_unregister; | 910 | goto out_unregister; |
911 | } | 911 | } |
912 | if (css_chsc_characteristics.secm) { | 912 | if (css_chsc_characteristics.secm) { |
913 | ret = device_create_file(&css->device, | 913 | ret = device_create_file(&css->device, |
914 | &dev_attr_cm_enable); | 914 | &dev_attr_cm_enable); |
915 | if (ret) | 915 | if (ret) |
916 | goto out_device; | 916 | goto out_device; |
917 | } | 917 | } |
918 | ret = device_register(&css->pseudo_subchannel->dev); | 918 | ret = device_register(&css->pseudo_subchannel->dev); |
919 | if (ret) { | 919 | if (ret) { |
920 | put_device(&css->pseudo_subchannel->dev); | 920 | put_device(&css->pseudo_subchannel->dev); |
921 | goto out_file; | 921 | goto out_file; |
922 | } | 922 | } |
923 | } | 923 | } |
924 | ret = register_reboot_notifier(&css_reboot_notifier); | 924 | ret = register_reboot_notifier(&css_reboot_notifier); |
925 | if (ret) | 925 | if (ret) |
926 | goto out_unregister; | 926 | goto out_unregister; |
927 | ret = register_pm_notifier(&css_power_notifier); | 927 | ret = register_pm_notifier(&css_power_notifier); |
928 | if (ret) { | 928 | if (ret) { |
929 | unregister_reboot_notifier(&css_reboot_notifier); | 929 | unregister_reboot_notifier(&css_reboot_notifier); |
930 | goto out_unregister; | 930 | goto out_unregister; |
931 | } | 931 | } |
932 | css_init_done = 1; | 932 | css_init_done = 1; |
933 | 933 | ||
934 | /* Enable default isc for I/O subchannels. */ | 934 | /* Enable default isc for I/O subchannels. */ |
935 | isc_register(IO_SCH_ISC); | 935 | isc_register(IO_SCH_ISC); |
936 | 936 | ||
937 | return 0; | 937 | return 0; |
938 | out_file: | 938 | out_file: |
939 | if (css_chsc_characteristics.secm) | 939 | if (css_chsc_characteristics.secm) |
940 | device_remove_file(&channel_subsystems[i]->device, | 940 | device_remove_file(&channel_subsystems[i]->device, |
941 | &dev_attr_cm_enable); | 941 | &dev_attr_cm_enable); |
942 | out_device: | 942 | out_device: |
943 | device_unregister(&channel_subsystems[i]->device); | 943 | device_unregister(&channel_subsystems[i]->device); |
944 | out_unregister: | 944 | out_unregister: |
945 | while (i > 0) { | 945 | while (i > 0) { |
946 | struct channel_subsystem *css; | 946 | struct channel_subsystem *css; |
947 | 947 | ||
948 | i--; | 948 | i--; |
949 | css = channel_subsystems[i]; | 949 | css = channel_subsystems[i]; |
950 | device_unregister(&css->pseudo_subchannel->dev); | 950 | device_unregister(&css->pseudo_subchannel->dev); |
951 | css->pseudo_subchannel = NULL; | 951 | css->pseudo_subchannel = NULL; |
952 | if (css_chsc_characteristics.secm) | 952 | if (css_chsc_characteristics.secm) |
953 | device_remove_file(&css->device, | 953 | device_remove_file(&css->device, |
954 | &dev_attr_cm_enable); | 954 | &dev_attr_cm_enable); |
955 | device_unregister(&css->device); | 955 | device_unregister(&css->device); |
956 | } | 956 | } |
957 | bus_unregister(&css_bus_type); | 957 | bus_unregister(&css_bus_type); |
958 | out: | 958 | out: |
959 | crw_unregister_handler(CRW_RSC_CSS); | 959 | crw_unregister_handler(CRW_RSC_SCH); |
960 | chsc_free_sei_area(); | ||
961 | idset_free(slow_subchannel_set); | 960 | idset_free(slow_subchannel_set); |
961 | chsc_init_cleanup(); | ||
962 | pr_alert("The CSS device driver initialization failed with " | 962 | pr_alert("The CSS device driver initialization failed with " |
963 | "errno=%d\n", ret); | 963 | "errno=%d\n", ret); |
964 | return ret; | 964 | return ret; |
965 | } | 965 | } |
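The diff above shows the ordering this patch establishes: chsc_init() now runs before anything else, and the shared error path releases what was acquired, unregistering the CRW handler for the RSC that was actually registered (CRW_RSC_SCH, no longer the mismatched CRW_RSC_CSS) and finishing with chsc_init_cleanup(). An idealized reduction of that acquire/release ordering, using the usual goto-based unwind:

	static int __init example_bus_init(void)
	{
		int ret;

		ret = chsc_init();
		if (ret)
			return ret;	/* nothing to roll back yet */
		ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
		if (ret)
			goto out_chsc;
		/* ... further setup, each failure jumping to the matching label ... */
		return 0;
	out_chsc:
		chsc_init_cleanup();	/* release in reverse order of acquisition */
		return ret;
	}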
966 | 966 | ||
967 | static void __init css_bus_cleanup(void) | 967 | static void __init css_bus_cleanup(void) |
968 | { | 968 | { |
969 | struct channel_subsystem *css; | 969 | struct channel_subsystem *css; |
970 | int i; | 970 | int i; |
971 | 971 | ||
972 | for (i = 0; i <= __MAX_CSSID; i++) { | 972 | for (i = 0; i <= __MAX_CSSID; i++) { |
973 | css = channel_subsystems[i]; | 973 | css = channel_subsystems[i]; |
974 | device_unregister(&css->pseudo_subchannel->dev); | 974 | device_unregister(&css->pseudo_subchannel->dev); |
975 | css->pseudo_subchannel = NULL; | 975 | css->pseudo_subchannel = NULL; |
976 | if (css_chsc_characteristics.secm) | 976 | if (css_chsc_characteristics.secm) |
977 | device_remove_file(&css->device, &dev_attr_cm_enable); | 977 | device_remove_file(&css->device, &dev_attr_cm_enable); |
978 | device_unregister(&css->device); | 978 | device_unregister(&css->device); |
979 | } | 979 | } |
980 | bus_unregister(&css_bus_type); | 980 | bus_unregister(&css_bus_type); |
981 | crw_unregister_handler(CRW_RSC_CSS); | 981 | crw_unregister_handler(CRW_RSC_SCH); |
982 | chsc_free_sei_area(); | ||
983 | idset_free(slow_subchannel_set); | 982 | idset_free(slow_subchannel_set); |
983 | chsc_init_cleanup(); | ||
984 | isc_unregister(IO_SCH_ISC); | 984 | isc_unregister(IO_SCH_ISC); |
985 | } | 985 | } |
986 | 986 | ||
987 | static int __init channel_subsystem_init(void) | 987 | static int __init channel_subsystem_init(void) |
988 | { | 988 | { |
989 | int ret; | 989 | int ret; |
990 | 990 | ||
991 | ret = css_bus_init(); | 991 | ret = css_bus_init(); |
992 | if (ret) | 992 | if (ret) |
993 | return ret; | 993 | return ret; |
994 | cio_work_q = create_singlethread_workqueue("cio"); | 994 | cio_work_q = create_singlethread_workqueue("cio"); |
995 | if (!cio_work_q) { | 995 | if (!cio_work_q) { |
996 | ret = -ENOMEM; | 996 | ret = -ENOMEM; |
997 | goto out_bus; | 997 | goto out_bus; |
998 | } | 998 | } |
999 | ret = io_subchannel_init(); | 999 | ret = io_subchannel_init(); |
1000 | if (ret) | 1000 | if (ret) |
1001 | goto out_wq; | 1001 | goto out_wq; |
1002 | 1002 | ||
1003 | return ret; | 1003 | return ret; |
1004 | out_wq: | 1004 | out_wq: |
1005 | destroy_workqueue(cio_work_q); | 1005 | destroy_workqueue(cio_work_q); |
1006 | out_bus: | 1006 | out_bus: |
1007 | css_bus_cleanup(); | 1007 | css_bus_cleanup(); |
1008 | return ret; | 1008 | return ret; |
1009 | } | 1009 | } |
1010 | subsys_initcall(channel_subsystem_init); | 1010 | subsys_initcall(channel_subsystem_init); |
1011 | 1011 | ||
1012 | static int css_settle(struct device_driver *drv, void *unused) | 1012 | static int css_settle(struct device_driver *drv, void *unused) |
1013 | { | 1013 | { |
1014 | struct css_driver *cssdrv = to_cssdriver(drv); | 1014 | struct css_driver *cssdrv = to_cssdriver(drv); |
1015 | 1015 | ||
1016 | if (cssdrv->settle) | 1016 | if (cssdrv->settle) |
1017 | return cssdrv->settle(); | 1017 | return cssdrv->settle(); |
1018 | return 0; | 1018 | return 0; |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | int css_complete_work(void) | 1021 | int css_complete_work(void) |
1022 | { | 1022 | { |
1023 | int ret; | 1023 | int ret; |
1024 | 1024 | ||
1025 | /* Wait for the evaluation of subchannels to finish. */ | 1025 | /* Wait for the evaluation of subchannels to finish. */ |
1026 | ret = wait_event_interruptible(css_eval_wq, | 1026 | ret = wait_event_interruptible(css_eval_wq, |
1027 | atomic_read(&css_eval_scheduled) == 0); | 1027 | atomic_read(&css_eval_scheduled) == 0); |
1028 | if (ret) | 1028 | if (ret) |
1029 | return -EINTR; | 1029 | return -EINTR; |
1030 | flush_workqueue(cio_work_q); | 1030 | flush_workqueue(cio_work_q); |
1031 | /* Wait for the subchannel-type-specific initialization to finish. */ | 1031 | /* Wait for the subchannel-type-specific initialization to finish. */ |
1032 | return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); | 1032 | return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); |
1033 | } | 1033 | } |
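css_complete_work() combines two waits: an interruptible sleep until the evaluation counter drops to zero, then a flush of the cio workqueue. A minimal sketch of that producer/consumer pattern, with hypothetical names standing in for css_eval_wq and css_eval_scheduled:

	#include <linux/wait.h>
	#include <asm/atomic.h>

	static DECLARE_WAIT_QUEUE_HEAD(eval_wq);
	static atomic_t eval_scheduled = ATOMIC_INIT(0);

	/* worker side: decrement and wake the waiter when the count hits zero */
	static void work_done(void)
	{
		if (atomic_dec_and_test(&eval_scheduled))
			wake_up(&eval_wq);
	}

	/* waiter side: returns -ERESTARTSYS if interrupted by a signal */
	static int wait_for_work(void)
	{
		return wait_event_interruptible(eval_wq,
						atomic_read(&eval_scheduled) == 0);
	}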
1034 | 1034 | ||
1035 | 1035 | ||
1036 | /* | 1036 | /* |
1037 | * Wait for the initialization of devices to finish, to make sure we are | 1037 | * Wait for the initialization of devices to finish, to make sure we are |
1038 | * done with our setup before the search for the root device starts. | 1038 | * done with our setup before the search for the root device starts. |
1039 | */ | 1039 | */ |
1040 | static int __init channel_subsystem_init_sync(void) | 1040 | static int __init channel_subsystem_init_sync(void) |
1041 | { | 1041 | { |
1042 | /* Start initial subchannel evaluation. */ | 1042 | /* Start initial subchannel evaluation. */ |
1043 | css_schedule_eval_all(); | 1043 | css_schedule_eval_all(); |
1044 | css_complete_work(); | 1044 | css_complete_work(); |
1045 | return 0; | 1045 | return 0; |
1046 | } | 1046 | } |
1047 | subsys_initcall_sync(channel_subsystem_init_sync); | 1047 | subsys_initcall_sync(channel_subsystem_init_sync); |
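The two initcalls above rely on link-time ordering: within the "subsys" level, every plain subsys_initcall() runs before any subsys_initcall_sync(), so channel_subsystem_init_sync() can safely wait for work queued by channel_subsystem_init(). A bare sketch of that ordering, with hypothetical stage names:

	static int __init setup_stage(void)		/* runs first */
	{
		return 0;
	}
	subsys_initcall(setup_stage);

	static int __init settle_stage(void)	/* runs after every subsys_initcall */
	{
		return 0;
	}
	subsys_initcall_sync(settle_stage);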
1048 | 1048 | ||
1049 | void channel_subsystem_reinit(void) | 1049 | void channel_subsystem_reinit(void) |
1050 | { | 1050 | { |
1051 | chsc_enable_facility(CHSC_SDA_OC_MSS); | 1051 | chsc_enable_facility(CHSC_SDA_OC_MSS); |
1052 | } | 1052 | } |
1053 | 1053 | ||
1054 | #ifdef CONFIG_PROC_FS | 1054 | #ifdef CONFIG_PROC_FS |
1055 | static ssize_t cio_settle_write(struct file *file, const char __user *buf, | 1055 | static ssize_t cio_settle_write(struct file *file, const char __user *buf, |
1056 | size_t count, loff_t *ppos) | 1056 | size_t count, loff_t *ppos) |
1057 | { | 1057 | { |
1058 | int ret; | 1058 | int ret; |
1059 | 1059 | ||
1060 | /* Handle pending CRWs. */ | 1060 | /* Handle pending CRWs. */ |
1061 | crw_wait_for_channel_report(); | 1061 | crw_wait_for_channel_report(); |
1062 | ret = css_complete_work(); | 1062 | ret = css_complete_work(); |
1063 | 1063 | ||
1064 | return ret ? ret : count; | 1064 | return ret ? ret : count; |
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | static const struct file_operations cio_settle_proc_fops = { | 1067 | static const struct file_operations cio_settle_proc_fops = { |
1068 | .open = nonseekable_open, | 1068 | .open = nonseekable_open, |
1069 | .write = cio_settle_write, | 1069 | .write = cio_settle_write, |
1070 | .llseek = no_llseek, | 1070 | .llseek = no_llseek, |
1071 | }; | 1071 | }; |
1072 | 1072 | ||
1073 | static int __init cio_settle_init(void) | 1073 | static int __init cio_settle_init(void) |
1074 | { | 1074 | { |
1075 | struct proc_dir_entry *entry; | 1075 | struct proc_dir_entry *entry; |
1076 | 1076 | ||
1077 | entry = proc_create("cio_settle", S_IWUSR, NULL, | 1077 | entry = proc_create("cio_settle", S_IWUSR, NULL, |
1078 | &cio_settle_proc_fops); | 1078 | &cio_settle_proc_fops); |
1079 | if (!entry) | 1079 | if (!entry) |
1080 | return -ENOMEM; | 1080 | return -ENOMEM; |
1081 | return 0; | 1081 | return 0; |
1082 | } | 1082 | } |
1083 | device_initcall(cio_settle_init); | 1083 | device_initcall(cio_settle_init); |
1084 | #endif /*CONFIG_PROC_FS*/ | 1084 | #endif /*CONFIG_PROC_FS*/ |
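From userspace, a write to /proc/cio_settle blocks until pending channel reports have been handled and subchannel evaluation has settled; the written data itself is ignored. A small usage sketch in C (the cio_settle wrapper name is illustrative):

	#include <fcntl.h>
	#include <unistd.h>

	/* Block until CIO device detection has settled; 0 on success. */
	int cio_settle(void)
	{
		int fd = open("/proc/cio_settle", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, "1", 1);	/* any byte; the write side does the waiting */
		close(fd);
		return n == 1 ? 0 : -1;
	}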
1085 | 1085 | ||
1086 | int sch_is_pseudo_sch(struct subchannel *sch) | 1086 | int sch_is_pseudo_sch(struct subchannel *sch) |
1087 | { | 1087 | { |
1088 | return sch == to_css(sch->dev.parent)->pseudo_subchannel; | 1088 | return sch == to_css(sch->dev.parent)->pseudo_subchannel; |
1089 | } | 1089 | } |
1090 | 1090 | ||
1091 | static int css_bus_match(struct device *dev, struct device_driver *drv) | 1091 | static int css_bus_match(struct device *dev, struct device_driver *drv) |
1092 | { | 1092 | { |
1093 | struct subchannel *sch = to_subchannel(dev); | 1093 | struct subchannel *sch = to_subchannel(dev); |
1094 | struct css_driver *driver = to_cssdriver(drv); | 1094 | struct css_driver *driver = to_cssdriver(drv); |
1095 | struct css_device_id *id; | 1095 | struct css_device_id *id; |
1096 | 1096 | ||
1097 | for (id = driver->subchannel_type; id->match_flags; id++) { | 1097 | for (id = driver->subchannel_type; id->match_flags; id++) { |
1098 | if (sch->st == id->type) | 1098 | if (sch->st == id->type) |
1099 | return 1; | 1099 | return 1; |
1100 | } | 1100 | } |
1101 | 1101 | ||
1102 | return 0; | 1102 | return 0; |
1103 | } | 1103 | } |
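css_bus_match() walks the driver's subchannel_type table until an entry with match_flags == 0 terminates the list, so every table needs a zeroed sentinel. A hypothetical table for an I/O-subchannel driver (my_subchannel_ids is an illustrative name; SUBCHANNEL_TYPE_IO comes from the cio-internal headers):

	static struct css_device_id my_subchannel_ids[] = {
		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
		{ /* zeroed sentinel ends the list */ },
	};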
1104 | 1104 | ||
1105 | static int css_probe(struct device *dev) | 1105 | static int css_probe(struct device *dev) |
1106 | { | 1106 | { |
1107 | struct subchannel *sch; | 1107 | struct subchannel *sch; |
1108 | int ret; | 1108 | int ret; |
1109 | 1109 | ||
1110 | sch = to_subchannel(dev); | 1110 | sch = to_subchannel(dev); |
1111 | sch->driver = to_cssdriver(dev->driver); | 1111 | sch->driver = to_cssdriver(dev->driver); |
1112 | ret = sch->driver->probe ? sch->driver->probe(sch) : 0; | 1112 | ret = sch->driver->probe ? sch->driver->probe(sch) : 0; |
1113 | if (ret) | 1113 | if (ret) |
1114 | sch->driver = NULL; | 1114 | sch->driver = NULL; |
1115 | return ret; | 1115 | return ret; |
1116 | } | 1116 | } |
1117 | 1117 | ||
1118 | static int css_remove(struct device *dev) | 1118 | static int css_remove(struct device *dev) |
1119 | { | 1119 | { |
1120 | struct subchannel *sch; | 1120 | struct subchannel *sch; |
1121 | int ret; | 1121 | int ret; |
1122 | 1122 | ||
1123 | sch = to_subchannel(dev); | 1123 | sch = to_subchannel(dev); |
1124 | ret = sch->driver->remove ? sch->driver->remove(sch) : 0; | 1124 | ret = sch->driver->remove ? sch->driver->remove(sch) : 0; |
1125 | sch->driver = NULL; | 1125 | sch->driver = NULL; |
1126 | return ret; | 1126 | return ret; |
1127 | } | 1127 | } |
1128 | 1128 | ||
1129 | static void css_shutdown(struct device *dev) | 1129 | static void css_shutdown(struct device *dev) |
1130 | { | 1130 | { |
1131 | struct subchannel *sch; | 1131 | struct subchannel *sch; |
1132 | 1132 | ||
1133 | sch = to_subchannel(dev); | 1133 | sch = to_subchannel(dev); |
1134 | if (sch->driver && sch->driver->shutdown) | 1134 | if (sch->driver && sch->driver->shutdown) |
1135 | sch->driver->shutdown(sch); | 1135 | sch->driver->shutdown(sch); |
1136 | } | 1136 | } |
1137 | 1137 | ||
1138 | static int css_uevent(struct device *dev, struct kobj_uevent_env *env) | 1138 | static int css_uevent(struct device *dev, struct kobj_uevent_env *env) |
1139 | { | 1139 | { |
1140 | struct subchannel *sch = to_subchannel(dev); | 1140 | struct subchannel *sch = to_subchannel(dev); |
1141 | int ret; | 1141 | int ret; |
1142 | 1142 | ||
1143 | ret = add_uevent_var(env, "ST=%01X", sch->st); | 1143 | ret = add_uevent_var(env, "ST=%01X", sch->st); |
1144 | if (ret) | 1144 | if (ret) |
1145 | return ret; | 1145 | return ret; |
1146 | ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st); | 1146 | ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st); |
1147 | return ret; | 1147 | return ret; |
1148 | } | 1148 | } |
1149 | 1149 | ||
1150 | static int css_pm_prepare(struct device *dev) | 1150 | static int css_pm_prepare(struct device *dev) |
1151 | { | 1151 | { |
1152 | struct subchannel *sch = to_subchannel(dev); | 1152 | struct subchannel *sch = to_subchannel(dev); |
1153 | struct css_driver *drv; | 1153 | struct css_driver *drv; |
1154 | 1154 | ||
1155 | if (mutex_is_locked(&sch->reg_mutex)) | 1155 | if (mutex_is_locked(&sch->reg_mutex)) |
1156 | return -EAGAIN; | 1156 | return -EAGAIN; |
1157 | if (!sch->dev.driver) | 1157 | if (!sch->dev.driver) |
1158 | return 0; | 1158 | return 0; |
1159 | drv = to_cssdriver(sch->dev.driver); | 1159 | drv = to_cssdriver(sch->dev.driver); |
1160 | /* Notify drivers that they may not register children. */ | 1160 | /* Notify drivers that they may not register children. */ |
1161 | return drv->prepare ? drv->prepare(sch) : 0; | 1161 | return drv->prepare ? drv->prepare(sch) : 0; |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | static void css_pm_complete(struct device *dev) | 1164 | static void css_pm_complete(struct device *dev) |
1165 | { | 1165 | { |
1166 | struct subchannel *sch = to_subchannel(dev); | 1166 | struct subchannel *sch = to_subchannel(dev); |
1167 | struct css_driver *drv; | 1167 | struct css_driver *drv; |
1168 | 1168 | ||
1169 | if (!sch->dev.driver) | 1169 | if (!sch->dev.driver) |
1170 | return; | 1170 | return; |
1171 | drv = to_cssdriver(sch->dev.driver); | 1171 | drv = to_cssdriver(sch->dev.driver); |
1172 | if (drv->complete) | 1172 | if (drv->complete) |
1173 | drv->complete(sch); | 1173 | drv->complete(sch); |
1174 | } | 1174 | } |
1175 | 1175 | ||
1176 | static int css_pm_freeze(struct device *dev) | 1176 | static int css_pm_freeze(struct device *dev) |
1177 | { | 1177 | { |
1178 | struct subchannel *sch = to_subchannel(dev); | 1178 | struct subchannel *sch = to_subchannel(dev); |
1179 | struct css_driver *drv; | 1179 | struct css_driver *drv; |
1180 | 1180 | ||
1181 | if (!sch->dev.driver) | 1181 | if (!sch->dev.driver) |
1182 | return 0; | 1182 | return 0; |
1183 | drv = to_cssdriver(sch->dev.driver); | 1183 | drv = to_cssdriver(sch->dev.driver); |
1184 | return drv->freeze ? drv->freeze(sch) : 0; | 1184 | return drv->freeze ? drv->freeze(sch) : 0; |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | static int css_pm_thaw(struct device *dev) | 1187 | static int css_pm_thaw(struct device *dev) |
1188 | { | 1188 | { |
1189 | struct subchannel *sch = to_subchannel(dev); | 1189 | struct subchannel *sch = to_subchannel(dev); |
1190 | struct css_driver *drv; | 1190 | struct css_driver *drv; |
1191 | 1191 | ||
1192 | if (!sch->dev.driver) | 1192 | if (!sch->dev.driver) |
1193 | return 0; | 1193 | return 0; |
1194 | drv = to_cssdriver(sch->dev.driver); | 1194 | drv = to_cssdriver(sch->dev.driver); |
1195 | return drv->thaw ? drv->thaw(sch) : 0; | 1195 | return drv->thaw ? drv->thaw(sch) : 0; |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | static int css_pm_restore(struct device *dev) | 1198 | static int css_pm_restore(struct device *dev) |
1199 | { | 1199 | { |
1200 | struct subchannel *sch = to_subchannel(dev); | 1200 | struct subchannel *sch = to_subchannel(dev); |
1201 | struct css_driver *drv; | 1201 | struct css_driver *drv; |
1202 | 1202 | ||
1203 | if (!sch->dev.driver) | 1203 | if (!sch->dev.driver) |
1204 | return 0; | 1204 | return 0; |
1205 | drv = to_cssdriver(sch->dev.driver); | 1205 | drv = to_cssdriver(sch->dev.driver); |
1206 | return drv->restore ? drv->restore(sch) : 0; | 1206 | return drv->restore ? drv->restore(sch) : 0; |
1207 | } | 1207 | } |
1208 | 1208 | ||
1209 | static const struct dev_pm_ops css_pm_ops = { | 1209 | static const struct dev_pm_ops css_pm_ops = { |
1210 | .prepare = css_pm_prepare, | 1210 | .prepare = css_pm_prepare, |
1211 | .complete = css_pm_complete, | 1211 | .complete = css_pm_complete, |
1212 | .freeze = css_pm_freeze, | 1212 | .freeze = css_pm_freeze, |
1213 | .thaw = css_pm_thaw, | 1213 | .thaw = css_pm_thaw, |
1214 | .restore = css_pm_restore, | 1214 | .restore = css_pm_restore, |
1215 | }; | 1215 | }; |
1216 | 1216 | ||
1217 | struct bus_type css_bus_type = { | 1217 | struct bus_type css_bus_type = { |
1218 | .name = "css", | 1218 | .name = "css", |
1219 | .match = css_bus_match, | 1219 | .match = css_bus_match, |
1220 | .probe = css_probe, | 1220 | .probe = css_probe, |
1221 | .remove = css_remove, | 1221 | .remove = css_remove, |
1222 | .shutdown = css_shutdown, | 1222 | .shutdown = css_shutdown, |
1223 | .uevent = css_uevent, | 1223 | .uevent = css_uevent, |
1224 | .pm = &css_pm_ops, | 1224 | .pm = &css_pm_ops, |
1225 | }; | 1225 | }; |
1226 | 1226 | ||
1227 | /** | 1227 | /** |
1228 | * css_driver_register - register a css driver | 1228 | * css_driver_register - register a css driver |
1229 | * @cdrv: css driver to register | 1229 | * @cdrv: css driver to register |
1230 | * | 1230 | * |
1231 | * This is mainly a wrapper around driver_register that sets name | 1231 | * This is mainly a wrapper around driver_register that sets name |
1232 | * and bus_type in the embedded struct device_driver correctly. | 1232 | * and bus_type in the embedded struct device_driver correctly. |
1233 | */ | 1233 | */ |
1234 | int css_driver_register(struct css_driver *cdrv) | 1234 | int css_driver_register(struct css_driver *cdrv) |
1235 | { | 1235 | { |
1236 | cdrv->drv.name = cdrv->name; | 1236 | cdrv->drv.name = cdrv->name; |
1237 | cdrv->drv.bus = &css_bus_type; | 1237 | cdrv->drv.bus = &css_bus_type; |
1238 | cdrv->drv.owner = cdrv->owner; | 1238 | cdrv->drv.owner = cdrv->owner; |
1239 | return driver_register(&cdrv->drv); | 1239 | return driver_register(&cdrv->drv); |
1240 | } | 1240 | } |
1241 | EXPORT_SYMBOL_GPL(css_driver_register); | 1241 | EXPORT_SYMBOL_GPL(css_driver_register); |
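A minimal hypothetical css driver built on this wrapper, reusing the my_subchannel_ids table from the sketch after css_bus_match(); all my_* names are illustrative:

	static int my_probe(struct subchannel *sch)
	{
		return 0;	/* claim the subchannel */
	}

	static struct css_driver my_css_driver = {
		.owner		 = THIS_MODULE,
		.name		 = "my_css_drv",
		.subchannel_type = my_subchannel_ids,
		.probe		 = my_probe,
	};

	static int __init my_drv_init(void)
	{
		return css_driver_register(&my_css_driver);
	}

	static void __exit my_drv_exit(void)
	{
		css_driver_unregister(&my_css_driver);
	}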
1242 | 1242 | ||
1243 | /** | 1243 | /** |
1244 | * css_driver_unregister - unregister a css driver | 1244 | * css_driver_unregister - unregister a css driver |
1245 | * @cdrv: css driver to unregister | 1245 | * @cdrv: css driver to unregister |
1246 | * | 1246 | * |
1247 | * This is a wrapper around driver_unregister. | 1247 | * This is a wrapper around driver_unregister. |
1248 | */ | 1248 | */ |
1249 | void css_driver_unregister(struct css_driver *cdrv) | 1249 | void css_driver_unregister(struct css_driver *cdrv) |
1250 | { | 1250 | { |
1251 | driver_unregister(&cdrv->drv); | 1251 | driver_unregister(&cdrv->drv); |