Commit e62133b4ea0d85888d9883a3e1c396ea8717bc26

Authored by Heiko Carstens
Committed by Martin Schwidefsky
1 parent 3b74a87422

[S390] Get rid of new section mismatch warnings.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Showing 5 changed files with 8 additions and 9 deletions Inline Diff

arch/s390/kernel/head.S
1 /* 1 /*
2 * arch/s390/kernel/head.S 2 * arch/s390/kernel/head.S
3 * 3 *
4 * Copyright (C) IBM Corp. 1999,2006 4 * Copyright (C) IBM Corp. 1999,2006
5 * 5 *
6 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 * Author(s): Hartmut Penner <hp@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * Rob van der Heij <rvdhei@iae.nl> 8 * Rob van der Heij <rvdhei@iae.nl>
9 * Heiko Carstens <heiko.carstens@de.ibm.com> 9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 * 10 *
11 * There are 5 different IPL methods 11 * There are 5 different IPL methods
12 * 1) load the image directly into ram at address 0 and do a PSW restart 12 * 1) load the image directly into ram at address 0 and do a PSW restart
13 * 2) linload will load the image from address 0x10000 to memory 0x10000 13 * 2) linload will load the image from address 0x10000 to memory 0x10000
14 * and start the code thru LPSW 0x0008000080010000 (VM only, deprecated) 14 * and start the code thru LPSW 0x0008000080010000 (VM only, deprecated)
15 * 3) generate the tape ipl header, store the generated image on a tape 15 * 3) generate the tape ipl header, store the generated image on a tape
16 * and ipl from it 16 * and ipl from it
17 * In case of SL tape you need to IPL 5 times to get past VOL1 etc 17 * In case of SL tape you need to IPL 5 times to get past VOL1 etc
18 * 4) generate the vm reader ipl header, move the generated image to the 18 * 4) generate the vm reader ipl header, move the generated image to the
19 * VM reader (use option NOH!) and do a ipl from reader (VM only) 19 * VM reader (use option NOH!) and do a ipl from reader (VM only)
20 * 5) direct call of start by the SALIPL loader 20 * 5) direct call of start by the SALIPL loader
21 * We use the cpuid to distinguish between VM and native ipl 21 * We use the cpuid to distinguish between VM and native ipl
22 * params for kernel are pushed to 0x10400 (see setup.h) 22 * params for kernel are pushed to 0x10400 (see setup.h)
23 * 23 *
24 */ 24 */
25 25
26 #include <asm/setup.h> 26 #include <asm/setup.h>
27 #include <asm/lowcore.h> 27 #include <asm/lowcore.h>
28 #include <asm/asm-offsets.h> 28 #include <asm/asm-offsets.h>
29 #include <asm/thread_info.h> 29 #include <asm/thread_info.h>
30 #include <asm/page.h> 30 #include <asm/page.h>
31 31
32 #ifdef CONFIG_64BIT 32 #ifdef CONFIG_64BIT
33 #define ARCH_OFFSET 4 33 #define ARCH_OFFSET 4
34 #else 34 #else
35 #define ARCH_OFFSET 0 35 #define ARCH_OFFSET 0
36 #endif 36 #endif
37 37
38 .section ".text.head","ax"
38 #ifndef CONFIG_IPL 39 #ifndef CONFIG_IPL
39 .org 0 40 .org 0
40 .long 0x00080000,0x80000000+startup # Just a restart PSW 41 .long 0x00080000,0x80000000+startup # Just a restart PSW
41 #else 42 #else
42 #ifdef CONFIG_IPL_TAPE 43 #ifdef CONFIG_IPL_TAPE
43 #define IPL_BS 1024 44 #define IPL_BS 1024
44 .org 0 45 .org 0
45 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded 46 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
46 .long 0x27000000,0x60000001 # by ipl to addresses 0-23. 47 .long 0x27000000,0x60000001 # by ipl to addresses 0-23.
47 .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs). 48 .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs).
48 .long 0x00000000,0x00000000 # external old psw 49 .long 0x00000000,0x00000000 # external old psw
49 .long 0x00000000,0x00000000 # svc old psw 50 .long 0x00000000,0x00000000 # svc old psw
50 .long 0x00000000,0x00000000 # program check old psw 51 .long 0x00000000,0x00000000 # program check old psw
51 .long 0x00000000,0x00000000 # machine check old psw 52 .long 0x00000000,0x00000000 # machine check old psw
52 .long 0x00000000,0x00000000 # io old psw 53 .long 0x00000000,0x00000000 # io old psw
53 .long 0x00000000,0x00000000 54 .long 0x00000000,0x00000000
54 .long 0x00000000,0x00000000 55 .long 0x00000000,0x00000000
55 .long 0x00000000,0x00000000 56 .long 0x00000000,0x00000000
56 .long 0x000a0000,0x00000058 # external new psw 57 .long 0x000a0000,0x00000058 # external new psw
57 .long 0x000a0000,0x00000060 # svc new psw 58 .long 0x000a0000,0x00000060 # svc new psw
58 .long 0x000a0000,0x00000068 # program check new psw 59 .long 0x000a0000,0x00000068 # program check new psw
59 .long 0x000a0000,0x00000070 # machine check new psw 60 .long 0x000a0000,0x00000070 # machine check new psw
60 .long 0x00080000,0x80000000+.Lioint # io new psw 61 .long 0x00080000,0x80000000+.Lioint # io new psw
61 62
62 .org 0x100 63 .org 0x100
63 # 64 #
64 # subroutine for loading from tape 65 # subroutine for loading from tape
65 # Parameters: 66 # Parameters:
66 # R1 = device number 67 # R1 = device number
67 # R2 = load address 68 # R2 = load address
68 .Lloader: 69 .Lloader:
69 st %r14,.Lldret 70 st %r14,.Lldret
70 la %r3,.Lorbread # r3 = address of orb 71 la %r3,.Lorbread # r3 = address of orb
71 la %r5,.Lirb # r5 = address of irb 72 la %r5,.Lirb # r5 = address of irb
72 st %r2,.Lccwread+4 # initialize CCW data addresses 73 st %r2,.Lccwread+4 # initialize CCW data addresses
73 lctl %c6,%c6,.Lcr6 74 lctl %c6,%c6,.Lcr6
74 slr %r2,%r2 75 slr %r2,%r2
75 .Lldlp: 76 .Lldlp:
76 la %r6,3 # 3 retries 77 la %r6,3 # 3 retries
77 .Lssch: 78 .Lssch:
78 ssch 0(%r3) # load chunk of IPL_BS bytes 79 ssch 0(%r3) # load chunk of IPL_BS bytes
79 bnz .Llderr 80 bnz .Llderr
80 .Lw4end: 81 .Lw4end:
81 bas %r14,.Lwait4io 82 bas %r14,.Lwait4io
82 tm 8(%r5),0x82 # do we have a problem ? 83 tm 8(%r5),0x82 # do we have a problem ?
83 bnz .Lrecov 84 bnz .Lrecov
84 slr %r7,%r7 85 slr %r7,%r7
85 icm %r7,3,10(%r5) # get residual count 86 icm %r7,3,10(%r5) # get residual count
86 lcr %r7,%r7 87 lcr %r7,%r7
87 la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read 88 la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read
88 ar %r2,%r7 # add to total size 89 ar %r2,%r7 # add to total size
89 tm 8(%r5),0x01 # found a tape mark ? 90 tm 8(%r5),0x01 # found a tape mark ?
90 bnz .Ldone 91 bnz .Ldone
91 l %r0,.Lccwread+4 # update CCW data addresses 92 l %r0,.Lccwread+4 # update CCW data addresses
92 ar %r0,%r7 93 ar %r0,%r7
93 st %r0,.Lccwread+4 94 st %r0,.Lccwread+4
94 b .Lldlp 95 b .Lldlp
95 .Ldone: 96 .Ldone:
96 l %r14,.Lldret 97 l %r14,.Lldret
97 br %r14 # r2 contains the total size 98 br %r14 # r2 contains the total size
98 .Lrecov: 99 .Lrecov:
99 bas %r14,.Lsense # do the sensing 100 bas %r14,.Lsense # do the sensing
100 bct %r6,.Lssch # dec. retry count & branch 101 bct %r6,.Lssch # dec. retry count & branch
101 b .Llderr 102 b .Llderr
102 # 103 #
103 # Sense subroutine 104 # Sense subroutine
104 # 105 #
105 .Lsense: 106 .Lsense:
106 st %r14,.Lsnsret 107 st %r14,.Lsnsret
107 la %r7,.Lorbsense 108 la %r7,.Lorbsense
108 ssch 0(%r7) # start sense command 109 ssch 0(%r7) # start sense command
109 bnz .Llderr 110 bnz .Llderr
110 bas %r14,.Lwait4io 111 bas %r14,.Lwait4io
111 l %r14,.Lsnsret 112 l %r14,.Lsnsret
112 tm 8(%r5),0x82 # do we have a problem ? 113 tm 8(%r5),0x82 # do we have a problem ?
113 bnz .Llderr 114 bnz .Llderr
114 br %r14 115 br %r14
115 # 116 #
116 # Wait for interrupt subroutine 117 # Wait for interrupt subroutine
117 # 118 #
118 .Lwait4io: 119 .Lwait4io:
119 lpsw .Lwaitpsw 120 lpsw .Lwaitpsw
120 .Lioint: 121 .Lioint:
121 c %r1,0xb8 # compare subchannel number 122 c %r1,0xb8 # compare subchannel number
122 bne .Lwait4io 123 bne .Lwait4io
123 tsch 0(%r5) 124 tsch 0(%r5)
124 slr %r0,%r0 125 slr %r0,%r0
125 tm 8(%r5),0x82 # do we have a problem ? 126 tm 8(%r5),0x82 # do we have a problem ?
126 bnz .Lwtexit 127 bnz .Lwtexit
127 tm 8(%r5),0x04 # got device end ? 128 tm 8(%r5),0x04 # got device end ?
128 bz .Lwait4io 129 bz .Lwait4io
129 .Lwtexit: 130 .Lwtexit:
130 br %r14 131 br %r14
131 .Llderr: 132 .Llderr:
132 lpsw .Lcrash 133 lpsw .Lcrash
133 134
134 .align 8 135 .align 8
135 .Lorbread: 136 .Lorbread:
136 .long 0x00000000,0x0080ff00,.Lccwread 137 .long 0x00000000,0x0080ff00,.Lccwread
137 .align 8 138 .align 8
138 .Lorbsense: 139 .Lorbsense:
139 .long 0x00000000,0x0080ff00,.Lccwsense 140 .long 0x00000000,0x0080ff00,.Lccwsense
140 .align 8 141 .align 8
141 .Lccwread: 142 .Lccwread:
142 .long 0x02200000+IPL_BS,0x00000000 143 .long 0x02200000+IPL_BS,0x00000000
143 .Lccwsense: 144 .Lccwsense:
144 .long 0x04200001,0x00000000 145 .long 0x04200001,0x00000000
145 .Lwaitpsw: 146 .Lwaitpsw:
146 .long 0x020a0000,0x80000000+.Lioint 147 .long 0x020a0000,0x80000000+.Lioint
147 148
148 .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 149 .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
149 .Lcr6: .long 0xff000000 150 .Lcr6: .long 0xff000000
150 .align 8 151 .align 8
151 .Lcrash:.long 0x000a0000,0x00000000 152 .Lcrash:.long 0x000a0000,0x00000000
152 .Lldret:.long 0 153 .Lldret:.long 0
153 .Lsnsret: .long 0 154 .Lsnsret: .long 0
154 #endif /* CONFIG_IPL_TAPE */ 155 #endif /* CONFIG_IPL_TAPE */
155 156
156 #ifdef CONFIG_IPL_VM 157 #ifdef CONFIG_IPL_VM
157 #define IPL_BS 0x730 158 #define IPL_BS 0x730
158 .org 0 159 .org 0
159 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded 160 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
160 .long 0x02000018,0x60000050 # by ipl to addresses 0-23. 161 .long 0x02000018,0x60000050 # by ipl to addresses 0-23.
161 .long 0x02000068,0x60000050 # (a PSW and two CCWs). 162 .long 0x02000068,0x60000050 # (a PSW and two CCWs).
162 .fill 80-24,1,0x40 # bytes 24-79 are discarded !! 163 .fill 80-24,1,0x40 # bytes 24-79 are discarded !!
163 .long 0x020000f0,0x60000050 # The next 160 byte are loaded 164 .long 0x020000f0,0x60000050 # The next 160 byte are loaded
164 .long 0x02000140,0x60000050 # to addresses 0x18-0xb7 165 .long 0x02000140,0x60000050 # to addresses 0x18-0xb7
165 .long 0x02000190,0x60000050 # They form the continuation 166 .long 0x02000190,0x60000050 # They form the continuation
166 .long 0x020001e0,0x60000050 # of the CCW program started 167 .long 0x020001e0,0x60000050 # of the CCW program started
167 .long 0x02000230,0x60000050 # by ipl and load the range 168 .long 0x02000230,0x60000050 # by ipl and load the range
168 .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image 169 .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image
169 .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730 170 .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730
170 .long 0x02000320,0x60000050 # in memory. At the end of 171 .long 0x02000320,0x60000050 # in memory. At the end of
171 .long 0x02000370,0x60000050 # the channel program the PSW 172 .long 0x02000370,0x60000050 # the channel program the PSW
172 .long 0x020003c0,0x60000050 # at location 0 is loaded. 173 .long 0x020003c0,0x60000050 # at location 0 is loaded.
173 .long 0x02000410,0x60000050 # Initial processing starts 174 .long 0x02000410,0x60000050 # Initial processing starts
174 .long 0x02000460,0x60000050 # at 0xf0 = iplstart. 175 .long 0x02000460,0x60000050 # at 0xf0 = iplstart.
175 .long 0x020004b0,0x60000050 176 .long 0x020004b0,0x60000050
176 .long 0x02000500,0x60000050 177 .long 0x02000500,0x60000050
177 .long 0x02000550,0x60000050 178 .long 0x02000550,0x60000050
178 .long 0x020005a0,0x60000050 179 .long 0x020005a0,0x60000050
179 .long 0x020005f0,0x60000050 180 .long 0x020005f0,0x60000050
180 .long 0x02000640,0x60000050 181 .long 0x02000640,0x60000050
181 .long 0x02000690,0x60000050 182 .long 0x02000690,0x60000050
182 .long 0x020006e0,0x20000050 183 .long 0x020006e0,0x20000050
183 184
184 .org 0xf0 185 .org 0xf0
185 # 186 #
186 # subroutine for loading cards from the reader 187 # subroutine for loading cards from the reader
187 # 188 #
188 .Lloader: 189 .Lloader:
189 la %r3,.Lorb # r2 = address of orb into r2 190 la %r3,.Lorb # r2 = address of orb into r2
190 la %r5,.Lirb # r4 = address of irb 191 la %r5,.Lirb # r4 = address of irb
191 la %r6,.Lccws 192 la %r6,.Lccws
192 la %r7,20 193 la %r7,20
193 .Linit: 194 .Linit:
194 st %r2,4(%r6) # initialize CCW data addresses 195 st %r2,4(%r6) # initialize CCW data addresses
195 la %r2,0x50(%r2) 196 la %r2,0x50(%r2)
196 la %r6,8(%r6) 197 la %r6,8(%r6)
197 bct 7,.Linit 198 bct 7,.Linit
198 199
199 lctl %c6,%c6,.Lcr6 # set IO subclass mask 200 lctl %c6,%c6,.Lcr6 # set IO subclass mask
200 slr %r2,%r2 201 slr %r2,%r2
201 .Lldlp: 202 .Lldlp:
202 ssch 0(%r3) # load chunk of 1600 bytes 203 ssch 0(%r3) # load chunk of 1600 bytes
203 bnz .Llderr 204 bnz .Llderr
204 .Lwait4irq: 205 .Lwait4irq:
205 mvc 0x78(8),.Lnewpsw # set up IO interrupt psw 206 mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
206 lpsw .Lwaitpsw 207 lpsw .Lwaitpsw
207 .Lioint: 208 .Lioint:
208 c %r1,0xb8 # compare subchannel number 209 c %r1,0xb8 # compare subchannel number
209 bne .Lwait4irq 210 bne .Lwait4irq
210 tsch 0(%r5) 211 tsch 0(%r5)
211 212
212 slr %r0,%r0 213 slr %r0,%r0
213 ic %r0,8(%r5) # get device status 214 ic %r0,8(%r5) # get device status
214 chi %r0,8 # channel end ? 215 chi %r0,8 # channel end ?
215 be .Lcont 216 be .Lcont
216 chi %r0,12 # channel end + device end ? 217 chi %r0,12 # channel end + device end ?
217 be .Lcont 218 be .Lcont
218 219
219 l %r0,4(%r5) 220 l %r0,4(%r5)
220 s %r0,8(%r3) # r0/8 = number of ccws executed 221 s %r0,8(%r3) # r0/8 = number of ccws executed
221 mhi %r0,10 # *10 = number of bytes in ccws 222 mhi %r0,10 # *10 = number of bytes in ccws
222 lh %r3,10(%r5) # get residual count 223 lh %r3,10(%r5) # get residual count
223 sr %r0,%r3 # #ccws*80-residual=#bytes read 224 sr %r0,%r3 # #ccws*80-residual=#bytes read
224 ar %r2,%r0 225 ar %r2,%r0
225 226
226 br %r14 # r2 contains the total size 227 br %r14 # r2 contains the total size
227 228
228 .Lcont: 229 .Lcont:
229 ahi %r2,0x640 # add 0x640 to total size 230 ahi %r2,0x640 # add 0x640 to total size
230 la %r6,.Lccws 231 la %r6,.Lccws
231 la %r7,20 232 la %r7,20
232 .Lincr: 233 .Lincr:
233 l %r0,4(%r6) # update CCW data addresses 234 l %r0,4(%r6) # update CCW data addresses
234 ahi %r0,0x640 235 ahi %r0,0x640
235 st %r0,4(%r6) 236 st %r0,4(%r6)
236 ahi %r6,8 237 ahi %r6,8
237 bct 7,.Lincr 238 bct 7,.Lincr
238 239
239 b .Lldlp 240 b .Lldlp
240 .Llderr: 241 .Llderr:
241 lpsw .Lcrash 242 lpsw .Lcrash
242 243
243 .align 8 244 .align 8
244 .Lorb: .long 0x00000000,0x0080ff00,.Lccws 245 .Lorb: .long 0x00000000,0x0080ff00,.Lccws
245 .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 246 .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
246 .Lcr6: .long 0xff000000 247 .Lcr6: .long 0xff000000
247 .Lloadp:.long 0,0 248 .Lloadp:.long 0,0
248 .align 8 249 .align 8
249 .Lcrash:.long 0x000a0000,0x00000000 250 .Lcrash:.long 0x000a0000,0x00000000
250 .Lnewpsw: 251 .Lnewpsw:
251 .long 0x00080000,0x80000000+.Lioint 252 .long 0x00080000,0x80000000+.Lioint
252 .Lwaitpsw: 253 .Lwaitpsw:
253 .long 0x020a0000,0x80000000+.Lioint 254 .long 0x020a0000,0x80000000+.Lioint
254 255
255 .align 8 256 .align 8
256 .Lccws: .rept 19 257 .Lccws: .rept 19
257 .long 0x02600050,0x00000000 258 .long 0x02600050,0x00000000
258 .endr 259 .endr
259 .long 0x02200050,0x00000000 260 .long 0x02200050,0x00000000
260 #endif /* CONFIG_IPL_VM */ 261 #endif /* CONFIG_IPL_VM */
261 262
262 iplstart: 263 iplstart:
263 lh %r1,0xb8 # test if subchannel number 264 lh %r1,0xb8 # test if subchannel number
264 bct %r1,.Lnoload # is valid 265 bct %r1,.Lnoload # is valid
265 l %r1,0xb8 # load ipl subchannel number 266 l %r1,0xb8 # load ipl subchannel number
266 la %r2,IPL_BS # load start address 267 la %r2,IPL_BS # load start address
267 bas %r14,.Lloader # load rest of ipl image 268 bas %r14,.Lloader # load rest of ipl image
268 l %r12,.Lparm # pointer to parameter area 269 l %r12,.Lparm # pointer to parameter area
269 st %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number 270 st %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number
270 271
271 # 272 #
272 # load parameter file from ipl device 273 # load parameter file from ipl device
273 # 274 #
274 .Lagain1: 275 .Lagain1:
275 l %r2,.Linitrd # ramdisk loc. is temp 276 l %r2,.Linitrd # ramdisk loc. is temp
276 bas %r14,.Lloader # load parameter file 277 bas %r14,.Lloader # load parameter file
277 ltr %r2,%r2 # got anything ? 278 ltr %r2,%r2 # got anything ?
278 bz .Lnopf 279 bz .Lnopf
279 chi %r2,895 280 chi %r2,895
280 bnh .Lnotrunc 281 bnh .Lnotrunc
281 la %r2,895 282 la %r2,895
282 .Lnotrunc: 283 .Lnotrunc:
283 l %r4,.Linitrd 284 l %r4,.Linitrd
284 clc 0(3,%r4),.L_hdr # if it is HDRx 285 clc 0(3,%r4),.L_hdr # if it is HDRx
285 bz .Lagain1 # skip dataset header 286 bz .Lagain1 # skip dataset header
286 clc 0(3,%r4),.L_eof # if it is EOFx 287 clc 0(3,%r4),.L_eof # if it is EOFx
287 bz .Lagain1 # skip dataset trailer 288 bz .Lagain1 # skip dataset trailer
288 la %r5,0(%r4,%r2) 289 la %r5,0(%r4,%r2)
289 lr %r3,%r2 290 lr %r3,%r2
290 .Lidebc: 291 .Lidebc:
291 tm 0(%r5),0x80 # high order bit set ? 292 tm 0(%r5),0x80 # high order bit set ?
292 bo .Ldocv # yes -> convert from EBCDIC 293 bo .Ldocv # yes -> convert from EBCDIC
293 ahi %r5,-1 294 ahi %r5,-1
294 bct %r3,.Lidebc 295 bct %r3,.Lidebc
295 b .Lnocv 296 b .Lnocv
296 .Ldocv: 297 .Ldocv:
297 l %r3,.Lcvtab 298 l %r3,.Lcvtab
298 tr 0(256,%r4),0(%r3) # convert parameters to ascii 299 tr 0(256,%r4),0(%r3) # convert parameters to ascii
299 tr 256(256,%r4),0(%r3) 300 tr 256(256,%r4),0(%r3)
300 tr 512(256,%r4),0(%r3) 301 tr 512(256,%r4),0(%r3)
301 tr 768(122,%r4),0(%r3) 302 tr 768(122,%r4),0(%r3)
302 .Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line 303 .Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
303 mvc 0(256,%r3),0(%r4) 304 mvc 0(256,%r3),0(%r4)
304 mvc 256(256,%r3),256(%r4) 305 mvc 256(256,%r3),256(%r4)
305 mvc 512(256,%r3),512(%r4) 306 mvc 512(256,%r3),512(%r4)
306 mvc 768(122,%r3),768(%r4) 307 mvc 768(122,%r3),768(%r4)
307 slr %r0,%r0 308 slr %r0,%r0
308 b .Lcntlp 309 b .Lcntlp
309 .Ldelspc: 310 .Ldelspc:
310 ic %r0,0(%r2,%r3) 311 ic %r0,0(%r2,%r3)
311 chi %r0,0x20 # is it a space ? 312 chi %r0,0x20 # is it a space ?
312 be .Lcntlp 313 be .Lcntlp
313 ahi %r2,1 314 ahi %r2,1
314 b .Leolp 315 b .Leolp
315 .Lcntlp: 316 .Lcntlp:
316 brct %r2,.Ldelspc 317 brct %r2,.Ldelspc
317 .Leolp: 318 .Leolp:
318 slr %r0,%r0 319 slr %r0,%r0
319 stc %r0,0(%r2,%r3) # terminate buffer 320 stc %r0,0(%r2,%r3) # terminate buffer
320 .Lnopf: 321 .Lnopf:
321 322
322 # 323 #
323 # load ramdisk from ipl device 324 # load ramdisk from ipl device
324 # 325 #
325 .Lagain2: 326 .Lagain2:
326 l %r2,.Linitrd # addr of ramdisk 327 l %r2,.Linitrd # addr of ramdisk
327 st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) 328 st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
328 bas %r14,.Lloader # load ramdisk 329 bas %r14,.Lloader # load ramdisk
329 st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of rd 330 st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of rd
330 ltr %r2,%r2 331 ltr %r2,%r2
331 bnz .Lrdcont 332 bnz .Lrdcont
332 st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found 333 st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
333 .Lrdcont: 334 .Lrdcont:
334 l %r2,.Linitrd 335 l %r2,.Linitrd
335 336
336 clc 0(3,%r2),.L_hdr # skip HDRx and EOFx 337 clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
337 bz .Lagain2 338 bz .Lagain2
338 clc 0(3,%r2),.L_eof 339 clc 0(3,%r2),.L_eof
339 bz .Lagain2 340 bz .Lagain2
340 341
341 #ifdef CONFIG_IPL_VM 342 #ifdef CONFIG_IPL_VM
342 # 343 #
343 # reset files in VM reader 344 # reset files in VM reader
344 # 345 #
345 stidp __LC_CPUID # store cpuid 346 stidp __LC_CPUID # store cpuid
346 tm __LC_CPUID,0xff # running VM ? 347 tm __LC_CPUID,0xff # running VM ?
347 bno .Lnoreset 348 bno .Lnoreset
348 la %r2,.Lreset 349 la %r2,.Lreset
349 lhi %r3,26 350 lhi %r3,26
350 diag %r2,%r3,8 351 diag %r2,%r3,8
351 la %r5,.Lirb 352 la %r5,.Lirb
352 stsch 0(%r5) # check if irq is pending 353 stsch 0(%r5) # check if irq is pending
353 tm 30(%r5),0x0f # by verifying if any of the 354 tm 30(%r5),0x0f # by verifying if any of the
354 bnz .Lwaitforirq # activity or status control 355 bnz .Lwaitforirq # activity or status control
355 tm 31(%r5),0xff # bits is set in the schib 356 tm 31(%r5),0xff # bits is set in the schib
356 bz .Lnoreset 357 bz .Lnoreset
357 .Lwaitforirq: 358 .Lwaitforirq:
358 mvc 0x78(8),.Lrdrnewpsw # set up IO interrupt psw 359 mvc 0x78(8),.Lrdrnewpsw # set up IO interrupt psw
359 .Lwaitrdrirq: 360 .Lwaitrdrirq:
360 lpsw .Lrdrwaitpsw 361 lpsw .Lrdrwaitpsw
361 .Lrdrint: 362 .Lrdrint:
362 c %r1,0xb8 # compare subchannel number 363 c %r1,0xb8 # compare subchannel number
363 bne .Lwaitrdrirq 364 bne .Lwaitrdrirq
364 la %r5,.Lirb 365 la %r5,.Lirb
365 tsch 0(%r5) 366 tsch 0(%r5)
366 .Lnoreset: 367 .Lnoreset:
367 b .Lnoload 368 b .Lnoload
368 369
369 .align 8 370 .align 8
370 .Lrdrnewpsw: 371 .Lrdrnewpsw:
371 .long 0x00080000,0x80000000+.Lrdrint 372 .long 0x00080000,0x80000000+.Lrdrint
372 .Lrdrwaitpsw: 373 .Lrdrwaitpsw:
373 .long 0x020a0000,0x80000000+.Lrdrint 374 .long 0x020a0000,0x80000000+.Lrdrint
374 #endif 375 #endif
375 376
376 # 377 #
377 # everything loaded, go for it 378 # everything loaded, go for it
378 # 379 #
379 .Lnoload: 380 .Lnoload:
380 l %r1,.Lstartup 381 l %r1,.Lstartup
381 br %r1 382 br %r1
382 383
383 .Linitrd:.long _end + 0x400000 # default address of initrd 384 .Linitrd:.long _end + 0x400000 # default address of initrd
384 .Lparm: .long PARMAREA 385 .Lparm: .long PARMAREA
385 .Lstartup: .long startup 386 .Lstartup: .long startup
386 .Lcvtab:.long _ebcasc # ebcdic to ascii table 387 .Lcvtab:.long _ebcasc # ebcdic to ascii table
387 .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 388 .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
388 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 389 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
389 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" 390 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
390 .L_eof: .long 0xc5d6c600 /* C'EOF' */ 391 .L_eof: .long 0xc5d6c600 /* C'EOF' */
391 .L_hdr: .long 0xc8c4d900 /* C'HDR' */ 392 .L_hdr: .long 0xc8c4d900 /* C'HDR' */
392 393
393 #endif /* CONFIG_IPL */ 394 #endif /* CONFIG_IPL */
394 395
395 # 396 #
396 # SALIPL loader support. Based on a patch by Rob van der Heij. 397 # SALIPL loader support. Based on a patch by Rob van der Heij.
397 # This entry point is called directly from the SALIPL loader and 398 # This entry point is called directly from the SALIPL loader and
398 # doesn't need a builtin ipl record. 399 # doesn't need a builtin ipl record.
399 # 400 #
400 .org 0x800 401 .org 0x800
401 .globl start 402 .globl start
402 start: 403 start:
403 stm %r0,%r15,0x07b0 # store registers 404 stm %r0,%r15,0x07b0 # store registers
404 basr %r12,%r0 405 basr %r12,%r0
405 .base: 406 .base:
406 l %r11,.parm 407 l %r11,.parm
407 l %r8,.cmd # pointer to command buffer 408 l %r8,.cmd # pointer to command buffer
408 409
409 ltr %r9,%r9 # do we have SALIPL parameters? 410 ltr %r9,%r9 # do we have SALIPL parameters?
410 bp .sk8x8 411 bp .sk8x8
411 412
412 mvc 0(64,%r8),0x00b0 # copy saved registers 413 mvc 0(64,%r8),0x00b0 # copy saved registers
413 xc 64(240-64,%r8),0(%r8) # remainder of buffer 414 xc 64(240-64,%r8),0(%r8) # remainder of buffer
414 tr 0(64,%r8),.lowcase 415 tr 0(64,%r8),.lowcase
415 b .gotr 416 b .gotr
416 .sk8x8: 417 .sk8x8:
417 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer 418 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
418 .gotr: 419 .gotr:
419 l %r10,.tbl # EBCDIC to ASCII table 420 l %r10,.tbl # EBCDIC to ASCII table
420 tr 0(240,%r8),0(%r10) 421 tr 0(240,%r8),0(%r10)
421 slr %r0,%r0 422 slr %r0,%r0
422 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) 423 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
423 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) 424 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
424 j startup # continue with startup 425 j startup # continue with startup
425 .tbl: .long _ebcasc # translate table 426 .tbl: .long _ebcasc # translate table
426 .cmd: .long COMMAND_LINE # address of command line buffer 427 .cmd: .long COMMAND_LINE # address of command line buffer
427 .parm: .long PARMAREA 428 .parm: .long PARMAREA
428 .lowcase: 429 .lowcase:
429 .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 430 .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
430 .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f 431 .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
431 .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 432 .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
432 .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f 433 .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
433 .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 434 .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
434 .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f 435 .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
435 .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 436 .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
436 .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f 437 .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
437 .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 438 .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
438 .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f 439 .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
439 .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 440 .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
440 .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f 441 .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
441 .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 442 .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
442 .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f 443 .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
443 .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 444 .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
444 .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f 445 .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
445 446
446 .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 447 .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
447 .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f 448 .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
448 .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 449 .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
449 .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f 450 .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
450 .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 451 .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
451 .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf 452 .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
452 .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 453 .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
453 .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf 454 .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
454 .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg 455 .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
455 .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi 456 .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
456 .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop 457 .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
457 .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr 458 .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
458 .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx 459 .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
459 .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz 460 .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
460 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 461 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
461 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff 462 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
462 463
463 #ifdef CONFIG_64BIT 464 #ifdef CONFIG_64BIT
464 #include "head64.S" 465 #include "head64.S"
465 #else 466 #else
466 #include "head31.S" 467 #include "head31.S"
467 #endif 468 #endif
468 469
arch/s390/kernel/vmlinux.lds.S
1 /* ld script to make s390 Linux kernel 1 /* ld script to make s390 Linux kernel
2 * Written by Martin Schwidefsky (schwidefsky@de.ibm.com) 2 * Written by Martin Schwidefsky (schwidefsky@de.ibm.com)
3 */ 3 */
4 4
5 #include <asm-generic/vmlinux.lds.h> 5 #include <asm-generic/vmlinux.lds.h>
6 6
7 #ifndef CONFIG_64BIT 7 #ifndef CONFIG_64BIT
8 OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") 8 OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
9 OUTPUT_ARCH(s390) 9 OUTPUT_ARCH(s390)
10 ENTRY(_start) 10 ENTRY(_start)
11 jiffies = jiffies_64 + 4; 11 jiffies = jiffies_64 + 4;
12 #else 12 #else
13 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 13 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
14 OUTPUT_ARCH(s390:64-bit) 14 OUTPUT_ARCH(s390:64-bit)
15 ENTRY(_start) 15 ENTRY(_start)
16 jiffies = jiffies_64; 16 jiffies = jiffies_64;
17 #endif 17 #endif
18 18
19 SECTIONS 19 SECTIONS
20 { 20 {
21 . = 0x00000000; 21 . = 0x00000000;
22 _text = .; /* Text and read-only data */ 22 _text = .; /* Text and read-only data */
23 .text : { 23 .text : {
24 *(.text.head)
24 TEXT_TEXT 25 TEXT_TEXT
25 SCHED_TEXT 26 SCHED_TEXT
26 LOCK_TEXT 27 LOCK_TEXT
27 KPROBES_TEXT 28 KPROBES_TEXT
28 *(.fixup) 29 *(.fixup)
29 *(.gnu.warning) 30 *(.gnu.warning)
30 } = 0x0700 31 } = 0x0700
31 32
32 _etext = .; /* End of text section */ 33 _etext = .; /* End of text section */
33 34
34 RODATA 35 RODATA
35 36
36 #ifdef CONFIG_SHARED_KERNEL 37 #ifdef CONFIG_SHARED_KERNEL
37 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ 38 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */
38 #endif 39 #endif
39 40
40 . = ALIGN(4096); 41 . = ALIGN(4096);
41 _eshared = .; /* End of shareable data */ 42 _eshared = .; /* End of shareable data */
42 43
43 . = ALIGN(16); /* Exception table */ 44 . = ALIGN(16); /* Exception table */
44 __start___ex_table = .; 45 __start___ex_table = .;
45 __ex_table : { *(__ex_table) } 46 __ex_table : { *(__ex_table) }
46 __stop___ex_table = .; 47 __stop___ex_table = .;
47 48
48 NOTES 49 NOTES
49 50
50 BUG_TABLE 51 BUG_TABLE
51 52
52 .data : { /* Data */ 53 .data : { /* Data */
53 DATA_DATA 54 DATA_DATA
54 CONSTRUCTORS 55 CONSTRUCTORS
55 } 56 }
56 57
57 . = ALIGN(4096); 58 . = ALIGN(4096);
58 __nosave_begin = .; 59 __nosave_begin = .;
59 .data_nosave : { *(.data.nosave) } 60 .data_nosave : { *(.data.nosave) }
60 . = ALIGN(4096); 61 . = ALIGN(4096);
61 __nosave_end = .; 62 __nosave_end = .;
62 63
63 . = ALIGN(4096); 64 . = ALIGN(4096);
64 .data.page_aligned : { *(.data.idt) } 65 .data.page_aligned : { *(.data.idt) }
65 66
66 . = ALIGN(256); 67 . = ALIGN(256);
67 .data.cacheline_aligned : { *(.data.cacheline_aligned) } 68 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
68 69
69 . = ALIGN(256); 70 . = ALIGN(256);
70 .data.read_mostly : { *(.data.read_mostly) } 71 .data.read_mostly : { *(.data.read_mostly) }
71 _edata = .; /* End of data section */ 72 _edata = .; /* End of data section */
72 73
73 . = ALIGN(8192); /* init_task */ 74 . = ALIGN(8192); /* init_task */
74 .data.init_task : { *(.data.init_task) } 75 .data.init_task : { *(.data.init_task) }
75 76
76 /* will be freed after init */ 77 /* will be freed after init */
77 . = ALIGN(4096); /* Init code and data */ 78 . = ALIGN(4096); /* Init code and data */
78 __init_begin = .; 79 __init_begin = .;
79 .init.text : { 80 .init.text : {
80 _sinittext = .; 81 _sinittext = .;
81 *(.init.text) 82 *(.init.text)
82 _einittext = .; 83 _einittext = .;
83 } 84 }
84 /* 85 /*
85 * .exit.text is discarded at runtime, not link time, 86 * .exit.text is discarded at runtime, not link time,
86 * to deal with references from __bug_table 87 * to deal with references from __bug_table
87 */ 88 */
88 .exit.text : { *(.exit.text) } 89 .exit.text : { *(.exit.text) }
89 90
90 .init.data : { *(.init.data) } 91 .init.data : { *(.init.data) }
91 . = ALIGN(256); 92 . = ALIGN(256);
92 __setup_start = .; 93 __setup_start = .;
93 .init.setup : { *(.init.setup) } 94 .init.setup : { *(.init.setup) }
94 __setup_end = .; 95 __setup_end = .;
95 __initcall_start = .; 96 __initcall_start = .;
96 .initcall.init : { 97 .initcall.init : {
97 INITCALLS 98 INITCALLS
98 } 99 }
99 __initcall_end = .; 100 __initcall_end = .;
100 __con_initcall_start = .; 101 __con_initcall_start = .;
101 .con_initcall.init : { *(.con_initcall.init) } 102 .con_initcall.init : { *(.con_initcall.init) }
102 __con_initcall_end = .; 103 __con_initcall_end = .;
103 SECURITY_INIT 104 SECURITY_INIT
104 105
105 #ifdef CONFIG_BLK_DEV_INITRD 106 #ifdef CONFIG_BLK_DEV_INITRD
106 . = ALIGN(256); 107 . = ALIGN(256);
107 __initramfs_start = .; 108 __initramfs_start = .;
108 .init.ramfs : { *(.init.initramfs) } 109 .init.ramfs : { *(.init.initramfs) }
109 . = ALIGN(2); 110 . = ALIGN(2);
110 __initramfs_end = .; 111 __initramfs_end = .;
111 #endif 112 #endif
112 PERCPU(4096) 113 PERCPU(4096)
113 . = ALIGN(4096); 114 . = ALIGN(4096);
114 __init_end = .; 115 __init_end = .;
115 /* freed after init ends here */ 116 /* freed after init ends here */
116 117
117 __bss_start = .; /* BSS */ 118 __bss_start = .; /* BSS */
118 .bss : { *(.bss) } 119 .bss : { *(.bss) }
119 . = ALIGN(2); 120 . = ALIGN(2);
120 __bss_stop = .; 121 __bss_stop = .;
121 122
122 _end = . ; 123 _end = . ;
123 124
124 /* Sections to be discarded */ 125 /* Sections to be discarded */
125 /DISCARD/ : { 126 /DISCARD/ : {
126 *(.exit.data) *(.exitcall.exit) 127 *(.exit.data) *(.exitcall.exit)
127 } 128 }
128 129
129 /* Stabs debugging sections. */ 130 /* Stabs debugging sections. */
130 .stab 0 : { *(.stab) } 131 .stab 0 : { *(.stab) }
131 .stabstr 0 : { *(.stabstr) } 132 .stabstr 0 : { *(.stabstr) }
132 .stab.excl 0 : { *(.stab.excl) } 133 .stab.excl 0 : { *(.stab.excl) }
133 .stab.exclstr 0 : { *(.stab.exclstr) } 134 .stab.exclstr 0 : { *(.stab.exclstr) }
134 .stab.index 0 : { *(.stab.index) } 135 .stab.index 0 : { *(.stab.index) }
135 .stab.indexstr 0 : { *(.stab.indexstr) } 136 .stab.indexstr 0 : { *(.stab.indexstr) }
136 .comment 0 : { *(.comment) } 137 .comment 0 : { *(.comment) }
137 } 138 }
138 139
1 /* 1 /*
2 * arch/s390/mm/vmem.c 2 * arch/s390/mm/vmem.c
3 * 3 *
4 * Copyright IBM Corp. 2006 4 * Copyright IBM Corp. 2006
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */ 6 */
7 7
8 #include <linux/bootmem.h> 8 #include <linux/bootmem.h>
9 #include <linux/pfn.h> 9 #include <linux/pfn.h>
10 #include <linux/mm.h> 10 #include <linux/mm.h>
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/list.h> 12 #include <linux/list.h>
13 #include <asm/pgalloc.h> 13 #include <asm/pgalloc.h>
14 #include <asm/pgtable.h> 14 #include <asm/pgtable.h>
15 #include <asm/setup.h> 15 #include <asm/setup.h>
16 #include <asm/tlbflush.h> 16 #include <asm/tlbflush.h>
17 17
18 unsigned long vmalloc_end; 18 unsigned long vmalloc_end;
19 EXPORT_SYMBOL(vmalloc_end); 19 EXPORT_SYMBOL(vmalloc_end);
20 20
21 static struct page *vmem_map; 21 static struct page *vmem_map;
22 static DEFINE_MUTEX(vmem_mutex); 22 static DEFINE_MUTEX(vmem_mutex);
23 23
/* A contiguous physical memory range tracked on the mem_segs list. */
struct memory_segment {
	struct list_head list;	/* link on the global mem_segs list */
	unsigned long start;	/* physical start address */
	unsigned long size;	/* size in bytes */
};
29 29
30 static LIST_HEAD(mem_segs); 30 static LIST_HEAD(mem_segs);
31 31
/*
 * Initialize the struct page array for a zone, restricted to pages that
 * are backed by a real memory chunk. The virtual mem_map may contain
 * holes, so each memory chunk's mem_map slice is clipped against the
 * [start_pfn, start_pfn + size) window before calling memmap_init_zone().
 * __meminit: only needed during (memory hotplug) initialization.
 */
void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn)
{
	struct page *start, *end;
	struct page *map_start, *map_end;
	int i;

	start = pfn_to_page(start_pfn);
	end = start + size;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long cstart, cend;

		/* pfn range covered by this memory chunk */
		cstart = PFN_DOWN(memory_chunk[i].addr);
		cend = cstart + PFN_DOWN(memory_chunk[i].size);

		map_start = mem_map + cstart;
		map_end = mem_map + cend;

		/* Clip against the requested window. */
		if (map_start < start)
			map_start = start;
		if (map_end > end)
			map_end = end;

		/*
		 * Round map_start down and map_end up to whole-page
		 * boundaries of the mem_map array itself, expressed in
		 * struct page units.
		 */
		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
			/ sizeof(struct page);
		map_end += ((PFN_ALIGN((unsigned long) map_end)
			     - (unsigned long) map_end)
			    / sizeof(struct page));

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start),
					 MEMMAP_EARLY);
	}
}
68 68
69 static inline void *vmem_alloc_pages(unsigned int order) 69 static void __init_refok *vmem_alloc_pages(unsigned int order)
70 { 70 {
71 if (slab_is_available()) 71 if (slab_is_available())
72 return (void *)__get_free_pages(GFP_KERNEL, order); 72 return (void *)__get_free_pages(GFP_KERNEL, order);
73 return alloc_bootmem_pages((1 << order) * PAGE_SIZE); 73 return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
74 } 74 }
75 75
76 static inline pmd_t *vmem_pmd_alloc(void) 76 static inline pmd_t *vmem_pmd_alloc(void)
77 { 77 {
78 pmd_t *pmd; 78 pmd_t *pmd;
79 int i; 79 int i;
80 80
81 pmd = vmem_alloc_pages(PMD_ALLOC_ORDER); 81 pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
82 if (!pmd) 82 if (!pmd)
83 return NULL; 83 return NULL;
84 for (i = 0; i < PTRS_PER_PMD; i++) 84 for (i = 0; i < PTRS_PER_PMD; i++)
85 pmd_clear_kernel(pmd + i); 85 pmd_clear_kernel(pmd + i);
86 return pmd; 86 return pmd;
87 } 87 }
88 88
89 static inline pte_t *vmem_pte_alloc(void) 89 static inline pte_t *vmem_pte_alloc(void)
90 { 90 {
91 pte_t *pte; 91 pte_t *pte;
92 pte_t empty_pte; 92 pte_t empty_pte;
93 int i; 93 int i;
94 94
95 pte = vmem_alloc_pages(PTE_ALLOC_ORDER); 95 pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
96 if (!pte) 96 if (!pte)
97 return NULL; 97 return NULL;
98 pte_val(empty_pte) = _PAGE_TYPE_EMPTY; 98 pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
99 for (i = 0; i < PTRS_PER_PTE; i++) 99 for (i = 0; i < PTRS_PER_PTE; i++)
100 pte[i] = empty_pte; 100 pte[i] = empty_pte;
101 return pte; 101 return pte;
102 } 102 }
103 103
/*
 * Add a physical memory range to the kernel 1:1 mapping.
 * Walks the page tables page by page, allocating missing pmd/pte
 * tables on demand. Returns 0 on success, -ENOMEM if a table
 * allocation fails (the TLB is flushed either way, since part of
 * the range may already have been mapped).
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			/* No pmd table yet for this pgd slot. */
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
		}

		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir)) {
			/* No pte table yet for this pmd slot. */
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		/* Identity mapping: virtual address == physical address. */
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}
142 142
/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries (sets them to
 * _PAGE_TYPE_EMPTY); the pmd/pte tables themselves are not freed.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir))
			continue;	/* never mapped, nothing to clear */
		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir))
			continue;
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}
168 168
169 /* 169 /*
170 * Add a backed mem_map array to the virtual mem_map array. 170 * Add a backed mem_map array to the virtual mem_map array.
171 */ 171 */
172 static int vmem_add_mem_map(unsigned long start, unsigned long size) 172 static int vmem_add_mem_map(unsigned long start, unsigned long size)
173 { 173 {
174 unsigned long address, start_addr, end_addr; 174 unsigned long address, start_addr, end_addr;
175 struct page *map_start, *map_end; 175 struct page *map_start, *map_end;
176 pgd_t *pg_dir; 176 pgd_t *pg_dir;
177 pmd_t *pm_dir; 177 pmd_t *pm_dir;
178 pte_t *pt_dir; 178 pte_t *pt_dir;
179 pte_t pte; 179 pte_t pte;
180 int ret = -ENOMEM; 180 int ret = -ENOMEM;
181 181
182 map_start = vmem_map + PFN_DOWN(start); 182 map_start = vmem_map + PFN_DOWN(start);
183 map_end = vmem_map + PFN_DOWN(start + size); 183 map_end = vmem_map + PFN_DOWN(start + size);
184 184
185 start_addr = (unsigned long) map_start & PAGE_MASK; 185 start_addr = (unsigned long) map_start & PAGE_MASK;
186 end_addr = PFN_ALIGN((unsigned long) map_end); 186 end_addr = PFN_ALIGN((unsigned long) map_end);
187 187
188 for (address = start_addr; address < end_addr; address += PAGE_SIZE) { 188 for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
189 pg_dir = pgd_offset_k(address); 189 pg_dir = pgd_offset_k(address);
190 if (pgd_none(*pg_dir)) { 190 if (pgd_none(*pg_dir)) {
191 pm_dir = vmem_pmd_alloc(); 191 pm_dir = vmem_pmd_alloc();
192 if (!pm_dir) 192 if (!pm_dir)
193 goto out; 193 goto out;
194 pgd_populate_kernel(&init_mm, pg_dir, pm_dir); 194 pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
195 } 195 }
196 196
197 pm_dir = pmd_offset(pg_dir, address); 197 pm_dir = pmd_offset(pg_dir, address);
198 if (pmd_none(*pm_dir)) { 198 if (pmd_none(*pm_dir)) {
199 pt_dir = vmem_pte_alloc(); 199 pt_dir = vmem_pte_alloc();
200 if (!pt_dir) 200 if (!pt_dir)
201 goto out; 201 goto out;
202 pmd_populate_kernel(&init_mm, pm_dir, pt_dir); 202 pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
203 } 203 }
204 204
205 pt_dir = pte_offset_kernel(pm_dir, address); 205 pt_dir = pte_offset_kernel(pm_dir, address);
206 if (pte_none(*pt_dir)) { 206 if (pte_none(*pt_dir)) {
207 unsigned long new_page; 207 unsigned long new_page;
208 208
209 new_page =__pa(vmem_alloc_pages(0)); 209 new_page =__pa(vmem_alloc_pages(0));
210 if (!new_page) 210 if (!new_page)
211 goto out; 211 goto out;
212 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); 212 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
213 *pt_dir = pte; 213 *pt_dir = pte;
214 } 214 }
215 } 215 }
216 ret = 0; 216 ret = 0;
217 out: 217 out:
218 flush_tlb_kernel_range(start_addr, end_addr); 218 flush_tlb_kernel_range(start_addr, end_addr);
219 return ret; 219 return ret;
220 } 220 }
221 221
/*
 * Map a physical memory range 1:1 and back its mem_map entries.
 * Returns 0 on success, a negative error code otherwise.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int rc;

	rc = vmem_add_range(start, size);
	return rc ? rc : vmem_add_mem_map(start, size);
}
231 231
/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 * Returns 0 on success, -ERANGE if the segment exceeds max_pfn or
 * wraps around, -ENOSPC if it overlaps an existing segment.
 * Caller is expected to hold vmem_mutex (both callers in this file do).
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	/* Range check: end beyond max_pfn, or address wrap-around. */
	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;	/* seg lies entirely above tmp */
		if (seg->start + seg->size <= tmp->start)
			continue;	/* seg lies entirely below tmp */
		return -ENOSPC;		/* overlap */
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}
254 254
255 /* 255 /*
256 * Remove memory segment from the segment list. 256 * Remove memory segment from the segment list.
257 */ 257 */
258 static void remove_memory_segment(struct memory_segment *seg) 258 static void remove_memory_segment(struct memory_segment *seg)
259 { 259 {
260 list_del(&seg->list); 260 list_del(&seg->list);
261 } 261 }
262 262
263 static void __remove_shared_memory(struct memory_segment *seg) 263 static void __remove_shared_memory(struct memory_segment *seg)
264 { 264 {
265 remove_memory_segment(seg); 265 remove_memory_segment(seg);
266 vmem_remove_range(seg->start, seg->size); 266 vmem_remove_range(seg->start, seg->size);
267 } 267 }
268 268
269 int remove_shared_memory(unsigned long start, unsigned long size) 269 int remove_shared_memory(unsigned long start, unsigned long size)
270 { 270 {
271 struct memory_segment *seg; 271 struct memory_segment *seg;
272 int ret; 272 int ret;
273 273
274 mutex_lock(&vmem_mutex); 274 mutex_lock(&vmem_mutex);
275 275
276 ret = -ENOENT; 276 ret = -ENOENT;
277 list_for_each_entry(seg, &mem_segs, list) { 277 list_for_each_entry(seg, &mem_segs, list) {
278 if (seg->start == start && seg->size == size) 278 if (seg->start == start && seg->size == size)
279 break; 279 break;
280 } 280 }
281 281
282 if (seg->start != start || seg->size != size) 282 if (seg->start != start || seg->size != size)
283 goto out; 283 goto out;
284 284
285 ret = 0; 285 ret = 0;
286 __remove_shared_memory(seg); 286 __remove_shared_memory(seg);
287 kfree(seg); 287 kfree(seg);
288 out: 288 out:
289 mutex_unlock(&vmem_mutex); 289 mutex_unlock(&vmem_mutex);
290 return ret; 290 return ret;
291 } 291 }
292 292
/*
 * Register and map a shared memory segment: insert it into the segment
 * list, establish the 1:1 mapping and mem_map backing, then initialize
 * the struct page entries for its pages as reserved.
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from insert_memory_segment() (-ERANGE/-ENOSPC).
 */
int add_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;

	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	/* Clear the new struct page entries, then mark each reserved. */
	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;

	/* Error unwinding in reverse order of acquisition. */
out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
340 340
/*
 * Map whole physical memory to virtual memory (identity mapping).
 * Places the virtual mem_map at the top of the vmalloc area (lowering
 * vmalloc_end accordingly), then maps every memory chunk.
 */
void __init vmem_map_init(void)
{
	unsigned long map_size;
	int i;

	/* Size of the mem_map array, rounded up to MAX_ORDER granularity. */
	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
	vmem_map = (struct page *) vmalloc_end;
	NODE_DATA(0)->node_mem_map = vmem_map;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}
357 357
358 /* 358 /*
359 * Convert memory chunk array to a memory segment list so there is a single 359 * Convert memory chunk array to a memory segment list so there is a single
360 * list that contains both r/w memory and shared memory segments. 360 * list that contains both r/w memory and shared memory segments.
361 */ 361 */
362 static int __init vmem_convert_memory_chunk(void) 362 static int __init vmem_convert_memory_chunk(void)
363 { 363 {
364 struct memory_segment *seg; 364 struct memory_segment *seg;
365 int i; 365 int i;
366 366
367 mutex_lock(&vmem_mutex); 367 mutex_lock(&vmem_mutex);
368 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { 368 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
369 if (!memory_chunk[i].size) 369 if (!memory_chunk[i].size)
370 continue; 370 continue;
371 seg = kzalloc(sizeof(*seg), GFP_KERNEL); 371 seg = kzalloc(sizeof(*seg), GFP_KERNEL);
372 if (!seg) 372 if (!seg)
373 panic("Out of memory...\n"); 373 panic("Out of memory...\n");
374 seg->start = memory_chunk[i].addr; 374 seg->start = memory_chunk[i].addr;
375 seg->size = memory_chunk[i].size; 375 seg->size = memory_chunk[i].size;
376 insert_memory_segment(seg); 376 insert_memory_segment(seg);
377 } 377 }
378 mutex_unlock(&vmem_mutex); 378 mutex_unlock(&vmem_mutex);
379 return 0; 379 return 0;
380 } 380 }
381 381
382 core_initcall(vmem_convert_memory_chunk); 382 core_initcall(vmem_convert_memory_chunk);
383 383
drivers/s390/char/raw3270.c
1 /* 1 /*
2 * drivers/s390/char/raw3270.c 2 * drivers/s390/char/raw3270.c
3 * IBM/3270 Driver - core functions. 3 * IBM/3270 Driver - core functions.
4 * 4 *
5 * Author(s): 5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) 6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */ 9 */
10 10
11 #include <linux/bootmem.h> 11 #include <linux/bootmem.h>
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/err.h> 13 #include <linux/err.h>
14 #include <linux/init.h> 14 #include <linux/init.h>
15 #include <linux/interrupt.h> 15 #include <linux/interrupt.h>
16 #include <linux/list.h> 16 #include <linux/list.h>
17 #include <linux/slab.h> 17 #include <linux/slab.h>
18 #include <linux/types.h> 18 #include <linux/types.h>
19 #include <linux/wait.h> 19 #include <linux/wait.h>
20 20
21 #include <asm/ccwdev.h> 21 #include <asm/ccwdev.h>
22 #include <asm/cio.h> 22 #include <asm/cio.h>
23 #include <asm/ebcdic.h> 23 #include <asm/ebcdic.h>
24 24
25 #include "raw3270.h" 25 #include "raw3270.h"
26 26
27 #include <linux/major.h> 27 #include <linux/major.h>
28 #include <linux/kdev_t.h> 28 #include <linux/kdev_t.h>
29 #include <linux/device.h> 29 #include <linux/device.h>
30 #include <linux/mutex.h> 30 #include <linux/mutex.h>
31 31
32 static struct class *class3270; 32 static struct class *class3270;
33 33
/* The main 3270 data structure: one per attached 3270 device. */
struct raw3270 {
	struct list_head list;		/* link on raw3270_devices */
	struct ccw_device *cdev;	/* underlying channel device */
	int minor;			/* assigned minor number */

	short model, rows, cols;	/* detected terminal geometry */
	unsigned long flags;		/* RAW3270_FLAGS_* bits, see below */

	struct list_head req_queue;	/* Request queue. */
	struct list_head view_list;	/* List of available views. */
	struct raw3270_view *view;	/* Active view. */

	struct timer_list timer;	/* Device timer. */

	unsigned char *ascebc;		/* ascii -> ebcdic table */
	struct class_device *clttydev;	/* 3270-class tty device ptr */
	struct class_device *cltubdev;	/* 3270-class tub device ptr */

	/* Request and data buffer used during device initialization. */
	struct raw3270_request init_request;
	unsigned char init_data[256];
};
56 56
57 /* raw3270->flags */ 57 /* raw3270->flags */
58 #define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */ 58 #define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */
59 #define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */ 59 #define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */
60 #define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */ 60 #define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */
61 #define RAW3270_FLAGS_READY 4 /* Device is useable by views */ 61 #define RAW3270_FLAGS_READY 4 /* Device is useable by views */
62 #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ 62 #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
63 63
64 /* Semaphore to protect global data of raw3270 (devices, views, etc). */ 64 /* Semaphore to protect global data of raw3270 (devices, views, etc). */
65 static DEFINE_MUTEX(raw3270_mutex); 65 static DEFINE_MUTEX(raw3270_mutex);
66 66
67 /* List of 3270 devices. */ 67 /* List of 3270 devices. */
68 static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices); 68 static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices);
69 69
70 /* 70 /*
71 * Flag to indicate if the driver has been registered. Some operations 71 * Flag to indicate if the driver has been registered. Some operations
72 * like waiting for the end of i/o need to be done differently as long 72 * like waiting for the end of i/o need to be done differently as long
73 * as the kernel is still starting up (console support). 73 * as the kernel is still starting up (console support).
74 */ 74 */
75 static int raw3270_registered; 75 static int raw3270_registered;
76 76
77 /* Module parameters */ 77 /* Module parameters */
78 static int tubxcorrect = 0; 78 static int tubxcorrect = 0;
79 module_param(tubxcorrect, bool, 0); 79 module_param(tubxcorrect, bool, 0);
80 80
81 /* 81 /*
82 * Wait queue for device init/delete, view delete. 82 * Wait queue for device init/delete, view delete.
83 */ 83 */
84 DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue); 84 DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
85 85
/*
 * Encode array for 12 bit 3270 addresses: maps each 6-bit value to the
 * EBCDIC graphic character used on the wire (see raw3270_buffer_address).
 */
static unsigned char raw3270_ebcgraf[64] =	{
	0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
	0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
	0xd8, 0xd9, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
	0x60, 0x61, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
	0xe8, 0xe9, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
	0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
};
99 99
100 void 100 void
101 raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr) 101 raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
102 { 102 {
103 if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) { 103 if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
104 cp[0] = (addr >> 8) & 0x3f; 104 cp[0] = (addr >> 8) & 0x3f;
105 cp[1] = addr & 0xff; 105 cp[1] = addr & 0xff;
106 } else { 106 } else {
107 cp[0] = raw3270_ebcgraf[(addr >> 6) & 0x3f]; 107 cp[0] = raw3270_ebcgraf[(addr >> 6) & 0x3f];
108 cp[1] = raw3270_ebcgraf[addr & 0x3f]; 108 cp[1] = raw3270_ebcgraf[addr & 0x3f];
109 } 109 }
110 } 110 }
111 111
112 /* 112 /*
113 * Allocate a new 3270 ccw request 113 * Allocate a new 3270 ccw request
114 */ 114 */
115 struct raw3270_request * 115 struct raw3270_request *
116 raw3270_request_alloc(size_t size) 116 raw3270_request_alloc(size_t size)
117 { 117 {
118 struct raw3270_request *rq; 118 struct raw3270_request *rq;
119 119
120 /* Allocate request structure */ 120 /* Allocate request structure */
121 rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA); 121 rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
122 if (!rq) 122 if (!rq)
123 return ERR_PTR(-ENOMEM); 123 return ERR_PTR(-ENOMEM);
124 124
125 /* alloc output buffer. */ 125 /* alloc output buffer. */
126 if (size > 0) { 126 if (size > 0) {
127 rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA); 127 rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
128 if (!rq->buffer) { 128 if (!rq->buffer) {
129 kfree(rq); 129 kfree(rq);
130 return ERR_PTR(-ENOMEM); 130 return ERR_PTR(-ENOMEM);
131 } 131 }
132 } 132 }
133 rq->size = size; 133 rq->size = size;
134 INIT_LIST_HEAD(&rq->list); 134 INIT_LIST_HEAD(&rq->list);
135 135
136 /* 136 /*
137 * Setup ccw. 137 * Setup ccw.
138 */ 138 */
139 rq->ccw.cda = __pa(rq->buffer); 139 rq->ccw.cda = __pa(rq->buffer);
140 rq->ccw.flags = CCW_FLAG_SLI; 140 rq->ccw.flags = CCW_FLAG_SLI;
141 141
142 return rq; 142 return rq;
143 } 143 }
144 144
145 #ifdef CONFIG_TN3270_CONSOLE 145 #ifdef CONFIG_TN3270_CONSOLE
146 /* 146 /*
147 * Allocate a new 3270 ccw request from bootmem. Only works very 147 * Allocate a new 3270 ccw request from bootmem. Only works very
148 * early in the boot process. Only con3270.c should be using this. 148 * early in the boot process. Only con3270.c should be using this.
149 */ 149 */
150 struct raw3270_request * 150 struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
151 raw3270_request_alloc_bootmem(size_t size)
152 { 151 {
153 struct raw3270_request *rq; 152 struct raw3270_request *rq;
154 153
155 rq = alloc_bootmem_low(sizeof(struct raw3270)); 154 rq = alloc_bootmem_low(sizeof(struct raw3270));
156 if (!rq) 155 if (!rq)
157 return ERR_PTR(-ENOMEM); 156 return ERR_PTR(-ENOMEM);
158 memset(rq, 0, sizeof(struct raw3270_request)); 157 memset(rq, 0, sizeof(struct raw3270_request));
159 158
160 /* alloc output buffer. */ 159 /* alloc output buffer. */
161 if (size > 0) { 160 if (size > 0) {
162 rq->buffer = alloc_bootmem_low(size); 161 rq->buffer = alloc_bootmem_low(size);
163 if (!rq->buffer) { 162 if (!rq->buffer) {
164 free_bootmem((unsigned long) rq, 163 free_bootmem((unsigned long) rq,
165 sizeof(struct raw3270)); 164 sizeof(struct raw3270));
166 return ERR_PTR(-ENOMEM); 165 return ERR_PTR(-ENOMEM);
167 } 166 }
168 } 167 }
169 rq->size = size; 168 rq->size = size;
170 INIT_LIST_HEAD(&rq->list); 169 INIT_LIST_HEAD(&rq->list);
171 170
172 /* 171 /*
173 * Setup ccw. 172 * Setup ccw.
174 */ 173 */
175 rq->ccw.cda = __pa(rq->buffer); 174 rq->ccw.cda = __pa(rq->buffer);
176 rq->ccw.flags = CCW_FLAG_SLI; 175 rq->ccw.flags = CCW_FLAG_SLI;
177 176
178 return rq; 177 return rq;
179 } 178 }
180 #endif 179 #endif
181 180
182 /* 181 /*
183 * Free 3270 ccw request 182 * Free 3270 ccw request
184 */ 183 */
185 void 184 void
186 raw3270_request_free (struct raw3270_request *rq) 185 raw3270_request_free (struct raw3270_request *rq)
187 { 186 {
188 kfree(rq->buffer); 187 kfree(rq->buffer);
189 kfree(rq); 188 kfree(rq);
190 } 189 }
191 190
192 /* 191 /*
193 * Reset request to initial state. 192 * Reset request to initial state.
194 */ 193 */
195 void 194 void
196 raw3270_request_reset(struct raw3270_request *rq) 195 raw3270_request_reset(struct raw3270_request *rq)
197 { 196 {
198 BUG_ON(!list_empty(&rq->list)); 197 BUG_ON(!list_empty(&rq->list));
199 rq->ccw.cmd_code = 0; 198 rq->ccw.cmd_code = 0;
200 rq->ccw.count = 0; 199 rq->ccw.count = 0;
201 rq->ccw.cda = __pa(rq->buffer); 200 rq->ccw.cda = __pa(rq->buffer);
202 rq->ccw.flags = CCW_FLAG_SLI; 201 rq->ccw.flags = CCW_FLAG_SLI;
203 rq->rescnt = 0; 202 rq->rescnt = 0;
204 rq->rc = 0; 203 rq->rc = 0;
205 } 204 }
206 205
207 /* 206 /*
208 * Set command code to ccw of a request. 207 * Set command code to ccw of a request.
209 */ 208 */
210 void 209 void
211 raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd) 210 raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
212 { 211 {
213 rq->ccw.cmd_code = cmd; 212 rq->ccw.cmd_code = cmd;
214 } 213 }
215 214
216 /* 215 /*
217 * Add data fragment to output buffer. 216 * Add data fragment to output buffer.
218 */ 217 */
219 int 218 int
220 raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size) 219 raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
221 { 220 {
222 if (size + rq->ccw.count > rq->size) 221 if (size + rq->ccw.count > rq->size)
223 return -E2BIG; 222 return -E2BIG;
224 memcpy(rq->buffer + rq->ccw.count, data, size); 223 memcpy(rq->buffer + rq->ccw.count, data, size);
225 rq->ccw.count += size; 224 rq->ccw.count += size;
226 return 0; 225 return 0;
227 } 226 }
228 227
/*
 * Set address/length pair to ccw of a request.  The caller's buffer
 * replaces the request's own buffer as the channel program target;
 * the caller must keep it valid until the request completes.
 */
void
raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
{
	rq->ccw.cda = __pa(data);
	rq->ccw.count = size;
}
238 237
/*
 * Set idal buffer to ccw of a request.  Marks the ccw with
 * CCW_FLAG_IDA so the channel treats cda as an indirect data
 * address list instead of a flat buffer.
 */
void
raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
{
	rq->ccw.cda = __pa(ib->data);
	rq->ccw.count = ib->size;
	rq->ccw.flags |= CCW_FLAG_IDA;
}
249 248
/*
 * Stop running ccw.  Caller must hold the ccw device lock
 * (see raw3270_halt_io for the locked wrapper).
 *
 * Returns 0 if the request already completed or was terminated,
 * otherwise the rc of the last halt/clear attempt.
 */
static int
raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
{
	int retries;
	int rc;

	/* Nothing to stop if the request is already final. */
	if (raw3270_request_final(rq))
		return 0;
	/* Check if interrupt has already been processed */
	for (retries = 0; retries < 5; retries++) {
		/* Try the milder halt twice, then escalate to clear. */
		if (retries < 2)
			rc = ccw_device_halt(rp->cdev, (long) rq);
		else
			rc = ccw_device_clear(rp->cdev, (long) rq);
		if (rc == 0)
			break;	/* termination successful */
	}
	return rc;
}
272 271
/*
 * Locked wrapper around raw3270_halt_io_nolock: takes the ccw device
 * lock, stops the running ccw, returns the nolock variant's rc.
 */
static int
raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
	rc = raw3270_halt_io_nolock(rp, rq);
	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
	return rc;
}
284 283
285 /* 284 /*
286 * Add the request to the request queue, try to start it if the 285 * Add the request to the request queue, try to start it if the
287 * 3270 device is idle. Return without waiting for end of i/o. 286 * 3270 device is idle. Return without waiting for end of i/o.
288 */ 287 */
289 static int 288 static int
290 __raw3270_start(struct raw3270 *rp, struct raw3270_view *view, 289 __raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
291 struct raw3270_request *rq) 290 struct raw3270_request *rq)
292 { 291 {
293 rq->view = view; 292 rq->view = view;
294 raw3270_get_view(view); 293 raw3270_get_view(view);
295 if (list_empty(&rp->req_queue) && 294 if (list_empty(&rp->req_queue) &&
296 !test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) { 295 !test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
297 /* No other requests are on the queue. Start this one. */ 296 /* No other requests are on the queue. Start this one. */
298 rq->rc = ccw_device_start(rp->cdev, &rq->ccw, 297 rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
299 (unsigned long) rq, 0, 0); 298 (unsigned long) rq, 0, 0);
300 if (rq->rc) { 299 if (rq->rc) {
301 raw3270_put_view(view); 300 raw3270_put_view(view);
302 return rq->rc; 301 return rq->rc;
303 } 302 }
304 } 303 }
305 list_add_tail(&rq->list, &rp->req_queue); 304 list_add_tail(&rq->list, &rp->req_queue);
306 return 0; 305 return 0;
307 } 306 }
308 307
/*
 * Start a request on behalf of a view.  Fails with -EACCES if the
 * view is not the device's active view, -ENODEV if the device is
 * not ready.  Takes the ccw device lock around the start.
 */
int
raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
{
	unsigned long flags;
	struct raw3270 *rp;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
	rp = view->dev;
	if (!rp || rp->view != view)
		rc = -EACCES;
	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
		rc = -ENODEV;
	else
		rc = __raw3270_start(rp, view, rq);
	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
	return rc;
}
327 326
328 int 327 int
329 raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq) 328 raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
330 { 329 {
331 struct raw3270 *rp; 330 struct raw3270 *rp;
332 int rc; 331 int rc;
333 332
334 rp = view->dev; 333 rp = view->dev;
335 if (!rp || rp->view != view) 334 if (!rp || rp->view != view)
336 rc = -EACCES; 335 rc = -EACCES;
337 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) 336 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
338 rc = -ENODEV; 337 rc = -ENODEV;
339 else 338 else
340 rc = __raw3270_start(rp, view, rq); 339 rc = __raw3270_start(rp, view, rq);
341 return rc; 340 return rc;
342 } 341 }
343 342
/*
 * Queue a request without trying to start it.  NOTE(review): unlike
 * raw3270_start this does no locking or ready checks — presumably it
 * is meant to be called from a view's interrupt handler with the ccw
 * device lock already held; confirm against callers.
 */
int
raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
{
	struct raw3270 *rp;

	rp = view->dev;
	rq->view = view;
	/* Reference dropped in raw3270_irq on completion. */
	raw3270_get_view(view);
	list_add_tail(&rq->list, &rp->req_queue);
	return 0;
}
355 354
356 /* 355 /*
357 * 3270 interrupt routine, called from the ccw_device layer 356 * 3270 interrupt routine, called from the ccw_device layer
358 */ 357 */
359 static void 358 static void
360 raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) 359 raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
361 { 360 {
362 struct raw3270 *rp; 361 struct raw3270 *rp;
363 struct raw3270_view *view; 362 struct raw3270_view *view;
364 struct raw3270_request *rq; 363 struct raw3270_request *rq;
365 int rc; 364 int rc;
366 365
367 rp = (struct raw3270 *) cdev->dev.driver_data; 366 rp = (struct raw3270 *) cdev->dev.driver_data;
368 if (!rp) 367 if (!rp)
369 return; 368 return;
370 rq = (struct raw3270_request *) intparm; 369 rq = (struct raw3270_request *) intparm;
371 view = rq ? rq->view : rp->view; 370 view = rq ? rq->view : rp->view;
372 371
373 if (IS_ERR(irb)) 372 if (IS_ERR(irb))
374 rc = RAW3270_IO_RETRY; 373 rc = RAW3270_IO_RETRY;
375 else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { 374 else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
376 rq->rc = -EIO; 375 rq->rc = -EIO;
377 rc = RAW3270_IO_DONE; 376 rc = RAW3270_IO_DONE;
378 } else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END | 377 } else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
379 DEV_STAT_UNIT_EXCEP)) { 378 DEV_STAT_UNIT_EXCEP)) {
380 /* Handle CE-DE-UE and subsequent UDE */ 379 /* Handle CE-DE-UE and subsequent UDE */
381 set_bit(RAW3270_FLAGS_BUSY, &rp->flags); 380 set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
382 rc = RAW3270_IO_BUSY; 381 rc = RAW3270_IO_BUSY;
383 } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) { 382 } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
384 /* Wait for UDE if busy flag is set. */ 383 /* Wait for UDE if busy flag is set. */
385 if (irb->scsw.dstat & DEV_STAT_DEV_END) { 384 if (irb->scsw.dstat & DEV_STAT_DEV_END) {
386 clear_bit(RAW3270_FLAGS_BUSY, &rp->flags); 385 clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
387 /* Got it, now retry. */ 386 /* Got it, now retry. */
388 rc = RAW3270_IO_RETRY; 387 rc = RAW3270_IO_RETRY;
389 } else 388 } else
390 rc = RAW3270_IO_BUSY; 389 rc = RAW3270_IO_BUSY;
391 } else if (view) 390 } else if (view)
392 rc = view->fn->intv(view, rq, irb); 391 rc = view->fn->intv(view, rq, irb);
393 else 392 else
394 rc = RAW3270_IO_DONE; 393 rc = RAW3270_IO_DONE;
395 394
396 switch (rc) { 395 switch (rc) {
397 case RAW3270_IO_DONE: 396 case RAW3270_IO_DONE:
398 break; 397 break;
399 case RAW3270_IO_BUSY: 398 case RAW3270_IO_BUSY:
400 /* 399 /*
401 * Intervention required by the operator. We have to wait 400 * Intervention required by the operator. We have to wait
402 * for unsolicited device end. 401 * for unsolicited device end.
403 */ 402 */
404 return; 403 return;
405 case RAW3270_IO_RETRY: 404 case RAW3270_IO_RETRY:
406 if (!rq) 405 if (!rq)
407 break; 406 break;
408 rq->rc = ccw_device_start(rp->cdev, &rq->ccw, 407 rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
409 (unsigned long) rq, 0, 0); 408 (unsigned long) rq, 0, 0);
410 if (rq->rc == 0) 409 if (rq->rc == 0)
411 return; /* Sucessfully restarted. */ 410 return; /* Sucessfully restarted. */
412 break; 411 break;
413 case RAW3270_IO_STOP: 412 case RAW3270_IO_STOP:
414 if (!rq) 413 if (!rq)
415 break; 414 break;
416 raw3270_halt_io_nolock(rp, rq); 415 raw3270_halt_io_nolock(rp, rq);
417 rq->rc = -EIO; 416 rq->rc = -EIO;
418 break; 417 break;
419 default: 418 default:
420 BUG(); 419 BUG();
421 } 420 }
422 if (rq) { 421 if (rq) {
423 BUG_ON(list_empty(&rq->list)); 422 BUG_ON(list_empty(&rq->list));
424 /* The request completed, remove from queue and do callback. */ 423 /* The request completed, remove from queue and do callback. */
425 list_del_init(&rq->list); 424 list_del_init(&rq->list);
426 if (rq->callback) 425 if (rq->callback)
427 rq->callback(rq, rq->callback_data); 426 rq->callback(rq, rq->callback_data);
428 /* Do put_device for get_device in raw3270_start. */ 427 /* Do put_device for get_device in raw3270_start. */
429 raw3270_put_view(view); 428 raw3270_put_view(view);
430 } 429 }
431 /* 430 /*
432 * Try to start each request on request queue until one is 431 * Try to start each request on request queue until one is
433 * started successful. 432 * started successful.
434 */ 433 */
435 while (!list_empty(&rp->req_queue)) { 434 while (!list_empty(&rp->req_queue)) {
436 rq = list_entry(rp->req_queue.next,struct raw3270_request,list); 435 rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
437 rq->rc = ccw_device_start(rp->cdev, &rq->ccw, 436 rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
438 (unsigned long) rq, 0, 0); 437 (unsigned long) rq, 0, 0);
439 if (rq->rc == 0) 438 if (rq->rc == 0)
440 break; 439 break;
441 /* Start failed. Remove request and do callback. */ 440 /* Start failed. Remove request and do callback. */
442 list_del_init(&rq->list); 441 list_del_init(&rq->list);
443 if (rq->callback) 442 if (rq->callback)
444 rq->callback(rq, rq->callback_data); 443 rq->callback(rq, rq->callback_data);
445 /* Do put_device for get_device in raw3270_start. */ 444 /* Do put_device for get_device in raw3270_start. */
446 raw3270_put_view(view); 445 raw3270_put_view(view);
447 } 446 }
448 } 447 }
449 448
450 /* 449 /*
451 * Size sensing. 450 * Size sensing.
452 */ 451 */
453 452
struct raw3270_ua {	/* Query Reply structure for Usable Area */
	struct {	/* Usable Area Query Reply Base */
		short l;	/* Length of this structured field */
		char sfid;	/* 0x81 if Query Reply */
		char qcode;	/* 0x81 if Usable Area */
		char flags0;
		char flags1;
		short w;	/* Width of usable area */
		short h;	/* Height of usable area */
		char units;	/* 0x00:in; 0x01:mm */
		int xr;
		int yr;
		char aw;
		char ah;
		short buffsz;	/* Character buffer size, bytes */
		char xmin;
		char ymin;
		char xmax;
		char ymax;
	} __attribute__ ((packed)) uab;
	struct {	/* Alternate Usable Area Self-Defining Parameter */
		char l;		/* Length of this Self-Defining Parm */
		char sdpid;	/* 0x02 if Alternate Usable Area */
		char res;
		char auaid;	/* 0x01 is Id for the A U A */
		short wauai;	/* Width of AUAi */
		short hauai;	/* Height of AUAi */
		char auaunits;	/* 0x00:in, 0x01:mm */
		int auaxr;
		int auayr;
		char awauai;
		char ahauai;
	} __attribute__ ((packed)) aua;
} __attribute__ ((packed));
488 487
/* Shared diag 210 parameter block; use is serialized by
 * raw3270_init_mutex (see raw3270_size_device). */
static struct diag210 raw3270_init_diag210;
static DEFINE_MUTEX(raw3270_init_mutex);
491 490
/*
 * Interrupt handler for the init view.  Translates unit checks into
 * request return codes, records the residual count on success and
 * wakes waiters when the device raises attention.
 */
static int
raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
		 struct irb *irb)
{
	/*
	 * Unit-Check Processing:
	 * Expect Command Reject or Intervention Required.
	 */
	if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
		/* Request finished abnormally. */
		if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
			set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
			return RAW3270_IO_BUSY;
		}
	}
	if (rq) {
		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
			if (irb->ecw[0] & SNS0_CMD_REJECT)
				rq->rc = -EOPNOTSUPP;
			else
				rq->rc = -EIO;
		} else
			/* Request finished normally. Copy residual count. */
			rq->rescnt = irb->scsw.count;
	}
	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
		/* __raw3270_size_device waits for this attention bit. */
		set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
		wake_up(&raw3270_wait_queue);
	}
	return RAW3270_IO_DONE;
}
523 522
/* Function table for the init view: only an interrupt handler. */
static struct raw3270_fn raw3270_init_fn = {
	.intv = raw3270_init_irq
};

/* Temporary view bound to a device while its size/reset requests run. */
static struct raw3270_view raw3270_init_view = {
	.fn = &raw3270_init_fn
};
531 530
532 /* 531 /*
533 * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup 532 * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup
534 * Wait for end of request. The request must have been started 533 * Wait for end of request. The request must have been started
535 * with raw3270_start, rc = 0. The device lock may NOT have been 534 * with raw3270_start, rc = 0. The device lock may NOT have been
536 * released between calling raw3270_start and raw3270_wait. 535 * released between calling raw3270_start and raw3270_wait.
537 */ 536 */
/*
 * Request-completion callback used by raw3270_start_init; data is
 * the on-stack wait queue head set up there.
 */
static void
raw3270_wake_init(struct raw3270_request *rq, void *data)
{
	wake_up((wait_queue_head_t *) data);
}
543 542
544 /* 543 /*
545 * Special wait function that can cope with console initialization. 544 * Special wait function that can cope with console initialization.
546 */ 545 */
547 static int 546 static int
548 raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view, 547 raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
549 struct raw3270_request *rq) 548 struct raw3270_request *rq)
550 { 549 {
551 unsigned long flags; 550 unsigned long flags;
552 wait_queue_head_t wq; 551 wait_queue_head_t wq;
553 int rc; 552 int rc;
554 553
555 #ifdef CONFIG_TN3270_CONSOLE 554 #ifdef CONFIG_TN3270_CONSOLE
556 if (raw3270_registered == 0) { 555 if (raw3270_registered == 0) {
557 spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); 556 spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
558 rq->callback = NULL; 557 rq->callback = NULL;
559 rc = __raw3270_start(rp, view, rq); 558 rc = __raw3270_start(rp, view, rq);
560 if (rc == 0) 559 if (rc == 0)
561 while (!raw3270_request_final(rq)) { 560 while (!raw3270_request_final(rq)) {
562 wait_cons_dev(); 561 wait_cons_dev();
563 barrier(); 562 barrier();
564 } 563 }
565 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); 564 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
566 return rq->rc; 565 return rq->rc;
567 } 566 }
568 #endif 567 #endif
569 init_waitqueue_head(&wq); 568 init_waitqueue_head(&wq);
570 rq->callback = raw3270_wake_init; 569 rq->callback = raw3270_wake_init;
571 rq->callback_data = &wq; 570 rq->callback_data = &wq;
572 spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); 571 spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
573 rc = __raw3270_start(rp, view, rq); 572 rc = __raw3270_start(rp, view, rq);
574 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); 573 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
575 if (rc) 574 if (rc)
576 return rc; 575 return rc;
577 /* Now wait for the completion. */ 576 /* Now wait for the completion. */
578 rc = wait_event_interruptible(wq, raw3270_request_final(rq)); 577 rc = wait_event_interruptible(wq, raw3270_request_final(rq));
579 if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */ 578 if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */
580 raw3270_halt_io(view->dev, rq); 579 raw3270_halt_io(view->dev, rq);
581 /* No wait for the halt to complete. */ 580 /* No wait for the halt to complete. */
582 wait_event(wq, raw3270_request_final(rq)); 581 wait_event(wq, raw3270_request_final(rq));
583 return -ERESTARTSYS; 582 return -ERESTARTSYS;
584 } 583 }
585 return rq->rc; 584 return rq->rc;
586 } 585 }
587 586
588 static int 587 static int
589 __raw3270_size_device_vm(struct raw3270 *rp) 588 __raw3270_size_device_vm(struct raw3270 *rp)
590 { 589 {
591 int rc, model; 590 int rc, model;
592 struct ccw_dev_id dev_id; 591 struct ccw_dev_id dev_id;
593 592
594 ccw_device_get_id(rp->cdev, &dev_id); 593 ccw_device_get_id(rp->cdev, &dev_id);
595 raw3270_init_diag210.vrdcdvno = dev_id.devno; 594 raw3270_init_diag210.vrdcdvno = dev_id.devno;
596 raw3270_init_diag210.vrdclen = sizeof(struct diag210); 595 raw3270_init_diag210.vrdclen = sizeof(struct diag210);
597 rc = diag210(&raw3270_init_diag210); 596 rc = diag210(&raw3270_init_diag210);
598 if (rc) 597 if (rc)
599 return rc; 598 return rc;
600 model = raw3270_init_diag210.vrdccrmd; 599 model = raw3270_init_diag210.vrdccrmd;
601 switch (model) { 600 switch (model) {
602 case 2: 601 case 2:
603 rp->model = model; 602 rp->model = model;
604 rp->rows = 24; 603 rp->rows = 24;
605 rp->cols = 80; 604 rp->cols = 80;
606 break; 605 break;
607 case 3: 606 case 3:
608 rp->model = model; 607 rp->model = model;
609 rp->rows = 32; 608 rp->rows = 32;
610 rp->cols = 80; 609 rp->cols = 80;
611 break; 610 break;
612 case 4: 611 case 4:
613 rp->model = model; 612 rp->model = model;
614 rp->rows = 43; 613 rp->rows = 43;
615 rp->cols = 80; 614 rp->cols = 80;
616 break; 615 break;
617 case 5: 616 case 5:
618 rp->model = model; 617 rp->model = model;
619 rp->rows = 27; 618 rp->rows = 27;
620 rp->cols = 132; 619 rp->cols = 132;
621 break; 620 break;
622 default: 621 default:
623 printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model); 622 printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model);
624 rc = -EOPNOTSUPP; 623 rc = -EOPNOTSUPP;
625 break; 624 break;
626 } 625 }
627 return rc; 626 return rc;
628 } 627 }
629 628
/*
 * Determine terminal geometry by querying the device itself.
 * Returns 0 on success, a negative errno otherwise; on success
 * rp->rows/rp->cols are set and RAW3270_FLAGS_14BITADDR may be set.
 * Caller must hold raw3270_init_mutex (uses the shared init view
 * and rp->init_request/init_data).
 */
static int
__raw3270_size_device(struct raw3270 *rp)
{
	static const unsigned char wbuf[] =
		{ 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
	struct raw3270_ua *uap;
	unsigned short count;
	int rc;

	/*
	 * To determine the size of the 3270 device we need to do:
	 * 1) send a 'read partition' data stream to the device
	 * 2) wait for the attn interrupt that precedes the query reply
	 * 3) do a read modified to get the query reply
	 * To make things worse we have to cope with intervention
	 * required (3270 device switched to 'stand-by') and command
	 * rejects (old devices that can't do 'read partition').
	 */
	memset(&rp->init_request, 0, sizeof(rp->init_request));
	memset(&rp->init_data, 0, 256);
	/* Store 'read partition' data stream to init_data */
	memcpy(&rp->init_data, wbuf, sizeof(wbuf));
	INIT_LIST_HEAD(&rp->init_request.list);
	rp->init_request.ccw.cmd_code = TC_WRITESF;
	rp->init_request.ccw.flags = CCW_FLAG_SLI;
	rp->init_request.ccw.count = sizeof(wbuf);
	rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data);

	rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
	if (rc)
		/* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
		return rc;

	/* Wait for attention interrupt (set by raw3270_init_irq). */
#ifdef CONFIG_TN3270_CONSOLE
	if (raw3270_registered == 0) {
		unsigned long flags;

		/* Early console: poll for the attention flag. */
		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
		while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags))
			wait_cons_dev();
		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
	} else
#endif
		rc = wait_event_interruptible(raw3270_wait_queue,
			test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
	if (rc)
		return rc;

	/*
	 * The device accepted the 'read partition' command. Now
	 * set up a read ccw and issue it.
	 */
	rp->init_request.ccw.cmd_code = TC_READMOD;
	rp->init_request.ccw.flags = CCW_FLAG_SLI;
	rp->init_request.ccw.count = sizeof(rp->init_data);
	rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
	rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
	if (rc)
		return rc;
	/* Got a Query Reply */
	count = sizeof(rp->init_data) - rp->init_request.rescnt;
	uap = (struct raw3270_ua *) (rp->init_data + 1);
	/* Paranoia check. */
	if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81)
		return -EOPNOTSUPP;
	/* Copy rows/columns of default Usable Area */
	rp->rows = uap->uab.h;
	rp->cols = uap->uab.w;
	/* Check for 14 bit addressing */
	if ((uap->uab.flags0 & 0x0d) == 0x01)
		set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
	/* Check for Alternate Usable Area */
	if (uap->uab.l == sizeof(struct raw3270_ua) &&
	    uap->aua.sdpid == 0x02) {
		rp->rows = uap->aua.hauai;
		rp->cols = uap->aua.wauai;
	}
	return 0;
}
710 709
711 static int 710 static int
712 raw3270_size_device(struct raw3270 *rp) 711 raw3270_size_device(struct raw3270 *rp)
713 { 712 {
714 int rc; 713 int rc;
715 714
716 mutex_lock(&raw3270_init_mutex); 715 mutex_lock(&raw3270_init_mutex);
717 rp->view = &raw3270_init_view; 716 rp->view = &raw3270_init_view;
718 raw3270_init_view.dev = rp; 717 raw3270_init_view.dev = rp;
719 if (MACHINE_IS_VM) 718 if (MACHINE_IS_VM)
720 rc = __raw3270_size_device_vm(rp); 719 rc = __raw3270_size_device_vm(rp);
721 else 720 else
722 rc = __raw3270_size_device(rp); 721 rc = __raw3270_size_device(rp);
723 raw3270_init_view.dev = NULL; 722 raw3270_init_view.dev = NULL;
724 rp->view = NULL; 723 rp->view = NULL;
725 mutex_unlock(&raw3270_init_mutex); 724 mutex_unlock(&raw3270_init_mutex);
726 if (rc == 0) { /* Found something. */ 725 if (rc == 0) { /* Found something. */
727 /* Try to find a model. */ 726 /* Try to find a model. */
728 rp->model = 0; 727 rp->model = 0;
729 if (rp->rows == 24 && rp->cols == 80) 728 if (rp->rows == 24 && rp->cols == 80)
730 rp->model = 2; 729 rp->model = 2;
731 if (rp->rows == 32 && rp->cols == 80) 730 if (rp->rows == 32 && rp->cols == 80)
732 rp->model = 3; 731 rp->model = 3;
733 if (rp->rows == 43 && rp->cols == 80) 732 if (rp->rows == 43 && rp->cols == 80)
734 rp->model = 4; 733 rp->model = 4;
735 if (rp->rows == 27 && rp->cols == 132) 734 if (rp->rows == 27 && rp->cols == 132)
736 rp->model = 5; 735 rp->model = 5;
737 } else { 736 } else {
738 /* Couldn't detect size. Use default model 2. */ 737 /* Couldn't detect size. Use default model 2. */
739 rp->model = 2; 738 rp->model = 2;
740 rp->rows = 24; 739 rp->rows = 24;
741 rp->cols = 80; 740 rp->cols = 80;
742 return 0; 741 return 0;
743 } 742 }
744 return rc; 743 return rc;
745 } 744 }
746 745
/*
 * Reset the 3270 device by sending an erase/write alternate with a
 * keyboard-restore order.  Uses the shared init request/view, so the
 * whole sequence is serialized by raw3270_init_mutex.
 */
static int
raw3270_reset_device(struct raw3270 *rp)
{
	int rc;

	mutex_lock(&raw3270_init_mutex);
	memset(&rp->init_request, 0, sizeof(rp->init_request));
	memset(&rp->init_data, 0, sizeof(rp->init_data));
	/* Store reset data stream to init_data/init_request */
	rp->init_data[0] = TW_KR;
	INIT_LIST_HEAD(&rp->init_request.list);
	rp->init_request.ccw.cmd_code = TC_EWRITEA;
	rp->init_request.ccw.flags = CCW_FLAG_SLI;
	rp->init_request.ccw.count = 1;
	rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
	/* Attach the init view for the duration of the request. */
	rp->view = &raw3270_init_view;
	raw3270_init_view.dev = rp;
	rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
	raw3270_init_view.dev = NULL;
	rp->view = NULL;
	mutex_unlock(&raw3270_init_mutex);
	return rc;
}
770 769
771 int 770 int
772 raw3270_reset(struct raw3270_view *view) 771 raw3270_reset(struct raw3270_view *view)
773 { 772 {
774 struct raw3270 *rp; 773 struct raw3270 *rp;
775 int rc; 774 int rc;
776 775
777 rp = view->dev; 776 rp = view->dev;
778 if (!rp || rp->view != view) 777 if (!rp || rp->view != view)
779 rc = -EACCES; 778 rc = -EACCES;
780 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) 779 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
781 rc = -ENODEV; 780 rc = -ENODEV;
782 else 781 else
783 rc = raw3270_reset_device(view->dev); 782 rc = raw3270_reset_device(view->dev);
784 return rc; 783 return rc;
785 } 784 }
786 785
/*
 * Setup new 3270 device.
 *
 * Initializes @rp (caller-allocated), installs the ascii->ebcdic
 * translation table @ascebc, assigns the smallest free minor number
 * and links the device into the sorted raw3270_devices list.
 * Returns 0 on success or -EUSERS if all minor numbers are in use.
 */
static int
raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
{
        struct list_head *l;
        struct raw3270 *tmp;
        int minor;

        memset(rp, 0, sizeof(struct raw3270));
        /* Copy ebcdic -> ascii translation table. */
        memcpy(ascebc, _ascebc, 256);
        if (tubxcorrect) {
                /* correct brackets and circumflex */
                ascebc['['] = 0xad;
                ascebc[']'] = 0xbd;
                ascebc['^'] = 0xb0;
        }
        rp->ascebc = ascebc;

        /* Set defaults (classic 3270 screen size). */
        rp->rows = 24;
        rp->cols = 80;

        INIT_LIST_HEAD(&rp->req_queue);
        INIT_LIST_HEAD(&rp->view_list);

        /*
         * Add device to list and find the smallest unused minor
         * number for it. Note: there is no device with minor 0,
         * see special case for fs3270.c:fs3270_open().
         */
        mutex_lock(&raw3270_mutex);
        /* Keep the list sorted. */
        minor = RAW3270_FIRSTMINOR;
        rp->minor = -1;
        list_for_each(l, &raw3270_devices) {
                tmp = list_entry(l, struct raw3270, list);
                if (tmp->minor > minor) {
                        /* Found a gap in the minor sequence: take it. */
                        rp->minor = minor;
                        __list_add(&rp->list, l->prev, l);
                        break;
                }
                minor++;
        }
        if (rp->minor == -1 && minor < RAW3270_MAXDEVS + RAW3270_FIRSTMINOR) {
                /* No gap found: append after the last device. */
                rp->minor = minor;
                list_add_tail(&rp->list, &raw3270_devices);
        }
        mutex_unlock(&raw3270_mutex);
        /* No free minor number? Then give up. */
        if (rp->minor == -1)
                return -EUSERS;
        /* Wire the ccw device to this structure and hook the irq handler. */
        rp->cdev = cdev;
        cdev->dev.driver_data = rp;
        cdev->handler = raw3270_irq;
        return 0;
}
846 845
#ifdef CONFIG_TN3270_CONSOLE
/*
 * Setup 3270 device configured as console.
 *
 * Runs at early boot (__init); memory comes from bootmem. The
 * allocation results are not checked — presumably alloc_bootmem
 * panics on failure at this boot stage (NOTE(review): confirm).
 * Returns the ready device or an ERR_PTR() value.
 */
struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
{
        struct raw3270 *rp;
        char *ascebc;
        int rc;

        rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270));
        ascebc = (char *) alloc_bootmem(256);
        rc = raw3270_setup_device(cdev, rp, ascebc);
        if (rc)
                return ERR_PTR(rc);
        set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
        /* Reset / size / reset sequence, same as raw3270_set_online(). */
        rc = raw3270_reset_device(rp);
        if (rc)
                return ERR_PTR(rc);
        rc = raw3270_size_device(rp);
        if (rc)
                return ERR_PTR(rc);
        rc = raw3270_reset_device(rp);
        if (rc)
                return ERR_PTR(rc);
        set_bit(RAW3270_FLAGS_READY, &rp->flags);
        return rp;
}

/* Call wait_cons_dev() while holding the device's ccw lock. */
void
raw3270_wait_cons_dev(struct raw3270 *rp)
{
        unsigned long flags;

        spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
        wait_cons_dev();
        spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}

#endif
888 886
889 /* 887 /*
890 * Create a 3270 device structure. 888 * Create a 3270 device structure.
891 */ 889 */
892 static struct raw3270 * 890 static struct raw3270 *
893 raw3270_create_device(struct ccw_device *cdev) 891 raw3270_create_device(struct ccw_device *cdev)
894 { 892 {
895 struct raw3270 *rp; 893 struct raw3270 *rp;
896 char *ascebc; 894 char *ascebc;
897 int rc; 895 int rc;
898 896
899 rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); 897 rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
900 if (!rp) 898 if (!rp)
901 return ERR_PTR(-ENOMEM); 899 return ERR_PTR(-ENOMEM);
902 ascebc = kmalloc(256, GFP_KERNEL); 900 ascebc = kmalloc(256, GFP_KERNEL);
903 if (!ascebc) { 901 if (!ascebc) {
904 kfree(rp); 902 kfree(rp);
905 return ERR_PTR(-ENOMEM); 903 return ERR_PTR(-ENOMEM);
906 } 904 }
907 rc = raw3270_setup_device(cdev, rp, ascebc); 905 rc = raw3270_setup_device(cdev, rp, ascebc);
908 if (rc) { 906 if (rc) {
909 kfree(rp->ascebc); 907 kfree(rp->ascebc);
910 kfree(rp); 908 kfree(rp);
911 rp = ERR_PTR(rc); 909 rp = ERR_PTR(rc);
912 } 910 }
913 /* Get reference to ccw_device structure. */ 911 /* Get reference to ccw_device structure. */
914 get_device(&cdev->dev); 912 get_device(&cdev->dev);
915 return rp; 913 return rp;
916 } 914 }
917 915
/*
 * Activate a view.
 *
 * Makes @view the device's current view, deactivating the previous
 * one. If activation fails, tries to restore the old view; if that
 * also fails, falls back to any other view on the device's list.
 * Returns 0 on success (or if @view is already active), -ENODEV if
 * the view has no device or the device is not ready, otherwise the
 * error from the view's activate callback.
 */
int
raw3270_activate_view(struct raw3270_view *view)
{
        struct raw3270 *rp;
        struct raw3270_view *oldview, *nv;
        unsigned long flags;
        int rc;

        rp = view->dev;
        if (!rp)
                return -ENODEV;
        spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
        if (rp->view == view)
                rc = 0;         /* Already active: nothing to do. */
        else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
                rc = -ENODEV;
        else {
                oldview = NULL;
                if (rp->view) {
                        oldview = rp->view;
                        oldview->fn->deactivate(oldview);
                }
                rp->view = view;
                rc = view->fn->activate(view);
                if (rc) {
                        /* Didn't work. Try to reactivate the old view. */
                        rp->view = oldview;
                        if (!oldview || oldview->fn->activate(oldview) != 0) {
                                /* Didn't work as well. Try any other view. */
                                list_for_each_entry(nv, &rp->view_list, list)
                                        if (nv != view && nv != oldview) {
                                                rp->view = nv;
                                                if (nv->fn->activate(nv) == 0)
                                                        break;
                                                /* Activation failed, too. */
                                                rp->view = NULL;
                                        }
                        }
                }
        }
        spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
        return rc;
}
963 961
/*
 * Deactivate current view.
 *
 * If @view is the device's active view, deactivate it, move it to the
 * end of the view list and, if the device is ready, activate the
 * first view on the list whose activate callback succeeds.
 * No-op if @view has no device or is not the active view.
 */
void
raw3270_deactivate_view(struct raw3270_view *view)
{
        unsigned long flags;
        struct raw3270 *rp;

        rp = view->dev;
        if (!rp)
                return;
        spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
        if (rp->view == view) {
                view->fn->deactivate(view);
                rp->view = NULL;
                /* Move deactivated view to end of list. */
                list_del_init(&view->list);
                list_add_tail(&view->list, &rp->view_list);
                /* Try to activate another view. */
                if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
                        /* Note: reuses @view as the iteration cursor. */
                        list_for_each_entry(view, &rp->view_list, list) {
                                rp->view = view;
                                if (view->fn->activate(view) == 0)
                                        break;
                                rp->view = NULL;
                        }
                }
        }
        spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
995 993
/*
 * Add view to device with minor "minor".
 *
 * Looks up the device by minor number and, if it is ready, attaches
 * @view to it with callbacks @fn, copying the device's geometry and
 * translation table into the view. The ref_count starts at 2: one
 * reference for the caller, one held until raw3270_del_view().
 * Returns 0 on success, -ENODEV if minor is invalid, no such device
 * exists, or the device is not ready.
 */
int
raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
{
        unsigned long flags;
        struct raw3270 *rp;
        int rc;

        if (minor <= 0)
                return -ENODEV;
        mutex_lock(&raw3270_mutex);
        rc = -ENODEV;
        list_for_each_entry(rp, &raw3270_devices, list) {
                if (rp->minor != minor)
                        continue;
                spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
                if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
                        atomic_set(&view->ref_count, 2);
                        view->dev = rp;
                        view->fn = fn;
                        view->model = rp->model;
                        view->rows = rp->rows;
                        view->cols = rp->cols;
                        view->ascebc = rp->ascebc;
                        spin_lock_init(&view->lock);
                        list_add(&view->list, &rp->view_list);
                        rc = 0;
                }
                spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
                break;
        }
        mutex_unlock(&raw3270_mutex);
        return rc;
}
1032 1030
/*
 * Find specific view of device with minor "minor".
 *
 * Returns the first view on that device whose callbacks match @fn,
 * with an extra reference taken (caller must drop it via
 * raw3270_put_view). Returns ERR_PTR(-ENODEV) if the device does not
 * exist or is not ready, ERR_PTR(-ENOENT) if no matching view exists.
 */
struct raw3270_view *
raw3270_find_view(struct raw3270_fn *fn, int minor)
{
        struct raw3270 *rp;
        struct raw3270_view *view, *tmp;
        unsigned long flags;

        mutex_lock(&raw3270_mutex);
        view = ERR_PTR(-ENODEV);
        list_for_each_entry(rp, &raw3270_devices, list) {
                if (rp->minor != minor)
                        continue;
                spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
                if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
                        view = ERR_PTR(-ENOENT);
                        list_for_each_entry(tmp, &rp->view_list, list) {
                                if (tmp->fn == fn) {
                                        /* Reference for the caller. */
                                        raw3270_get_view(tmp);
                                        view = tmp;
                                        break;
                                }
                        }
                }
                spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
                break;
        }
        mutex_unlock(&raw3270_mutex);
        return view;
}
1065 1063
/*
 * Remove view from device and free view structure via call to view->fn->free.
 *
 * Deactivates @view if it is current, unlinks it, tries to activate a
 * replacement view, then sleeps until all other references to the
 * view are dropped before invoking the free callback. Must be called
 * from a context that may sleep (wait_event).
 */
void
raw3270_del_view(struct raw3270_view *view)
{
        unsigned long flags;
        struct raw3270 *rp;
        struct raw3270_view *nv;

        rp = view->dev;
        spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
        if (rp->view == view) {
                view->fn->deactivate(view);
                rp->view = NULL;
        }
        list_del_init(&view->list);
        if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
                /* Try to activate another view. */
                list_for_each_entry(nv, &rp->view_list, list) {
                        if (nv->fn->activate(nv) == 0) {
                                rp->view = nv;
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
        /* Wait for reference counter to drop to zero. */
        atomic_dec(&view->ref_count);
        wait_event(raw3270_wait_queue, atomic_read(&view->ref_count) == 0);
        if (view->fn->free)
                view->fn->free(view);
}
1099 1097
/*
 * Remove a 3270 device structure.
 *
 * Destroys the tty/tub class devices (if they were created), unlinks
 * the device from the global list, detaches it from its ccw_device,
 * drops the ccw_device reference taken in raw3270_create_device()
 * and frees the memory.
 */
static void
raw3270_delete_device(struct raw3270 *rp)
{
        struct ccw_device *cdev;

        /* Remove from device chain. */
        mutex_lock(&raw3270_mutex);
        if (rp->clttydev && !IS_ERR(rp->clttydev))
                class_device_destroy(class3270,
                                     MKDEV(IBM_TTY3270_MAJOR, rp->minor));
        if (rp->cltubdev && !IS_ERR(rp->cltubdev))
                class_device_destroy(class3270,
                                     MKDEV(IBM_FS3270_MAJOR, rp->minor));
        list_del_init(&rp->list);
        mutex_unlock(&raw3270_mutex);

        /* Disconnect from ccw_device. */
        cdev = rp->cdev;
        rp->cdev = NULL;
        cdev->dev.driver_data = NULL;
        cdev->handler = NULL;

        /* Put ccw_device structure. */
        put_device(&cdev->dev);

        /* Now free raw3270 structure. */
        kfree(rp->ascebc);
        kfree(rp);
}
1132 1130
/* Nothing to do at probe time; devices are set up when varied online. */
static int
raw3270_probe(struct ccw_device *cdev)
{
        return 0;
}
1138 1136
1139 /* 1137 /*
1140 * Additional attributes for a 3270 device 1138 * Additional attributes for a 3270 device
1141 */ 1139 */
1142 static ssize_t 1140 static ssize_t
1143 raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf) 1141 raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
1144 { 1142 {
1145 return snprintf(buf, PAGE_SIZE, "%i\n", 1143 return snprintf(buf, PAGE_SIZE, "%i\n",
1146 ((struct raw3270 *) dev->driver_data)->model); 1144 ((struct raw3270 *) dev->driver_data)->model);
1147 } 1145 }
1148 static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL); 1146 static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
1149 1147
1150 static ssize_t 1148 static ssize_t
1151 raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf) 1149 raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
1152 { 1150 {
1153 return snprintf(buf, PAGE_SIZE, "%i\n", 1151 return snprintf(buf, PAGE_SIZE, "%i\n",
1154 ((struct raw3270 *) dev->driver_data)->rows); 1152 ((struct raw3270 *) dev->driver_data)->rows);
1155 } 1153 }
1156 static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL); 1154 static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
1157 1155
1158 static ssize_t 1156 static ssize_t
1159 raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf) 1157 raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
1160 { 1158 {
1161 return snprintf(buf, PAGE_SIZE, "%i\n", 1159 return snprintf(buf, PAGE_SIZE, "%i\n",
1162 ((struct raw3270 *) dev->driver_data)->cols); 1160 ((struct raw3270 *) dev->driver_data)->cols);
1163 } 1161 }
1164 static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL); 1162 static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
1165 1163
/* Attribute group installed on the ccw device's sysfs directory. */
static struct attribute * raw3270_attrs[] = {
        &dev_attr_model.attr,
        &dev_attr_rows.attr,
        &dev_attr_columns.attr,
        NULL,           /* sentinel */
};

static struct attribute_group raw3270_attr_group = {
        .attrs = raw3270_attrs,
};
1176 1174
/*
 * Create the sysfs attribute group plus the tty/tub class devices for
 * a 3270 device. On any failure everything created so far is torn
 * down again (goto-based unwind). Returns 0 or a negative errno.
 */
static int raw3270_create_attributes(struct raw3270 *rp)
{
        int rc;

        rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
        if (rc)
                goto out;

        /* Class device for the tty3270 driver (char major/minor). */
        rp->clttydev = class_device_create(class3270, NULL,
                                           MKDEV(IBM_TTY3270_MAJOR, rp->minor),
                                           &rp->cdev->dev, "tty%s",
                                           rp->cdev->dev.bus_id);
        if (IS_ERR(rp->clttydev)) {
                rc = PTR_ERR(rp->clttydev);
                goto out_ttydev;
        }

        /* Class device for the fs3270 driver. */
        rp->cltubdev = class_device_create(class3270, NULL,
                                           MKDEV(IBM_FS3270_MAJOR, rp->minor),
                                           &rp->cdev->dev, "tub%s",
                                           rp->cdev->dev.bus_id);
        if (!IS_ERR(rp->cltubdev))
                goto out;

        rc = PTR_ERR(rp->cltubdev);
        class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));

out_ttydev:
        sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
out:
        return rc;
}
1209 1207
/*
 * Notifier for device addition/removal
 */
struct raw3270_notifier {
        struct list_head list;
        /* Callback: (minor, 1) on device add, (minor, 0) on removal. */
        void (*notifier)(int, int);
};

/* List of registered notifiers, protected by raw3270_mutex. */
static struct list_head raw3270_notifier = LIST_HEAD_INIT(raw3270_notifier);
1219 1217
/*
 * Register a device add/remove callback. The callback is invoked
 * immediately (under raw3270_mutex) for every already-known device.
 * Returns 0 or -ENOMEM.
 */
int raw3270_register_notifier(void (*notifier)(int, int))
{
        struct raw3270_notifier *np;
        struct raw3270 *rp;

        np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL);
        if (!np)
                return -ENOMEM;
        np->notifier = notifier;
        mutex_lock(&raw3270_mutex);
        list_add_tail(&np->list, &raw3270_notifier);
        list_for_each_entry(rp, &raw3270_devices, list) {
                /*
                 * Take a device reference before announcing it.
                 * NOTE(review): no matching put_device() is visible
                 * here — presumably the notified driver owns it.
                 */
                get_device(&rp->cdev->dev);
                notifier(rp->minor, 1);
        }
        mutex_unlock(&raw3270_mutex);
        return 0;
}
1238 1236
1239 void raw3270_unregister_notifier(void (*notifier)(int, int)) 1237 void raw3270_unregister_notifier(void (*notifier)(int, int))
1240 { 1238 {
1241 struct raw3270_notifier *np; 1239 struct raw3270_notifier *np;
1242 1240
1243 mutex_lock(&raw3270_mutex); 1241 mutex_lock(&raw3270_mutex);
1244 list_for_each_entry(np, &raw3270_notifier, list) 1242 list_for_each_entry(np, &raw3270_notifier, list)
1245 if (np->notifier == notifier) { 1243 if (np->notifier == notifier) {
1246 list_del(&np->list); 1244 list_del(&np->list);
1247 kfree(np); 1245 kfree(np);
1248 break; 1246 break;
1249 } 1247 }
1250 mutex_unlock(&raw3270_mutex); 1248 mutex_unlock(&raw3270_mutex);
1251 } 1249 }
1252 1250
/*
 * Set 3270 device online.
 *
 * Creates the raw3270 structure, brings the device into a known state
 * with a reset / size / reset sequence, creates the sysfs attributes,
 * marks the device ready and announces it to all registered
 * notifiers. On failure the half-constructed device is deleted again.
 */
static int
raw3270_set_online (struct ccw_device *cdev)
{
        struct raw3270 *rp;
        struct raw3270_notifier *np;
        int rc;

        rp = raw3270_create_device(cdev);
        if (IS_ERR(rp))
                return PTR_ERR(rp);
        rc = raw3270_reset_device(rp);
        if (rc)
                goto failure;
        /* Determine screen geometry (rows/cols/model). */
        rc = raw3270_size_device(rp);
        if (rc)
                goto failure;
        rc = raw3270_reset_device(rp);
        if (rc)
                goto failure;
        rc = raw3270_create_attributes(rp);
        if (rc)
                goto failure;
        set_bit(RAW3270_FLAGS_READY, &rp->flags);
        /* Tell interested drivers (tty3270/fs3270) about the new minor. */
        mutex_lock(&raw3270_mutex);
        list_for_each_entry(np, &raw3270_notifier, list)
                np->notifier(rp->minor, 1);
        mutex_unlock(&raw3270_mutex);
        return 0;

failure:
        raw3270_delete_device(rp);
        return rc;
}
1289 1287
/*
 * Remove 3270 device structure.
 *
 * Tears down the sysfs attributes, deactivates and releases all
 * views, notifies registered listeners of the removal, resets the
 * hardware and finally frees the device structure.
 */
static void
raw3270_remove (struct ccw_device *cdev)
{
        unsigned long flags;
        struct raw3270 *rp;
        struct raw3270_view *v;
        struct raw3270_notifier *np;

        rp = cdev->dev.driver_data;
        /*
         * _remove is the opposite of _probe; it's probe that
         * should set up rp. raw3270_remove gets entered for
         * devices even if they haven't been varied online.
         * Thus, rp may validly be NULL here.
         */
        if (rp == NULL)
                return;
        clear_bit(RAW3270_FLAGS_READY, &rp->flags);

        sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);

        /* Deactivate current view and remove all views. */
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        if (rp->view) {
                rp->view->fn->deactivate(rp->view);
                rp->view = NULL;
        }
        while (!list_empty(&rp->view_list)) {
                v = list_entry(rp->view_list.next, struct raw3270_view, list);
                if (v->fn->release)
                        v->fn->release(v);
                /*
                 * raw3270_del_view() sleeps waiting for the view's
                 * refcount, so the spinlock must be dropped around it
                 * and re-taken before examining the list again.
                 */
                spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
                raw3270_del_view(v);
                spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

        /* Announce removal of this minor to all notifiers. */
        mutex_lock(&raw3270_mutex);
        list_for_each_entry(np, &raw3270_notifier, list)
                np->notifier(rp->minor, 0);
        mutex_unlock(&raw3270_mutex);

        /* Reset 3270 device. */
        raw3270_reset_device(rp);
        /* And finally remove it. */
        raw3270_delete_device(rp);
}
1340 1338
1341 /* 1339 /*
1342 * Set 3270 device offline. 1340 * Set 3270 device offline.
1343 */ 1341 */
1344 static int 1342 static int
1345 raw3270_set_offline (struct ccw_device *cdev) 1343 raw3270_set_offline (struct ccw_device *cdev)
1346 { 1344 {
1347 struct raw3270 *rp; 1345 struct raw3270 *rp;
1348 1346
1349 rp = cdev->dev.driver_data; 1347 rp = cdev->dev.driver_data;
1350 if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) 1348 if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
1351 return -EBUSY; 1349 return -EBUSY;
1352 raw3270_remove(cdev); 1350 raw3270_remove(cdev);
1353 return 0; 1351 return 0;
1354 } 1352 }
1355 1353
/* CU types handled by this driver: 3270-family display controllers. */
static struct ccw_device_id raw3270_id[] = {
        { CCW_DEVICE(0x3270, 0) },
        { CCW_DEVICE(0x3271, 0) },
        { CCW_DEVICE(0x3272, 0) },
        { CCW_DEVICE(0x3273, 0) },
        { CCW_DEVICE(0x3274, 0) },
        { CCW_DEVICE(0x3275, 0) },
        { CCW_DEVICE(0x3276, 0) },
        { CCW_DEVICE(0x3277, 0) },
        { CCW_DEVICE(0x3278, 0) },
        { CCW_DEVICE(0x3279, 0) },
        { CCW_DEVICE(0x3174, 0) },
        { /* end of list */ },
};
1370 1368
1371 static struct ccw_driver raw3270_ccw_driver = { 1369 static struct ccw_driver raw3270_ccw_driver = {
1372 .name = "3270", 1370 .name = "3270",
1373 .owner = THIS_MODULE, 1371 .owner = THIS_MODULE,
1374 .ids = raw3270_id, 1372 .ids = raw3270_id,
1375 .probe = &raw3270_probe, 1373 .probe = &raw3270_probe,
1376 .remove = &raw3270_remove, 1374 .remove = &raw3270_remove,
1377 .set_online = &raw3270_set_online, 1375 .set_online = &raw3270_set_online,
1378 .set_offline = &raw3270_set_offline, 1376 .set_offline = &raw3270_set_offline,
1379 }; 1377 };
1380 1378
1381 static int 1379 static int
1382 raw3270_init(void) 1380 raw3270_init(void)
1383 { 1381 {
1384 struct raw3270 *rp; 1382 struct raw3270 *rp;
1385 int rc; 1383 int rc;
1386 1384
1387 if (raw3270_registered) 1385 if (raw3270_registered)
1388 return 0; 1386 return 0;
1389 raw3270_registered = 1; 1387 raw3270_registered = 1;
1390 rc = ccw_driver_register(&raw3270_ccw_driver); 1388 rc = ccw_driver_register(&raw3270_ccw_driver);
1391 if (rc == 0) { 1389 if (rc == 0) {
1392 /* Create attributes for early (= console) device. */ 1390 /* Create attributes for early (= console) device. */
1393 mutex_lock(&raw3270_mutex); 1391 mutex_lock(&raw3270_mutex);
1394 class3270 = class_create(THIS_MODULE, "3270"); 1392 class3270 = class_create(THIS_MODULE, "3270");
1395 list_for_each_entry(rp, &raw3270_devices, list) { 1393 list_for_each_entry(rp, &raw3270_devices, list) {
1396 get_device(&rp->cdev->dev); 1394 get_device(&rp->cdev->dev);
1397 raw3270_create_attributes(rp); 1395 raw3270_create_attributes(rp);
1398 } 1396 }
1399 mutex_unlock(&raw3270_mutex); 1397 mutex_unlock(&raw3270_mutex);
1400 } 1398 }
1401 return rc; 1399 return rc;
1402 } 1400 }
1403 1401
/*
 * Module teardown: unregister the ccw driver and destroy the "3270"
 * device class created by raw3270_init().
 */
static void
raw3270_exit(void)
{
	ccw_driver_unregister(&raw3270_ccw_driver);
	class_destroy(class3270);
}
1410 1408
1411 MODULE_LICENSE("GPL"); 1409 MODULE_LICENSE("GPL");
1412 1410
1413 module_init(raw3270_init); 1411 module_init(raw3270_init);
1414 module_exit(raw3270_exit); 1412 module_exit(raw3270_exit);
1415 1413
1416 EXPORT_SYMBOL(raw3270_request_alloc); 1414 EXPORT_SYMBOL(raw3270_request_alloc);
1417 EXPORT_SYMBOL(raw3270_request_free); 1415 EXPORT_SYMBOL(raw3270_request_free);
1418 EXPORT_SYMBOL(raw3270_request_reset); 1416 EXPORT_SYMBOL(raw3270_request_reset);
1419 EXPORT_SYMBOL(raw3270_request_set_cmd); 1417 EXPORT_SYMBOL(raw3270_request_set_cmd);
1420 EXPORT_SYMBOL(raw3270_request_add_data); 1418 EXPORT_SYMBOL(raw3270_request_add_data);
1421 EXPORT_SYMBOL(raw3270_request_set_data); 1419 EXPORT_SYMBOL(raw3270_request_set_data);
1422 EXPORT_SYMBOL(raw3270_request_set_idal); 1420 EXPORT_SYMBOL(raw3270_request_set_idal);
1423 EXPORT_SYMBOL(raw3270_buffer_address); 1421 EXPORT_SYMBOL(raw3270_buffer_address);
1424 EXPORT_SYMBOL(raw3270_add_view); 1422 EXPORT_SYMBOL(raw3270_add_view);
1425 EXPORT_SYMBOL(raw3270_del_view); 1423 EXPORT_SYMBOL(raw3270_del_view);
1426 EXPORT_SYMBOL(raw3270_find_view); 1424 EXPORT_SYMBOL(raw3270_find_view);
1427 EXPORT_SYMBOL(raw3270_activate_view); 1425 EXPORT_SYMBOL(raw3270_activate_view);
1428 EXPORT_SYMBOL(raw3270_deactivate_view); 1426 EXPORT_SYMBOL(raw3270_deactivate_view);
1429 EXPORT_SYMBOL(raw3270_start); 1427 EXPORT_SYMBOL(raw3270_start);
1430 EXPORT_SYMBOL(raw3270_start_locked); 1428 EXPORT_SYMBOL(raw3270_start_locked);
1431 EXPORT_SYMBOL(raw3270_start_irq); 1429 EXPORT_SYMBOL(raw3270_start_irq);
1432 EXPORT_SYMBOL(raw3270_reset); 1430 EXPORT_SYMBOL(raw3270_reset);
1433 EXPORT_SYMBOL(raw3270_register_notifier); 1431 EXPORT_SYMBOL(raw3270_register_notifier);
1434 EXPORT_SYMBOL(raw3270_unregister_notifier); 1432 EXPORT_SYMBOL(raw3270_unregister_notifier);
1435 EXPORT_SYMBOL(raw3270_wait_queue); 1433 EXPORT_SYMBOL(raw3270_wait_queue);
1436 1434
drivers/s390/char/sclp_vt220.c
1 /* 1 /*
2 * drivers/s390/char/sclp_vt220.c 2 * drivers/s390/char/sclp_vt220.c
3 * SCLP VT220 terminal driver. 3 * SCLP VT220 terminal driver.
4 * 4 *
5 * S390 version 5 * S390 version
6 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com> 7 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
8 */ 8 */
9 9
10 #include <linux/module.h> 10 #include <linux/module.h>
11 #include <linux/spinlock.h> 11 #include <linux/spinlock.h>
12 #include <linux/list.h> 12 #include <linux/list.h>
13 #include <linux/wait.h> 13 #include <linux/wait.h>
14 #include <linux/timer.h> 14 #include <linux/timer.h>
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/tty.h> 16 #include <linux/tty.h>
17 #include <linux/tty_driver.h> 17 #include <linux/tty_driver.h>
18 #include <linux/tty_flip.h> 18 #include <linux/tty_flip.h>
19 #include <linux/errno.h> 19 #include <linux/errno.h>
20 #include <linux/mm.h> 20 #include <linux/mm.h>
21 #include <linux/major.h> 21 #include <linux/major.h>
22 #include <linux/console.h> 22 #include <linux/console.h>
23 #include <linux/kdev_t.h> 23 #include <linux/kdev_t.h>
24 #include <linux/bootmem.h> 24 #include <linux/bootmem.h>
25 #include <linux/interrupt.h> 25 #include <linux/interrupt.h>
26 #include <linux/init.h> 26 #include <linux/init.h>
27 #include <asm/uaccess.h> 27 #include <asm/uaccess.h>
28 #include "sclp.h" 28 #include "sclp.h"
29 29
30 #define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: " 30 #define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: "
31 #define SCLP_VT220_MAJOR TTY_MAJOR 31 #define SCLP_VT220_MAJOR TTY_MAJOR
32 #define SCLP_VT220_MINOR 65 32 #define SCLP_VT220_MINOR 65
33 #define SCLP_VT220_DRIVER_NAME "sclp_vt220" 33 #define SCLP_VT220_DRIVER_NAME "sclp_vt220"
34 #define SCLP_VT220_DEVICE_NAME "ttysclp" 34 #define SCLP_VT220_DEVICE_NAME "ttysclp"
35 #define SCLP_VT220_CONSOLE_NAME "ttyS" 35 #define SCLP_VT220_CONSOLE_NAME "ttyS"
36 #define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */ 36 #define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
37 #define SCLP_VT220_BUF_SIZE 80 37 #define SCLP_VT220_BUF_SIZE 80
38 38
/* Representation of a single write request */
struct sclp_vt220_request {
	struct list_head list;		/* link in sclp_vt220_empty/outqueue */
	struct sclp_req sclp_req;	/* embedded SCLP request descriptor */
	int retry_count;		/* resubmissions done for this buffer */
};
45 45
/* VT220 SCCB: SCCB header immediately followed by one event buffer header;
 * message text is appended directly after this structure in the page. */
struct sclp_vt220_sccb {
	struct sccb_header header;
	struct evbuf_header evbuf;
};
51 51
52 #define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \ 52 #define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \
53 sizeof(struct sclp_vt220_request) - \ 53 sizeof(struct sclp_vt220_request) - \
54 sizeof(struct sclp_vt220_sccb)) 54 sizeof(struct sclp_vt220_sccb))
55 55
56 /* Structures and data needed to register tty driver */ 56 /* Structures and data needed to register tty driver */
57 static struct tty_driver *sclp_vt220_driver; 57 static struct tty_driver *sclp_vt220_driver;
58 58
59 /* The tty_struct that the kernel associated with us */ 59 /* The tty_struct that the kernel associated with us */
60 static struct tty_struct *sclp_vt220_tty; 60 static struct tty_struct *sclp_vt220_tty;
61 61
62 /* Lock to protect internal data from concurrent access */ 62 /* Lock to protect internal data from concurrent access */
63 static spinlock_t sclp_vt220_lock; 63 static spinlock_t sclp_vt220_lock;
64 64
65 /* List of empty pages to be used as write request buffers */ 65 /* List of empty pages to be used as write request buffers */
66 static struct list_head sclp_vt220_empty; 66 static struct list_head sclp_vt220_empty;
67 67
68 /* List of pending requests */ 68 /* List of pending requests */
69 static struct list_head sclp_vt220_outqueue; 69 static struct list_head sclp_vt220_outqueue;
70 70
71 /* Number of requests in outqueue */ 71 /* Number of requests in outqueue */
72 static int sclp_vt220_outqueue_count; 72 static int sclp_vt220_outqueue_count;
73 73
74 /* Wait queue used to delay write requests while we've run out of buffers */ 74 /* Wait queue used to delay write requests while we've run out of buffers */
75 static wait_queue_head_t sclp_vt220_waitq; 75 static wait_queue_head_t sclp_vt220_waitq;
76 76
77 /* Timer used for delaying write requests to merge subsequent messages into 77 /* Timer used for delaying write requests to merge subsequent messages into
78 * a single buffer */ 78 * a single buffer */
79 static struct timer_list sclp_vt220_timer; 79 static struct timer_list sclp_vt220_timer;
80 80
81 /* Pointer to current request buffer which has been partially filled but not 81 /* Pointer to current request buffer which has been partially filled but not
82 * yet sent */ 82 * yet sent */
83 static struct sclp_vt220_request *sclp_vt220_current_request; 83 static struct sclp_vt220_request *sclp_vt220_current_request;
84 84
85 /* Number of characters in current request buffer */ 85 /* Number of characters in current request buffer */
86 static int sclp_vt220_buffered_chars; 86 static int sclp_vt220_buffered_chars;
87 87
88 /* Flag indicating whether this driver has already been initialized */ 88 /* Flag indicating whether this driver has already been initialized */
89 static int sclp_vt220_initialized = 0; 89 static int sclp_vt220_initialized = 0;
90 90
91 /* Flag indicating that sclp_vt220_current_request should really 91 /* Flag indicating that sclp_vt220_current_request should really
92 * have been already queued but wasn't because the SCLP was processing 92 * have been already queued but wasn't because the SCLP was processing
93 * another buffer */ 93 * another buffer */
94 static int sclp_vt220_flush_later; 94 static int sclp_vt220_flush_later;
95 95
96 static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf); 96 static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
97 static int __sclp_vt220_emit(struct sclp_vt220_request *request); 97 static int __sclp_vt220_emit(struct sclp_vt220_request *request);
98 static void sclp_vt220_emit_current(void); 98 static void sclp_vt220_emit_current(void);
99 99
/* Registration structure for our interest in SCLP event buffers:
 * both send and receive VT220 message events. */
static struct sclp_register sclp_vt220_register = {
	.send_mask = EVTYP_VT220MSG_MASK,
	.receive_mask = EVTYP_VT220MSG_MASK,
	.state_change_fn = NULL,
	.receiver_fn = sclp_vt220_receiver_fn
};
107 107
108 108
/*
 * Put provided request buffer back into queue and check emit pending
 * buffers if necessary.
 *
 * Called after a request finished (or failed to be emitted). Moves the
 * request's page back to the empty list, then tries to emit the next
 * queued request; if that emit fails, its buffer is recycled too and the
 * loop continues. Finally wakes writers waiting for an empty buffer and,
 * if needed, flushes a partially-filled current buffer.
 */
static void
sclp_vt220_process_queue(struct sclp_vt220_request *request)
{
	unsigned long flags;
	void *page;

	do {
		/* Put buffer back to list of empty buffers */
		page = request->sclp_req.sccb;
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		/* Move request from outqueue to empty queue */
		list_del(&request->list);
		sclp_vt220_outqueue_count--;
		/* The page itself doubles as the list node on the empty list. */
		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
		/* Check if there is a pending buffer on the out queue. */
		request = NULL;
		if (!list_empty(&sclp_vt220_outqueue))
			request = list_entry(sclp_vt220_outqueue.next,
					     struct sclp_vt220_request, list);
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		/* Loop again only if the emit of the next request failed. */
	} while (request && __sclp_vt220_emit(request));
	if (request == NULL && sclp_vt220_flush_later)
		sclp_vt220_emit_current();
	wake_up(&sclp_vt220_waitq);
	/* Check if the tty needs a wake up call */
	if (sclp_vt220_tty != NULL) {
		tty_wakeup(sclp_vt220_tty);
	}
}
142 142
143 #define SCLP_BUFFER_MAX_RETRY 1 143 #define SCLP_BUFFER_MAX_RETRY 1
144 144
/*
 * Callback through which the result of a write request is reported by the
 * SCLP.
 *
 * Inspects the SCCB response code; for the two equipment-check codes the
 * request is re-filled and re-added up to SCLP_BUFFER_MAX_RETRY times
 * (returning early when the resubmission succeeds). In all other cases
 * the request is recycled via sclp_vt220_process_queue().
 */
static void
sclp_vt220_callback(struct sclp_req *request, void *data)
{
	struct sclp_vt220_request *vt220_request;
	struct sclp_vt220_sccb *sccb;

	vt220_request = (struct sclp_vt220_request *) data;
	if (request->status == SCLP_REQ_FAILED) {
		sclp_vt220_process_queue(vt220_request);
		return;
	}
	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;

	/* Check SCLP response code and choose suitable action */
	switch (sccb->header.response_code) {
	case 0x0020 :	/* Normal completion. */
		break;

	case 0x05f0:	/* Target resource in improper state */
		break;

	case 0x0340:	/* Contained SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		/* Remove processed buffers and requeue rest */
		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
			/* Not all buffers were processed */
			sccb->header.response_code = 0x0000;
			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
			if (sclp_add_request(request) == 0)
				return;
		}
		break;

	case 0x0040:	/* SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		sccb->header.response_code = 0x0000;
		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
		if (sclp_add_request(request) == 0)
			return;
		break;

	default:
		break;
	}
	sclp_vt220_process_queue(vt220_request);
}
197 197
/*
 * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
 * otherwise.
 *
 * Fails with -EIO (and marks the request SCLP_REQ_FAILED) when the SCLP
 * currently does not accept VT220 message events.
 */
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
	if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) {
		request->sclp_req.status = SCLP_REQ_FAILED;
		return -EIO;
	}
	request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
	request->sclp_req.status = SCLP_REQ_FILLED;
	/* Completion is reported asynchronously via sclp_vt220_callback. */
	request->sclp_req.callback = sclp_vt220_callback;
	request->sclp_req.callback_data = (void *) request;

	return sclp_add_request(&request->sclp_req);
}
216 216
/*
 * Queue and emit given request.
 *
 * Only the first request on an empty outqueue is emitted directly; for a
 * non-empty queue the completion callback chain emits subsequent buffers.
 * If the direct emit fails the request is recycled immediately.
 */
static void
sclp_vt220_emit(struct sclp_vt220_request *request)
{
	unsigned long flags;
	int count;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	list_add_tail(&request->list, &sclp_vt220_outqueue);
	/* Note: count is the queue length *before* this request was added. */
	count = sclp_vt220_outqueue_count++;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	/* Emit only the first buffer immediately - callback takes care of
	 * the rest */
	if (count == 0 && __sclp_vt220_emit(request))
		sclp_vt220_process_queue(request);
}
235 235
/*
 * Queue and emit the current request, if it exists and contains data.
 * Also cancels the pending flush timer and clears the flush-later flag.
 */
static void
sclp_vt220_emit_current(void)
{
	unsigned long flags;
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	request = NULL;
	if (sclp_vt220_current_request != NULL) {
		sccb = (struct sclp_vt220_sccb *)
				sclp_vt220_current_request->sclp_req.sccb;
		/* Only emit buffers with content */
		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
			request = sclp_vt220_current_request;
			sclp_vt220_current_request = NULL;
			if (timer_pending(&sclp_vt220_timer))
				del_timer(&sclp_vt220_timer);
		}
		sclp_vt220_flush_later = 0;
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	/* Emit outside the lock; sclp_vt220_emit takes it again. */
	if (request != NULL)
		sclp_vt220_emit(request);
}
264 264
265 #define SCLP_NORMAL_WRITE 0x00 265 #define SCLP_NORMAL_WRITE 0x00
266 266
/*
 * Helper function to initialize a page with the sclp request structure.
 *
 * Page layout: the SCCB (header + event buffer header) sits at the start
 * of the page, message text grows after it, and the request structure is
 * placed at the very end of the page. Returns a pointer to the request.
 */
static struct sclp_vt220_request *
sclp_vt220_initialize_page(void *page)
{
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	/* Place request structure at end of page */
	request = ((struct sclp_vt220_request *)
		   ((addr_t) page + PAGE_SIZE)) - 1;
	request->retry_count = 0;
	request->sclp_req.sccb = page;
	/* SCCB goes at start of page */
	sccb = (struct sclp_vt220_sccb *) page;
	memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
	/* Lengths start at header size and grow as text is appended. */
	sccb->header.length = sizeof(struct sclp_vt220_sccb);
	sccb->header.function_code = SCLP_NORMAL_WRITE;
	sccb->header.response_code = 0x0000;
	sccb->evbuf.type = EVTYP_VT220MSG;
	sccb->evbuf.length = sizeof(struct evbuf_header);

	return request;
}
292 292
293 static inline unsigned int 293 static inline unsigned int
294 sclp_vt220_space_left(struct sclp_vt220_request *request) 294 sclp_vt220_space_left(struct sclp_vt220_request *request)
295 { 295 {
296 struct sclp_vt220_sccb *sccb; 296 struct sclp_vt220_sccb *sccb;
297 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb; 297 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
298 return PAGE_SIZE - sizeof(struct sclp_vt220_request) - 298 return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
299 sccb->header.length; 299 sccb->header.length;
300 } 300 }
301 301
302 static inline unsigned int 302 static inline unsigned int
303 sclp_vt220_chars_stored(struct sclp_vt220_request *request) 303 sclp_vt220_chars_stored(struct sclp_vt220_request *request)
304 { 304 {
305 struct sclp_vt220_sccb *sccb; 305 struct sclp_vt220_sccb *sccb;
306 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb; 306 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
307 return sccb->evbuf.length - sizeof(struct evbuf_header); 307 return sccb->evbuf.length - sizeof(struct evbuf_header);
308 } 308 }
309 309
/*
 * Add msg to buffer associated with request. Return the number of characters
 * added.
 *
 * If convertlf is non-zero, each 0x0a in the input is expanded to
 * 0x0a 0x0d in the buffer; a 0x0a is only stored if both bytes of the
 * expansion fit. The SCCB and event buffer lengths are advanced by the
 * number of bytes actually stored. The return value counts *input*
 * characters consumed, which may be less than bytes stored when
 * converting linefeeds.
 */
static int
sclp_vt220_add_msg(struct sclp_vt220_request *request,
		   const unsigned char *msg, int count, int convertlf)
{
	struct sclp_vt220_sccb *sccb;
	void *buffer;
	unsigned char c;
	int from;
	int to;

	/* Clamp to the free space remaining in this buffer. */
	if (count > sclp_vt220_space_left(request))
		count = sclp_vt220_space_left(request);
	if (count <= 0)
		return 0;

	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	/* Text is appended directly after the bytes already in the SCCB. */
	buffer = (void *) ((addr_t) sccb + sccb->header.length);

	if (convertlf) {
		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
		for (from=0, to=0;
		     (from < count) && (to < sclp_vt220_space_left(request));
		     from++) {
			/* Retrieve character */
			c = msg[from];
			/* Perform conversion */
			if (c == 0x0a) {
				/* Need room for both bytes of the expansion. */
				if (to + 1 < sclp_vt220_space_left(request)) {
					((unsigned char *) buffer)[to++] = c;
					((unsigned char *) buffer)[to++] = 0x0d;
				} else
					break;

			} else
				((unsigned char *) buffer)[to++] = c;
		}
		sccb->header.length += to;
		sccb->evbuf.length += to;
		/* Input characters consumed, not output bytes stored. */
		return from;
	} else {
		memcpy(buffer, (const void *) msg, count);
		sccb->header.length += count;
		sccb->evbuf.length += count;
		return count;
	}
}
360 360
/*
 * Emit buffer after having waited long enough for more data to arrive.
 * Timer callback for sclp_vt220_timer; the data argument is unused.
 */
static void
sclp_vt220_timeout(unsigned long data)
{
	sclp_vt220_emit_current();
}
369 369
370 #define BUFFER_MAX_DELAY HZ/2 370 #define BUFFER_MAX_DELAY HZ/2
371 371
/*
 * Internal implementation of the write function. Write COUNT bytes of data
 * from memory at BUF
 * to the SCLP interface. In case that the data does not fit into the current
 * write buffer, emit the current one and allocate a new one. If there are no
 * more empty buffers available, wait until one gets emptied. If DO_SCHEDULE
 * is non-zero, the buffer will be scheduled for emitting after a timeout -
 * otherwise the user has to explicitly call the flush function.
 * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
 * buffer should be converted to 0x0a 0x0d. After completion, return the number
 * of bytes written.
 *
 * Note: sclp_vt220_lock is dropped and re-taken around both the wait for
 * an empty buffer and the emit of a full one.
 */
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
		   int convertlf)
{
	unsigned long flags;
	void *page;
	int written;
	int overall_written;

	if (count <= 0)
		return 0;
	overall_written = 0;
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	do {
		/* Create a sclp output buffer if none exists yet */
		if (sclp_vt220_current_request == NULL) {
			while (list_empty(&sclp_vt220_empty)) {
				spin_unlock_irqrestore(&sclp_vt220_lock,
						       flags);
				/* In interrupt context we cannot sleep;
				 * poll the SCLP until a buffer completes. */
				if (in_interrupt())
					sclp_sync_wait();
				else
					wait_event(sclp_vt220_waitq,
						!list_empty(&sclp_vt220_empty));
				spin_lock_irqsave(&sclp_vt220_lock, flags);
			}
			page = (void *) sclp_vt220_empty.next;
			list_del((struct list_head *) page);
			sclp_vt220_current_request =
				sclp_vt220_initialize_page(page);
		}
		/* Try to write the string to the current request buffer */
		written = sclp_vt220_add_msg(sclp_vt220_current_request,
					     buf, count, convertlf);
		overall_written += written;
		if (written == count)
			break;
		/*
		 * Not all characters could be written to the current
		 * output buffer. Emit the buffer, create a new buffer
		 * and then output the rest of the string.
		 */
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_vt220_emit_current();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		buf += written;
		count -= written;
	} while (count > 0);
	/* Setup timer to output current console buffer after some time */
	if (sclp_vt220_current_request != NULL &&
	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
		sclp_vt220_timer.function = sclp_vt220_timeout;
		sclp_vt220_timer.data = 0UL;
		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
		add_timer(&sclp_vt220_timer);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return overall_written;
}
443 443
/*
 * This routine is called by the kernel to write a series of
 * characters to the tty device. The characters may come from
 * user space or kernel space. This routine will return the
 * number of characters actually accepted for writing.
 *
 * Schedules delayed emission (do_schedule=1) and performs no linefeed
 * conversion (convertlf=0).
 */
static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	return __sclp_vt220_write(buf, count, 1, 0);
}
455 455
456 #define SCLP_VT220_SESSION_ENDED 0x01 456 #define SCLP_VT220_SESSION_ENDED 0x01
457 #define SCLP_VT220_SESSION_STARTED 0x80 457 #define SCLP_VT220_SESSION_STARTED 0x80
458 #define SCLP_VT220_SESSION_DATA 0x00 458 #define SCLP_VT220_SESSION_DATA 0x00
459 459
/*
 * Called by the SCLP to report incoming event buffers.
 *
 * The first payload byte is a session-control code; only DATA events are
 * forwarded (minus that lead byte) to the tty flip buffer. Input is
 * dropped while no tty is open.
 */
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
	char *buffer;
	unsigned int count;

	/* Ignore input if device is not open */
	if (sclp_vt220_tty == NULL)
		return;

	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
	count = evbuf->length - sizeof(struct evbuf_header);

	switch (*buffer) {
	case SCLP_VT220_SESSION_ENDED:
	case SCLP_VT220_SESSION_STARTED:
		break;
	case SCLP_VT220_SESSION_DATA:
		/* Send input to line discipline */
		buffer++;
		count--;
		tty_insert_flip_string(sclp_vt220_tty, buffer, count);
		tty_flip_buffer_push(sclp_vt220_tty);
		break;
	}
}
489 489
490 /* 490 /*
491 * This routine is called when a particular tty device is opened. 491 * This routine is called when a particular tty device is opened.
492 */ 492 */
493 static int 493 static int
494 sclp_vt220_open(struct tty_struct *tty, struct file *filp) 494 sclp_vt220_open(struct tty_struct *tty, struct file *filp)
495 { 495 {
496 if (tty->count == 1) { 496 if (tty->count == 1) {
497 sclp_vt220_tty = tty; 497 sclp_vt220_tty = tty;
498 tty->driver_data = kmalloc(SCLP_VT220_BUF_SIZE, GFP_KERNEL); 498 tty->driver_data = kmalloc(SCLP_VT220_BUF_SIZE, GFP_KERNEL);
499 if (tty->driver_data == NULL) 499 if (tty->driver_data == NULL)
500 return -ENOMEM; 500 return -ENOMEM;
501 tty->low_latency = 0; 501 tty->low_latency = 0;
502 } 502 }
503 return 0; 503 return 0;
504 } 504 }
505 505
506 /* 506 /*
507 * This routine is called when a particular tty device is closed. 507 * This routine is called when a particular tty device is closed.
508 */ 508 */
509 static void 509 static void
510 sclp_vt220_close(struct tty_struct *tty, struct file *filp) 510 sclp_vt220_close(struct tty_struct *tty, struct file *filp)
511 { 511 {
512 if (tty->count == 1) { 512 if (tty->count == 1) {
513 sclp_vt220_tty = NULL; 513 sclp_vt220_tty = NULL;
514 kfree(tty->driver_data); 514 kfree(tty->driver_data);
515 tty->driver_data = NULL; 515 tty->driver_data = NULL;
516 } 516 }
517 } 517 }
518 518
519 /* 519 /*
520 * This routine is called by the kernel to write a single 520 * This routine is called by the kernel to write a single
521 * character to the tty device. If the kernel uses this routine, 521 * character to the tty device. If the kernel uses this routine,
522 * it must call the flush_chars() routine (if defined) when it is 522 * it must call the flush_chars() routine (if defined) when it is
523 * done stuffing characters into the driver. 523 * done stuffing characters into the driver.
524 * 524 *
525 * NOTE: include/linux/tty_driver.h specifies that a character should be 525 * NOTE: include/linux/tty_driver.h specifies that a character should be
526 * ignored if there is no room in the queue. This driver implements a different 526 * ignored if there is no room in the queue. This driver implements a different
527 * semantic in that it will block when there is no more room left. 527 * semantic in that it will block when there is no more room left.
528 */ 528 */
static void
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
	/* Queue a single byte. NOTE(review): the trailing args (0, 0)
	 * presumably mean "do not emit yet" (flushing is deferred to
	 * sclp_vt220_flush_chars()) — confirm against __sclp_vt220_write(). */
	__sclp_vt220_write(&ch, 1, 0, 0);
}
534 534
535 /* 535 /*
536 * This routine is called by the kernel after it has written a 536 * This routine is called by the kernel after it has written a
537 * series of characters to the tty device using put_char(). 537 * series of characters to the tty device using put_char().
538 */ 538 */
539 static void 539 static void
540 sclp_vt220_flush_chars(struct tty_struct *tty) 540 sclp_vt220_flush_chars(struct tty_struct *tty)
541 { 541 {
542 if (sclp_vt220_outqueue_count == 0) 542 if (sclp_vt220_outqueue_count == 0)
543 sclp_vt220_emit_current(); 543 sclp_vt220_emit_current();
544 else 544 else
545 sclp_vt220_flush_later = 1; 545 sclp_vt220_flush_later = 1;
546 } 546 }
547 547
548 /* 548 /*
549 * This routine returns the numbers of characters the tty driver 549 * This routine returns the numbers of characters the tty driver
550 * will accept for queuing to be written. This number is subject 550 * will accept for queuing to be written. This number is subject
551 * to change as output buffers get emptied, or if the output flow 551 * to change as output buffers get emptied, or if the output flow
552 * control is acted. 552 * control is acted.
553 */ 553 */
554 static int 554 static int
555 sclp_vt220_write_room(struct tty_struct *tty) 555 sclp_vt220_write_room(struct tty_struct *tty)
556 { 556 {
557 unsigned long flags; 557 unsigned long flags;
558 struct list_head *l; 558 struct list_head *l;
559 int count; 559 int count;
560 560
561 spin_lock_irqsave(&sclp_vt220_lock, flags); 561 spin_lock_irqsave(&sclp_vt220_lock, flags);
562 count = 0; 562 count = 0;
563 if (sclp_vt220_current_request != NULL) 563 if (sclp_vt220_current_request != NULL)
564 count = sclp_vt220_space_left(sclp_vt220_current_request); 564 count = sclp_vt220_space_left(sclp_vt220_current_request);
565 list_for_each(l, &sclp_vt220_empty) 565 list_for_each(l, &sclp_vt220_empty)
566 count += SCLP_VT220_MAX_CHARS_PER_BUFFER; 566 count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
567 spin_unlock_irqrestore(&sclp_vt220_lock, flags); 567 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
568 return count; 568 return count;
569 } 569 }
570 570
571 /* 571 /*
572 * Return number of buffered chars. 572 * Return number of buffered chars.
573 */ 573 */
574 static int 574 static int
575 sclp_vt220_chars_in_buffer(struct tty_struct *tty) 575 sclp_vt220_chars_in_buffer(struct tty_struct *tty)
576 { 576 {
577 unsigned long flags; 577 unsigned long flags;
578 struct list_head *l; 578 struct list_head *l;
579 struct sclp_vt220_request *r; 579 struct sclp_vt220_request *r;
580 int count; 580 int count;
581 581
582 spin_lock_irqsave(&sclp_vt220_lock, flags); 582 spin_lock_irqsave(&sclp_vt220_lock, flags);
583 count = 0; 583 count = 0;
584 if (sclp_vt220_current_request != NULL) 584 if (sclp_vt220_current_request != NULL)
585 count = sclp_vt220_chars_stored(sclp_vt220_current_request); 585 count = sclp_vt220_chars_stored(sclp_vt220_current_request);
586 list_for_each(l, &sclp_vt220_outqueue) { 586 list_for_each(l, &sclp_vt220_outqueue) {
587 r = list_entry(l, struct sclp_vt220_request, list); 587 r = list_entry(l, struct sclp_vt220_request, list);
588 count += sclp_vt220_chars_stored(r); 588 count += sclp_vt220_chars_stored(r);
589 } 589 }
590 spin_unlock_irqrestore(&sclp_vt220_lock, flags); 590 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
591 return count; 591 return count;
592 } 592 }
593 593
static void
__sclp_vt220_flush_buffer(void)
{
	unsigned long flags;

	/* Push the partially filled current buffer out first. */
	sclp_vt220_emit_current();
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	/* Cancel any pending timer; we are flushing synchronously here. */
	if (timer_pending(&sclp_vt220_timer))
		del_timer(&sclp_vt220_timer);
	/* Poll until all queued requests have completed. The lock is
	 * dropped around sclp_sync_wait() — presumably so the completion
	 * path can decrement sclp_vt220_outqueue_count (the decrement is
	 * outside this hunk; confirm). Polling rather than sleeping keeps
	 * this usable from the console unblank/panic path (see
	 * sclp_vt220_con_unblank()). */
	while (sclp_vt220_outqueue_count > 0) {
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_sync_wait();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
610 610
611 /* 611 /*
612 * Pass on all buffers to the hardware. Return only when there are no more 612 * Pass on all buffers to the hardware. Return only when there are no more
613 * buffers pending. 613 * buffers pending.
614 */ 614 */
static void
sclp_vt220_flush_buffer(struct tty_struct *tty)
{
	/* Hand the currently accumulating buffer over to the SCLP; does
	 * not wait for completion (compare __sclp_vt220_flush_buffer()). */
	sclp_vt220_emit_current();
}
620 620
621 /* 621 /*
622 * Initialize all relevant components and register driver with system. 622 * Initialize all relevant components and register driver with system.
623 */ 623 */
624 static int 624 static int __init_refok __sclp_vt220_init(int early)
625 __sclp_vt220_init(int early)
626 { 625 {
627 void *page; 626 void *page;
628 int i; 627 int i;
629 628
630 if (sclp_vt220_initialized) 629 if (sclp_vt220_initialized)
631 return 0; 630 return 0;
632 sclp_vt220_initialized = 1; 631 sclp_vt220_initialized = 1;
633 spin_lock_init(&sclp_vt220_lock); 632 spin_lock_init(&sclp_vt220_lock);
634 INIT_LIST_HEAD(&sclp_vt220_empty); 633 INIT_LIST_HEAD(&sclp_vt220_empty);
635 INIT_LIST_HEAD(&sclp_vt220_outqueue); 634 INIT_LIST_HEAD(&sclp_vt220_outqueue);
636 init_waitqueue_head(&sclp_vt220_waitq); 635 init_waitqueue_head(&sclp_vt220_waitq);
637 init_timer(&sclp_vt220_timer); 636 init_timer(&sclp_vt220_timer);
638 sclp_vt220_current_request = NULL; 637 sclp_vt220_current_request = NULL;
639 sclp_vt220_buffered_chars = 0; 638 sclp_vt220_buffered_chars = 0;
640 sclp_vt220_outqueue_count = 0; 639 sclp_vt220_outqueue_count = 0;
641 sclp_vt220_tty = NULL; 640 sclp_vt220_tty = NULL;
642 sclp_vt220_flush_later = 0; 641 sclp_vt220_flush_later = 0;
643 642
644 /* Allocate pages for output buffering */ 643 /* Allocate pages for output buffering */
645 for (i = 0; i < (early ? MAX_CONSOLE_PAGES : MAX_KMEM_PAGES); i++) { 644 for (i = 0; i < (early ? MAX_CONSOLE_PAGES : MAX_KMEM_PAGES); i++) {
646 if (early) 645 if (early)
647 page = alloc_bootmem_low_pages(PAGE_SIZE); 646 page = alloc_bootmem_low_pages(PAGE_SIZE);
648 else 647 else
649 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 648 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
650 if (!page) 649 if (!page)
651 return -ENOMEM; 650 return -ENOMEM;
652 list_add_tail((struct list_head *) page, &sclp_vt220_empty); 651 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
653 } 652 }
654 return 0; 653 return 0;
655 } 654 }
656 655
/* tty callbacks implemented by this driver (no hangup/termios hooks). */
static const struct tty_operations sclp_vt220_ops = {
	.open = sclp_vt220_open,
	.close = sclp_vt220_close,
	.write = sclp_vt220_write,
	.put_char = sclp_vt220_put_char,
	.flush_chars = sclp_vt220_flush_chars,
	.write_room = sclp_vt220_write_room,
	.chars_in_buffer = sclp_vt220_chars_in_buffer,
	.flush_buffer = sclp_vt220_flush_buffer
};
667 666
668 /* 667 /*
669 * Register driver with SCLP and Linux and initialize internal tty structures. 668 * Register driver with SCLP and Linux and initialize internal tty structures.
670 */ 669 */
671 static int __init 670 static int __init
672 sclp_vt220_tty_init(void) 671 sclp_vt220_tty_init(void)
673 { 672 {
674 struct tty_driver *driver; 673 struct tty_driver *driver;
675 int rc; 674 int rc;
676 675
677 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve 676 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
678 * symmetry between VM and LPAR systems regarding ttyS1. */ 677 * symmetry between VM and LPAR systems regarding ttyS1. */
679 driver = alloc_tty_driver(1); 678 driver = alloc_tty_driver(1);
680 if (!driver) 679 if (!driver)
681 return -ENOMEM; 680 return -ENOMEM;
682 rc = __sclp_vt220_init(0); 681 rc = __sclp_vt220_init(0);
683 if (rc) { 682 if (rc) {
684 put_tty_driver(driver); 683 put_tty_driver(driver);
685 return rc; 684 return rc;
686 } 685 }
687 rc = sclp_register(&sclp_vt220_register); 686 rc = sclp_register(&sclp_vt220_register);
688 if (rc) { 687 if (rc) {
689 printk(KERN_ERR SCLP_VT220_PRINT_HEADER 688 printk(KERN_ERR SCLP_VT220_PRINT_HEADER
690 "could not register tty - " 689 "could not register tty - "
691 "sclp_register returned %d\n", rc); 690 "sclp_register returned %d\n", rc);
692 put_tty_driver(driver); 691 put_tty_driver(driver);
693 return rc; 692 return rc;
694 } 693 }
695 694
696 driver->owner = THIS_MODULE; 695 driver->owner = THIS_MODULE;
697 driver->driver_name = SCLP_VT220_DRIVER_NAME; 696 driver->driver_name = SCLP_VT220_DRIVER_NAME;
698 driver->name = SCLP_VT220_DEVICE_NAME; 697 driver->name = SCLP_VT220_DEVICE_NAME;
699 driver->major = SCLP_VT220_MAJOR; 698 driver->major = SCLP_VT220_MAJOR;
700 driver->minor_start = SCLP_VT220_MINOR; 699 driver->minor_start = SCLP_VT220_MINOR;
701 driver->type = TTY_DRIVER_TYPE_SYSTEM; 700 driver->type = TTY_DRIVER_TYPE_SYSTEM;
702 driver->subtype = SYSTEM_TYPE_TTY; 701 driver->subtype = SYSTEM_TYPE_TTY;
703 driver->init_termios = tty_std_termios; 702 driver->init_termios = tty_std_termios;
704 driver->flags = TTY_DRIVER_REAL_RAW; 703 driver->flags = TTY_DRIVER_REAL_RAW;
705 tty_set_operations(driver, &sclp_vt220_ops); 704 tty_set_operations(driver, &sclp_vt220_ops);
706 705
707 rc = tty_register_driver(driver); 706 rc = tty_register_driver(driver);
708 if (rc) { 707 if (rc) {
709 printk(KERN_ERR SCLP_VT220_PRINT_HEADER 708 printk(KERN_ERR SCLP_VT220_PRINT_HEADER
710 "could not register tty - " 709 "could not register tty - "
711 "tty_register_driver returned %d\n", rc); 710 "tty_register_driver returned %d\n", rc);
712 put_tty_driver(driver); 711 put_tty_driver(driver);
713 return rc; 712 return rc;
714 } 713 }
715 sclp_vt220_driver = driver; 714 sclp_vt220_driver = driver;
716 return 0; 715 return 0;
717 } 716 }
718 717
/* Register the tty driver during module/boot initialization. */
module_init(sclp_vt220_tty_init);
720 719
721 #ifdef CONFIG_SCLP_VT220_CONSOLE 720 #ifdef CONFIG_SCLP_VT220_CONSOLE
722 721
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
	/* Console output path. NOTE(review): the trailing args (1, 1)
	 * presumably request immediate emission — confirm against the
	 * definition of __sclp_vt220_write(), which is outside this hunk. */
	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1);
}
728 727
static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
	/* Map the console to the (single-device) sclp_vt220 tty driver. */
	*index = 0;
	return sclp_vt220_driver;
}
735 734
736 /* 735 /*
737 * This routine is called from panic when the kernel is going to give up. 736 * This routine is called from panic when the kernel is going to give up.
738 * We have to make sure that all buffers will be flushed to the SCLP. 737 * We have to make sure that all buffers will be flushed to the SCLP.
739 * Note that this function may be called from within an interrupt context. 738 * Note that this function may be called from within an interrupt context.
740 */ 739 */
static void
sclp_vt220_con_unblank(void)
{
	/* Synchronously drain all pending output (see comment above);
	 * safe in interrupt/panic context because the flush polls. */
	__sclp_vt220_flush_buffer();
}
746 745
747 /* Structure needed to register with printk */ 746 /* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
	.name = SCLP_VT220_CONSOLE_NAME,
	.write = sclp_vt220_con_write,
	.device = sclp_vt220_con_device,
	.unblank = sclp_vt220_con_unblank,
	/* Replay the kernel log buffer when this console registers. */
	.flags = CON_PRINTBUFFER,
	.index = SCLP_VT220_CONSOLE_INDEX
};
757 756
758 static int __init 757 static int __init
759 sclp_vt220_con_init(void) 758 sclp_vt220_con_init(void)
760 { 759 {
761 int rc; 760 int rc;
762 761
763 if (!CONSOLE_IS_SCLP) 762 if (!CONSOLE_IS_SCLP)
764 return 0; 763 return 0;
765 rc = __sclp_vt220_init(1); 764 rc = __sclp_vt220_init(1);
766 if (rc) 765 if (rc)
767 return rc; 766 return rc;
768 /* Attach linux console */ 767 /* Attach linux console */
769 register_console(&sclp_vt220_console); 768 register_console(&sclp_vt220_console);
770 return 0; 769 return 0;
771 } 770 }
772 771
/* Set up the SCLP VT220 console early in boot, before regular initcalls. */
console_initcall(sclp_vt220_con_init);
774 #endif /* CONFIG_SCLP_VT220_CONSOLE */ 773 #endif /* CONFIG_SCLP_VT220_CONSOLE */
775 774
776 775