Commit 25c8716cb08dea386c7d6220b82eba732ccbf976

Authored by Tobias Klauser
Committed by Linus Torvalds
1 parent: c8e5429e49

[PATCH] arch/alpha: Use ARRAY_SIZE macro

Use ARRAY_SIZE macro instead of sizeof(x)/sizeof(x[0]) and remove a
duplicate of the macro.  Also remove some trailing whitespaces and needless
braces.

Signed-off-by: Tobias Klauser <tklauser@distanz.ch>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 5 changed files with 34 additions and 38 deletions
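
The commit relies on the kernel's ARRAY_SIZE() helper from include/linux/kernel.h, and removes a private duplicate of it elsewhere in the alpha code. As a reference for the conversions below, here is a minimal sketch of the idiom; the names[] array and the two count variables are purely illustrative and not part of the patch. Note that the err_ev7.c loop it replaces divided by sizeof(el_ev7_pal_annotations[1]) rather than [0], which yields the same value but reads like an off-by-one.

#include <stddef.h>

/* The helper the patch switches to; the kernel defines it as: */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Hypothetical array, for illustration only. */
static const char *names[] = { "a", "b", "c" };

static const size_t n_open_coded = sizeof(names) / sizeof(names[0]); /* old idiom */
static const size_t n_macro = ARRAY_SIZE(names);                     /* == 3 */

Using the macro also avoids naming the element type or a particular index, which is exactly where the [1] slip above crept in.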

arch/alpha/kernel/err_ev7.c
/*
 * linux/arch/alpha/kernel/err_ev7.c
 *
 * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
 *
 * Error handling code supporting Alpha systems
 */

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/hwrpb.h>
#include <asm/smp.h>
#include <asm/err_common.h>
#include <asm/err_ev7.h>

#include "err_impl.h"
#include "proto.h"

struct ev7_lf_subpackets *
ev7_collect_logout_frame_subpackets(struct el_subpacket *el_ptr,
                                    struct ev7_lf_subpackets *lf_subpackets)
{
        struct el_subpacket *subpacket;
        int i;

        /*
         * A Marvel machine check frame is always packaged in an
         * el_subpacket of class HEADER, type LOGOUT_FRAME.
         */
        if (el_ptr->class != EL_CLASS__HEADER ||
            el_ptr->type != EL_TYPE__HEADER__LOGOUT_FRAME)
                return NULL;

        /*
         * It is a logout frame header. Look at the one subpacket.
         */
        el_ptr = (struct el_subpacket *)
                ((unsigned long)el_ptr + el_ptr->length);

        /*
         * It has to be class PAL, type LOGOUT_FRAME.
         */
        if (el_ptr->class != EL_CLASS__PAL ||
            el_ptr->type != EL_TYPE__PAL__LOGOUT_FRAME)
                return NULL;

        lf_subpackets->logout = (struct ev7_pal_logout_subpacket *)
                el_ptr->by_type.raw.data_start;

        /*
         * Process the subpackets.
         */
        subpacket = (struct el_subpacket *)
                ((unsigned long)el_ptr + el_ptr->length);
        for (i = 0;
             subpacket && i < lf_subpackets->logout->subpacket_count;
             subpacket = (struct el_subpacket *)
                ((unsigned long)subpacket + subpacket->length), i++) {
                /*
                 * All subpackets should be class PAL.
                 */
                if (subpacket->class != EL_CLASS__PAL) {
                        printk("%s**UNEXPECTED SUBPACKET CLASS %d "
                               "IN LOGOUT FRAME (packet %d\n",
                               err_print_prefix, subpacket->class, i);
                        return NULL;
                }

                /*
                 * Remember the subpacket.
                 */
                switch(subpacket->type) {
                case EL_TYPE__PAL__EV7_PROCESSOR:
                        lf_subpackets->ev7 =
                                (struct ev7_pal_processor_subpacket *)
                                subpacket->by_type.raw.data_start;
                        break;

                case EL_TYPE__PAL__EV7_RBOX:
                        lf_subpackets->rbox = (struct ev7_pal_rbox_subpacket *)
                                subpacket->by_type.raw.data_start;
                        break;

                case EL_TYPE__PAL__EV7_ZBOX:
                        lf_subpackets->zbox = (struct ev7_pal_zbox_subpacket *)
                                subpacket->by_type.raw.data_start;
                        break;

                case EL_TYPE__PAL__EV7_IO:
                        lf_subpackets->io = (struct ev7_pal_io_subpacket *)
                                subpacket->by_type.raw.data_start;
                        break;

                case EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE:
                case EL_TYPE__PAL__ENV__AIRMOVER_FAN:
                case EL_TYPE__PAL__ENV__VOLTAGE:
                case EL_TYPE__PAL__ENV__INTRUSION:
                case EL_TYPE__PAL__ENV__POWER_SUPPLY:
                case EL_TYPE__PAL__ENV__LAN:
                case EL_TYPE__PAL__ENV__HOT_PLUG:
                        lf_subpackets->env[ev7_lf_env_index(subpacket->type)] =
                                (struct ev7_pal_environmental_subpacket *)
                                subpacket->by_type.raw.data_start;
                        break;

                default:
                        /*
                         * Don't know what kind of frame this is.
                         */
                        return NULL;
                }
        }

        return lf_subpackets;
}

void
ev7_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
{
        struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr;
        char *saved_err_prefix = err_print_prefix;

        /*
         * Sync the processor
         */
        mb();
        draina();

        err_print_prefix = KERN_CRIT;
        printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d\n",
               err_print_prefix,
               (vector == SCB_Q_PROCERR) ? "Correctable" : "Uncorrectable",
               (unsigned int)vector, (int)smp_processor_id());
        el_process_subpacket(el_ptr);
        err_print_prefix = saved_err_prefix;

        /*
         * Release the logout frame
         */
        wrmces(0x7);
        mb();
}

static char *el_ev7_processor_subpacket_annotation[] = {
        "Subpacket Header", "I_STAT", "DC_STAT",
        "C_ADDR", "C_SYNDROME_1", "C_SYNDROME_0",
        "C_STAT", "C_STS", "MM_STAT",
        "EXC_ADDR", "IER_CM", "ISUM",
        "PAL_BASE", "I_CTL", "PROCESS_CONTEXT",
        "CBOX_CTL", "CBOX_STP_CTL", "CBOX_ACC_CTL",
        "CBOX_LCL_SET", "CBOX_GLB_SET", "BBOX_CTL",
        "BBOX_ERR_STS", "BBOX_ERR_IDX", "CBOX_DDP_ERR_STS",
        "BBOX_DAT_RMP", NULL
};

static char *el_ev7_zbox_subpacket_annotation[] = {
        "Subpacket Header",
        "ZBOX(0): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
        "ZBOX(0): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
        "ZBOX(0): DIFT_TIMEOUT / DRAM_ERR_ADR",
        "ZBOX(0): FRC_ERR_ADR / DRAM_MAPPER_CTL",
        "ZBOX(0): reserved / DIFT_ERR_STATUS",
        "ZBOX(1): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
        "ZBOX(1): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
        "ZBOX(1): DIFT_TIMEOUT / DRAM_ERR_ADR",
        "ZBOX(1): FRC_ERR_ADR / DRAM_MAPPER_CTL",
        "ZBOX(1): reserved / DIFT_ERR_STATUS",
        "CBOX_CTL", "CBOX_STP_CTL",
        "ZBOX(0)_ERROR_PA", "ZBOX(1)_ERROR_PA",
        "ZBOX(0)_ORED_SYNDROME","ZBOX(1)_ORED_SYNDROME",
        NULL
};

static char *el_ev7_rbox_subpacket_annotation[] = {
        "Subpacket Header", "RBOX_CFG", "RBOX_N_CFG",
        "RBOX_S_CFG", "RBOX_E_CFG", "RBOX_W_CFG",
        "RBOX_N_ERR", "RBOX_S_ERR", "RBOX_E_ERR",
        "RBOX_W_ERR", "RBOX_IO_CFG", "RBOX_IO_ERR",
        "RBOX_L_ERR", "RBOX_WHOAMI", "RBOX_IMASL",
        "RBOX_INTQ", "RBOX_INT", NULL
};

static char *el_ev7_io_subpacket_annotation[] = {
        "Subpacket Header", "IO_ASIC_REV", "IO_SYS_REV",
        "IO7_UPH", "HPI_CTL", "CRD_CTL",
        "HEI_CTL", "PO7_ERROR_SUM","PO7_UNCRR_SYM",
        "PO7_CRRCT_SYM", "PO7_UGBGE_SYM","PO7_ERR_PKT0",
        "PO7_ERR_PKT1", "reserved", "reserved",
        "PO0_ERR_SUM", "PO0_TLB_ERR", "PO0_SPL_COMPLT",
        "PO0_TRANS_SUM", "PO0_FIRST_ERR","PO0_MULT_ERR",
        "DM CSR PH", "DM CSR PH", "DM CSR PH",
        "DM CSR PH", "reserved",
        "PO1_ERR_SUM", "PO1_TLB_ERR", "PO1_SPL_COMPLT",
        "PO1_TRANS_SUM", "PO1_FIRST_ERR","PO1_MULT_ERR",
        "DM CSR PH", "DM CSR PH", "DM CSR PH",
        "DM CSR PH", "reserved",
        "PO2_ERR_SUM", "PO2_TLB_ERR", "PO2_SPL_COMPLT",
        "PO2_TRANS_SUM", "PO2_FIRST_ERR","PO2_MULT_ERR",
        "DM CSR PH", "DM CSR PH", "DM CSR PH",
        "DM CSR PH", "reserved",
        "PO3_ERR_SUM", "PO3_TLB_ERR", "PO3_SPL_COMPLT",
        "PO3_TRANS_SUM", "PO3_FIRST_ERR","PO3_MULT_ERR",
        "DM CSR PH", "DM CSR PH", "DM CSR PH",
        "DM CSR PH", "reserved",
        NULL
};

static struct el_subpacket_annotation el_ev7_pal_annotations[] = {
        SUBPACKET_ANNOTATION(EL_CLASS__PAL,
                             EL_TYPE__PAL__EV7_PROCESSOR,
                             1,
                             "EV7 Processor Subpacket",
                             el_ev7_processor_subpacket_annotation),
        SUBPACKET_ANNOTATION(EL_CLASS__PAL,
                             EL_TYPE__PAL__EV7_ZBOX,
                             1,
                             "EV7 ZBOX Subpacket",
                             el_ev7_zbox_subpacket_annotation),
        SUBPACKET_ANNOTATION(EL_CLASS__PAL,
                             EL_TYPE__PAL__EV7_RBOX,
                             1,
                             "EV7 RBOX Subpacket",
                             el_ev7_rbox_subpacket_annotation),
        SUBPACKET_ANNOTATION(EL_CLASS__PAL,
                             EL_TYPE__PAL__EV7_IO,
                             1,
                             "EV7 IO Subpacket",
                             el_ev7_io_subpacket_annotation)
};

static struct el_subpacket *
ev7_process_pal_subpacket(struct el_subpacket *header)
{
        struct ev7_pal_subpacket *packet;

        if (header->class != EL_CLASS__PAL) {
                printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
                       err_print_prefix,
                       header->class, header->type);
                return NULL;
        }

        packet = (struct ev7_pal_subpacket *)header->by_type.raw.data_start;

        switch(header->type) {
        case EL_TYPE__PAL__LOGOUT_FRAME:
                printk("%s*** MCHK occurred on LPID %ld (RBOX %lx)\n",
                       err_print_prefix,
                       packet->by_type.logout.whami,
                       packet->by_type.logout.rbox_whami);
                el_print_timestamp(&packet->by_type.logout.timestamp);
                printk("%s EXC_ADDR: %016lx\n"
                       " HALT_CODE: %lx\n",
                       err_print_prefix,
                       packet->by_type.logout.exc_addr,
                       packet->by_type.logout.halt_code);
                el_process_subpackets(header,
                                      packet->by_type.logout.subpacket_count);
                break;
        default:
                printk("%s ** PAL TYPE %d SUBPACKET\n",
                       err_print_prefix,
                       header->type);
                el_annotate_subpacket(header);
                break;
        }

        return (struct el_subpacket *)((unsigned long)header + header->length);
}

struct el_subpacket_handler ev7_pal_subpacket_handler =
        SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket);

void
ev7_register_error_handlers(void)
{
        int i;

-       for(i = 0;
-           i<sizeof(el_ev7_pal_annotations)/sizeof(el_ev7_pal_annotations[1]);
-           i++) {
+       for (i = 0; i < ARRAY_SIZE(el_ev7_pal_annotations); i++)
                cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]);
-       }
+
        cdl_register_subpacket_handler(&ev7_pal_subpacket_handler);
}

arch/alpha/kernel/osf_sys.c
1 /* 1 /*
2 * linux/arch/alpha/kernel/osf_sys.c 2 * linux/arch/alpha/kernel/osf_sys.c
3 * 3 *
4 * Copyright (C) 1995 Linus Torvalds 4 * Copyright (C) 1995 Linus Torvalds
5 */ 5 */
6 6
7 /* 7 /*
8 * This file handles some of the stranger OSF/1 system call interfaces. 8 * This file handles some of the stranger OSF/1 system call interfaces.
9 * Some of the system calls expect a non-C calling standard, others have 9 * Some of the system calls expect a non-C calling standard, others have
10 * special parameter blocks.. 10 * special parameter blocks..
11 */ 11 */
12 12
13 #include <linux/errno.h> 13 #include <linux/errno.h>
14 #include <linux/sched.h> 14 #include <linux/sched.h>
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/mm.h> 16 #include <linux/mm.h>
17 #include <linux/smp.h> 17 #include <linux/smp.h>
18 #include <linux/smp_lock.h> 18 #include <linux/smp_lock.h>
19 #include <linux/stddef.h> 19 #include <linux/stddef.h>
20 #include <linux/syscalls.h> 20 #include <linux/syscalls.h>
21 #include <linux/unistd.h> 21 #include <linux/unistd.h>
22 #include <linux/ptrace.h> 22 #include <linux/ptrace.h>
23 #include <linux/slab.h> 23 #include <linux/slab.h>
24 #include <linux/user.h> 24 #include <linux/user.h>
25 #include <linux/a.out.h> 25 #include <linux/a.out.h>
26 #include <linux/utsname.h> 26 #include <linux/utsname.h>
27 #include <linux/time.h> 27 #include <linux/time.h>
28 #include <linux/timex.h> 28 #include <linux/timex.h>
29 #include <linux/major.h> 29 #include <linux/major.h>
30 #include <linux/stat.h> 30 #include <linux/stat.h>
31 #include <linux/mman.h> 31 #include <linux/mman.h>
32 #include <linux/shm.h> 32 #include <linux/shm.h>
33 #include <linux/poll.h> 33 #include <linux/poll.h>
34 #include <linux/file.h> 34 #include <linux/file.h>
35 #include <linux/types.h> 35 #include <linux/types.h>
36 #include <linux/ipc.h> 36 #include <linux/ipc.h>
37 #include <linux/namei.h> 37 #include <linux/namei.h>
38 #include <linux/uio.h> 38 #include <linux/uio.h>
39 #include <linux/vfs.h> 39 #include <linux/vfs.h>
40 #include <linux/rcupdate.h> 40 #include <linux/rcupdate.h>
41 41
42 #include <asm/fpu.h> 42 #include <asm/fpu.h>
43 #include <asm/io.h> 43 #include <asm/io.h>
44 #include <asm/uaccess.h> 44 #include <asm/uaccess.h>
45 #include <asm/system.h> 45 #include <asm/system.h>
46 #include <asm/sysinfo.h> 46 #include <asm/sysinfo.h>
47 #include <asm/hwrpb.h> 47 #include <asm/hwrpb.h>
48 #include <asm/processor.h> 48 #include <asm/processor.h>
49 49
50 extern int do_pipe(int *); 50 extern int do_pipe(int *);
51 51
52 /* 52 /*
53 * Brk needs to return an error. Still support Linux's brk(0) query idiom, 53 * Brk needs to return an error. Still support Linux's brk(0) query idiom,
54 * which OSF programs just shouldn't be doing. We're still not quite 54 * which OSF programs just shouldn't be doing. We're still not quite
55 * identical to OSF as we don't return 0 on success, but doing otherwise 55 * identical to OSF as we don't return 0 on success, but doing otherwise
56 * would require changes to libc. Hopefully this is good enough. 56 * would require changes to libc. Hopefully this is good enough.
57 */ 57 */
58 asmlinkage unsigned long 58 asmlinkage unsigned long
59 osf_brk(unsigned long brk) 59 osf_brk(unsigned long brk)
60 { 60 {
61 unsigned long retval = sys_brk(brk); 61 unsigned long retval = sys_brk(brk);
62 if (brk && brk != retval) 62 if (brk && brk != retval)
63 retval = -ENOMEM; 63 retval = -ENOMEM;
64 return retval; 64 return retval;
65 } 65 }
66 66
67 /* 67 /*
68 * This is pure guess-work.. 68 * This is pure guess-work..
69 */ 69 */
70 asmlinkage int 70 asmlinkage int
71 osf_set_program_attributes(unsigned long text_start, unsigned long text_len, 71 osf_set_program_attributes(unsigned long text_start, unsigned long text_len,
72 unsigned long bss_start, unsigned long bss_len) 72 unsigned long bss_start, unsigned long bss_len)
73 { 73 {
74 struct mm_struct *mm; 74 struct mm_struct *mm;
75 75
76 lock_kernel(); 76 lock_kernel();
77 mm = current->mm; 77 mm = current->mm;
78 mm->end_code = bss_start + bss_len; 78 mm->end_code = bss_start + bss_len;
79 mm->brk = bss_start + bss_len; 79 mm->brk = bss_start + bss_len;
80 #if 0 80 #if 0
81 printk("set_program_attributes(%lx %lx %lx %lx)\n", 81 printk("set_program_attributes(%lx %lx %lx %lx)\n",
82 text_start, text_len, bss_start, bss_len); 82 text_start, text_len, bss_start, bss_len);
83 #endif 83 #endif
84 unlock_kernel(); 84 unlock_kernel();
85 return 0; 85 return 0;
86 } 86 }
87 87
88 /* 88 /*
89 * OSF/1 directory handling functions... 89 * OSF/1 directory handling functions...
90 * 90 *
91 * The "getdents()" interface is much more sane: the "basep" stuff is 91 * The "getdents()" interface is much more sane: the "basep" stuff is
92 * braindamage (it can't really handle filesystems where the directory 92 * braindamage (it can't really handle filesystems where the directory
93 * offset differences aren't the same as "d_reclen"). 93 * offset differences aren't the same as "d_reclen").
94 */ 94 */
95 #define NAME_OFFSET offsetof (struct osf_dirent, d_name) 95 #define NAME_OFFSET offsetof (struct osf_dirent, d_name)
96 #define ROUND_UP(x) (((x)+3) & ~3) 96 #define ROUND_UP(x) (((x)+3) & ~3)
97 97
98 struct osf_dirent { 98 struct osf_dirent {
99 unsigned int d_ino; 99 unsigned int d_ino;
100 unsigned short d_reclen; 100 unsigned short d_reclen;
101 unsigned short d_namlen; 101 unsigned short d_namlen;
102 char d_name[1]; 102 char d_name[1];
103 }; 103 };
104 104
105 struct osf_dirent_callback { 105 struct osf_dirent_callback {
106 struct osf_dirent __user *dirent; 106 struct osf_dirent __user *dirent;
107 long __user *basep; 107 long __user *basep;
108 unsigned int count; 108 unsigned int count;
109 int error; 109 int error;
110 }; 110 };
111 111
112 static int 112 static int
113 osf_filldir(void *__buf, const char *name, int namlen, loff_t offset, 113 osf_filldir(void *__buf, const char *name, int namlen, loff_t offset,
114 ino_t ino, unsigned int d_type) 114 ino_t ino, unsigned int d_type)
115 { 115 {
116 struct osf_dirent __user *dirent; 116 struct osf_dirent __user *dirent;
117 struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf; 117 struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf;
118 unsigned int reclen = ROUND_UP(NAME_OFFSET + namlen + 1); 118 unsigned int reclen = ROUND_UP(NAME_OFFSET + namlen + 1);
119 119
120 buf->error = -EINVAL; /* only used if we fail */ 120 buf->error = -EINVAL; /* only used if we fail */
121 if (reclen > buf->count) 121 if (reclen > buf->count)
122 return -EINVAL; 122 return -EINVAL;
123 if (buf->basep) { 123 if (buf->basep) {
124 if (put_user(offset, buf->basep)) 124 if (put_user(offset, buf->basep))
125 return -EFAULT; 125 return -EFAULT;
126 buf->basep = NULL; 126 buf->basep = NULL;
127 } 127 }
128 dirent = buf->dirent; 128 dirent = buf->dirent;
129 put_user(ino, &dirent->d_ino); 129 put_user(ino, &dirent->d_ino);
130 put_user(namlen, &dirent->d_namlen); 130 put_user(namlen, &dirent->d_namlen);
131 put_user(reclen, &dirent->d_reclen); 131 put_user(reclen, &dirent->d_reclen);
132 if (copy_to_user(dirent->d_name, name, namlen) || 132 if (copy_to_user(dirent->d_name, name, namlen) ||
133 put_user(0, dirent->d_name + namlen)) 133 put_user(0, dirent->d_name + namlen))
134 return -EFAULT; 134 return -EFAULT;
135 dirent = (void __user *)dirent + reclen; 135 dirent = (void __user *)dirent + reclen;
136 buf->dirent = dirent; 136 buf->dirent = dirent;
137 buf->count -= reclen; 137 buf->count -= reclen;
138 return 0; 138 return 0;
139 } 139 }
140 140
141 asmlinkage int 141 asmlinkage int
142 osf_getdirentries(unsigned int fd, struct osf_dirent __user *dirent, 142 osf_getdirentries(unsigned int fd, struct osf_dirent __user *dirent,
143 unsigned int count, long __user *basep) 143 unsigned int count, long __user *basep)
144 { 144 {
145 int error; 145 int error;
146 struct file *file; 146 struct file *file;
147 struct osf_dirent_callback buf; 147 struct osf_dirent_callback buf;
148 148
149 error = -EBADF; 149 error = -EBADF;
150 file = fget(fd); 150 file = fget(fd);
151 if (!file) 151 if (!file)
152 goto out; 152 goto out;
153 153
154 buf.dirent = dirent; 154 buf.dirent = dirent;
155 buf.basep = basep; 155 buf.basep = basep;
156 buf.count = count; 156 buf.count = count;
157 buf.error = 0; 157 buf.error = 0;
158 158
159 error = vfs_readdir(file, osf_filldir, &buf); 159 error = vfs_readdir(file, osf_filldir, &buf);
160 if (error < 0) 160 if (error < 0)
161 goto out_putf; 161 goto out_putf;
162 162
163 error = buf.error; 163 error = buf.error;
164 if (count != buf.count) 164 if (count != buf.count)
165 error = count - buf.count; 165 error = count - buf.count;
166 166
167 out_putf: 167 out_putf:
168 fput(file); 168 fput(file);
169 out: 169 out:
170 return error; 170 return error;
171 } 171 }
172 172
173 #undef ROUND_UP 173 #undef ROUND_UP
174 #undef NAME_OFFSET 174 #undef NAME_OFFSET
175 175
176 asmlinkage unsigned long 176 asmlinkage unsigned long
177 osf_mmap(unsigned long addr, unsigned long len, unsigned long prot, 177 osf_mmap(unsigned long addr, unsigned long len, unsigned long prot,
178 unsigned long flags, unsigned long fd, unsigned long off) 178 unsigned long flags, unsigned long fd, unsigned long off)
179 { 179 {
180 struct file *file = NULL; 180 struct file *file = NULL;
181 unsigned long ret = -EBADF; 181 unsigned long ret = -EBADF;
182 182
183 #if 0 183 #if 0
184 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) 184 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
185 printk("%s: unimplemented OSF mmap flags %04lx\n", 185 printk("%s: unimplemented OSF mmap flags %04lx\n",
186 current->comm, flags); 186 current->comm, flags);
187 #endif 187 #endif
188 if (!(flags & MAP_ANONYMOUS)) { 188 if (!(flags & MAP_ANONYMOUS)) {
189 file = fget(fd); 189 file = fget(fd);
190 if (!file) 190 if (!file)
191 goto out; 191 goto out;
192 } 192 }
193 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 193 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
194 down_write(&current->mm->mmap_sem); 194 down_write(&current->mm->mmap_sem);
195 ret = do_mmap(file, addr, len, prot, flags, off); 195 ret = do_mmap(file, addr, len, prot, flags, off);
196 up_write(&current->mm->mmap_sem); 196 up_write(&current->mm->mmap_sem);
197 if (file) 197 if (file)
198 fput(file); 198 fput(file);
199 out: 199 out:
200 return ret; 200 return ret;
201 } 201 }
202 202
203 203
204 /* 204 /*
205 * The OSF/1 statfs structure is much larger, but this should 205 * The OSF/1 statfs structure is much larger, but this should
206 * match the beginning, at least. 206 * match the beginning, at least.
207 */ 207 */
208 struct osf_statfs { 208 struct osf_statfs {
209 short f_type; 209 short f_type;
210 short f_flags; 210 short f_flags;
211 int f_fsize; 211 int f_fsize;
212 int f_bsize; 212 int f_bsize;
213 int f_blocks; 213 int f_blocks;
214 int f_bfree; 214 int f_bfree;
215 int f_bavail; 215 int f_bavail;
216 int f_files; 216 int f_files;
217 int f_ffree; 217 int f_ffree;
218 __kernel_fsid_t f_fsid; 218 __kernel_fsid_t f_fsid;
219 }; 219 };
220 220
221 static int 221 static int
222 linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat, 222 linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat,
223 unsigned long bufsiz) 223 unsigned long bufsiz)
224 { 224 {
225 struct osf_statfs tmp_stat; 225 struct osf_statfs tmp_stat;
226 226
227 tmp_stat.f_type = linux_stat->f_type; 227 tmp_stat.f_type = linux_stat->f_type;
228 tmp_stat.f_flags = 0; /* mount flags */ 228 tmp_stat.f_flags = 0; /* mount flags */
229 tmp_stat.f_fsize = linux_stat->f_frsize; 229 tmp_stat.f_fsize = linux_stat->f_frsize;
230 tmp_stat.f_bsize = linux_stat->f_bsize; 230 tmp_stat.f_bsize = linux_stat->f_bsize;
231 tmp_stat.f_blocks = linux_stat->f_blocks; 231 tmp_stat.f_blocks = linux_stat->f_blocks;
232 tmp_stat.f_bfree = linux_stat->f_bfree; 232 tmp_stat.f_bfree = linux_stat->f_bfree;
233 tmp_stat.f_bavail = linux_stat->f_bavail; 233 tmp_stat.f_bavail = linux_stat->f_bavail;
234 tmp_stat.f_files = linux_stat->f_files; 234 tmp_stat.f_files = linux_stat->f_files;
235 tmp_stat.f_ffree = linux_stat->f_ffree; 235 tmp_stat.f_ffree = linux_stat->f_ffree;
236 tmp_stat.f_fsid = linux_stat->f_fsid; 236 tmp_stat.f_fsid = linux_stat->f_fsid;
237 if (bufsiz > sizeof(tmp_stat)) 237 if (bufsiz > sizeof(tmp_stat))
238 bufsiz = sizeof(tmp_stat); 238 bufsiz = sizeof(tmp_stat);
239 return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0; 239 return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
240 } 240 }
241 241
242 static int 242 static int
243 do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer, 243 do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer,
244 unsigned long bufsiz) 244 unsigned long bufsiz)
245 { 245 {
246 struct kstatfs linux_stat; 246 struct kstatfs linux_stat;
247 int error = vfs_statfs(dentry, &linux_stat); 247 int error = vfs_statfs(dentry, &linux_stat);
248 if (!error) 248 if (!error)
249 error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); 249 error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
250 return error; 250 return error;
251 } 251 }
252 252
253 asmlinkage int 253 asmlinkage int
254 osf_statfs(char __user *path, struct osf_statfs __user *buffer, unsigned long bufsiz) 254 osf_statfs(char __user *path, struct osf_statfs __user *buffer, unsigned long bufsiz)
255 { 255 {
256 struct nameidata nd; 256 struct nameidata nd;
257 int retval; 257 int retval;
258 258
259 retval = user_path_walk(path, &nd); 259 retval = user_path_walk(path, &nd);
260 if (!retval) { 260 if (!retval) {
261 retval = do_osf_statfs(nd.dentry, buffer, bufsiz); 261 retval = do_osf_statfs(nd.dentry, buffer, bufsiz);
262 path_release(&nd); 262 path_release(&nd);
263 } 263 }
264 return retval; 264 return retval;
265 } 265 }
266 266
267 asmlinkage int 267 asmlinkage int
268 osf_fstatfs(unsigned long fd, struct osf_statfs __user *buffer, unsigned long bufsiz) 268 osf_fstatfs(unsigned long fd, struct osf_statfs __user *buffer, unsigned long bufsiz)
269 { 269 {
270 struct file *file; 270 struct file *file;
271 int retval; 271 int retval;
272 272
273 retval = -EBADF; 273 retval = -EBADF;
274 file = fget(fd); 274 file = fget(fd);
275 if (file) { 275 if (file) {
276 retval = do_osf_statfs(file->f_dentry, buffer, bufsiz); 276 retval = do_osf_statfs(file->f_dentry, buffer, bufsiz);
277 fput(file); 277 fput(file);
278 } 278 }
279 return retval; 279 return retval;
280 } 280 }
281 281
282 /* 282 /*
283 * Uhh.. OSF/1 mount parameters aren't exactly obvious.. 283 * Uhh.. OSF/1 mount parameters aren't exactly obvious..
284 * 284 *
285 * Although to be frank, neither are the native Linux/i386 ones.. 285 * Although to be frank, neither are the native Linux/i386 ones..
286 */ 286 */
287 struct ufs_args { 287 struct ufs_args {
288 char __user *devname; 288 char __user *devname;
289 int flags; 289 int flags;
290 uid_t exroot; 290 uid_t exroot;
291 }; 291 };
292 292
293 struct cdfs_args { 293 struct cdfs_args {
294 char __user *devname; 294 char __user *devname;
295 int flags; 295 int flags;
296 uid_t exroot; 296 uid_t exroot;
297 297
298 /* This has lots more here, which Linux handles with the option block 298 /* This has lots more here, which Linux handles with the option block
299 but I'm too lazy to do the translation into ASCII. */ 299 but I'm too lazy to do the translation into ASCII. */
300 }; 300 };
301 301
302 struct procfs_args { 302 struct procfs_args {
303 char __user *devname; 303 char __user *devname;
304 int flags; 304 int flags;
305 uid_t exroot; 305 uid_t exroot;
306 }; 306 };
307 307
308 /* 308 /*
309 * We can't actually handle ufs yet, so we translate UFS mounts to 309 * We can't actually handle ufs yet, so we translate UFS mounts to
310 * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS 310 * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS
311 * layout is so braindead it's a major headache doing it. 311 * layout is so braindead it's a major headache doing it.
312 * 312 *
313 * Just how long ago was it written? OTOH our UFS driver may be still 313 * Just how long ago was it written? OTOH our UFS driver may be still
314 * unhappy with OSF UFS. [CHECKME] 314 * unhappy with OSF UFS. [CHECKME]
315 */ 315 */
316 static int 316 static int
317 osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) 317 osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
318 { 318 {
319 int retval; 319 int retval;
320 struct cdfs_args tmp; 320 struct cdfs_args tmp;
321 char *devname; 321 char *devname;
322 322
323 retval = -EFAULT; 323 retval = -EFAULT;
324 if (copy_from_user(&tmp, args, sizeof(tmp))) 324 if (copy_from_user(&tmp, args, sizeof(tmp)))
325 goto out; 325 goto out;
326 devname = getname(tmp.devname); 326 devname = getname(tmp.devname);
327 retval = PTR_ERR(devname); 327 retval = PTR_ERR(devname);
328 if (IS_ERR(devname)) 328 if (IS_ERR(devname))
329 goto out; 329 goto out;
330 retval = do_mount(devname, dirname, "ext2", flags, NULL); 330 retval = do_mount(devname, dirname, "ext2", flags, NULL);
331 putname(devname); 331 putname(devname);
332 out: 332 out:
333 return retval; 333 return retval;
334 } 334 }
335 335
336 static int 336 static int
337 osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) 337 osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
338 { 338 {
339 int retval; 339 int retval;
340 struct cdfs_args tmp; 340 struct cdfs_args tmp;
341 char *devname; 341 char *devname;
342 342
343 retval = -EFAULT; 343 retval = -EFAULT;
344 if (copy_from_user(&tmp, args, sizeof(tmp))) 344 if (copy_from_user(&tmp, args, sizeof(tmp)))
345 goto out; 345 goto out;
346 devname = getname(tmp.devname); 346 devname = getname(tmp.devname);
347 retval = PTR_ERR(devname); 347 retval = PTR_ERR(devname);
348 if (IS_ERR(devname)) 348 if (IS_ERR(devname))
349 goto out; 349 goto out;
350 retval = do_mount(devname, dirname, "iso9660", flags, NULL); 350 retval = do_mount(devname, dirname, "iso9660", flags, NULL);
351 putname(devname); 351 putname(devname);
352 out: 352 out:
353 return retval; 353 return retval;
354 } 354 }
355 355
356 static int 356 static int
357 osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) 357 osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags)
358 { 358 {
359 struct procfs_args tmp; 359 struct procfs_args tmp;
360 360
361 if (copy_from_user(&tmp, args, sizeof(tmp))) 361 if (copy_from_user(&tmp, args, sizeof(tmp)))
362 return -EFAULT; 362 return -EFAULT;
363 363
364 return do_mount("", dirname, "proc", flags, NULL); 364 return do_mount("", dirname, "proc", flags, NULL);
365 } 365 }
366 366
367 asmlinkage int 367 asmlinkage int
368 osf_mount(unsigned long typenr, char __user *path, int flag, void __user *data) 368 osf_mount(unsigned long typenr, char __user *path, int flag, void __user *data)
369 { 369 {
370 int retval = -EINVAL; 370 int retval = -EINVAL;
371 char *name; 371 char *name;
372 372
373 lock_kernel(); 373 lock_kernel();
374 374
375 name = getname(path); 375 name = getname(path);
376 retval = PTR_ERR(name); 376 retval = PTR_ERR(name);
377 if (IS_ERR(name)) 377 if (IS_ERR(name))
378 goto out; 378 goto out;
379 switch (typenr) { 379 switch (typenr) {
380 case 1: 380 case 1:
381 retval = osf_ufs_mount(name, data, flag); 381 retval = osf_ufs_mount(name, data, flag);
382 break; 382 break;
383 case 6: 383 case 6:
384 retval = osf_cdfs_mount(name, data, flag); 384 retval = osf_cdfs_mount(name, data, flag);
385 break; 385 break;
386 case 9: 386 case 9:
387 retval = osf_procfs_mount(name, data, flag); 387 retval = osf_procfs_mount(name, data, flag);
388 break; 388 break;
389 default: 389 default:
390 printk("osf_mount(%ld, %x)\n", typenr, flag); 390 printk("osf_mount(%ld, %x)\n", typenr, flag);
391 } 391 }
392 putname(name); 392 putname(name);
393 out: 393 out:
394 unlock_kernel(); 394 unlock_kernel();
395 return retval; 395 return retval;
396 } 396 }
397 397
398 asmlinkage int 398 asmlinkage int
399 osf_utsname(char __user *name) 399 osf_utsname(char __user *name)
400 { 400 {
401 int error; 401 int error;
402 402
403 down_read(&uts_sem); 403 down_read(&uts_sem);
404 error = -EFAULT; 404 error = -EFAULT;
405 if (copy_to_user(name + 0, system_utsname.sysname, 32)) 405 if (copy_to_user(name + 0, system_utsname.sysname, 32))
406 goto out; 406 goto out;
407 if (copy_to_user(name + 32, system_utsname.nodename, 32)) 407 if (copy_to_user(name + 32, system_utsname.nodename, 32))
408 goto out; 408 goto out;
409 if (copy_to_user(name + 64, system_utsname.release, 32)) 409 if (copy_to_user(name + 64, system_utsname.release, 32))
410 goto out; 410 goto out;
411 if (copy_to_user(name + 96, system_utsname.version, 32)) 411 if (copy_to_user(name + 96, system_utsname.version, 32))
412 goto out; 412 goto out;
413 if (copy_to_user(name + 128, system_utsname.machine, 32)) 413 if (copy_to_user(name + 128, system_utsname.machine, 32))
414 goto out; 414 goto out;
415 415
416 error = 0; 416 error = 0;
417 out: 417 out:
418 up_read(&uts_sem); 418 up_read(&uts_sem);
419 return error; 419 return error;
420 } 420 }
421 421
422 asmlinkage unsigned long 422 asmlinkage unsigned long
423 sys_getpagesize(void) 423 sys_getpagesize(void)
424 { 424 {
425 return PAGE_SIZE; 425 return PAGE_SIZE;
426 } 426 }
427 427
428 asmlinkage unsigned long 428 asmlinkage unsigned long
429 sys_getdtablesize(void) 429 sys_getdtablesize(void)
430 { 430 {
431 return NR_OPEN; 431 return NR_OPEN;
432 } 432 }
433 433
434 /* 434 /*
435 * For compatibility with OSF/1 only. Use utsname(2) instead. 435 * For compatibility with OSF/1 only. Use utsname(2) instead.
436 */ 436 */
437 asmlinkage int 437 asmlinkage int
438 osf_getdomainname(char __user *name, int namelen) 438 osf_getdomainname(char __user *name, int namelen)
439 { 439 {
440 unsigned len; 440 unsigned len;
441 int i; 441 int i;
442 442
443 if (!access_ok(VERIFY_WRITE, name, namelen)) 443 if (!access_ok(VERIFY_WRITE, name, namelen))
444 return -EFAULT; 444 return -EFAULT;
445 445
446 len = namelen; 446 len = namelen;
447 if (namelen > 32) 447 if (namelen > 32)
448 len = 32; 448 len = 32;
449 449
450 down_read(&uts_sem); 450 down_read(&uts_sem);
451 for (i = 0; i < len; ++i) { 451 for (i = 0; i < len; ++i) {
452 __put_user(system_utsname.domainname[i], name + i); 452 __put_user(system_utsname.domainname[i], name + i);
453 if (system_utsname.domainname[i] == '\0') 453 if (system_utsname.domainname[i] == '\0')
454 break; 454 break;
455 } 455 }
456 up_read(&uts_sem); 456 up_read(&uts_sem);
457 457
458 return 0; 458 return 0;
459 } 459 }
460 460
461 /* 461 /*
462 * The following stuff should move into a header file should it ever 462 * The following stuff should move into a header file should it ever
463 * be labeled "officially supported." Right now, there is just enough 463 * be labeled "officially supported." Right now, there is just enough
464 * support to avoid applications (such as tar) printing error 464 * support to avoid applications (such as tar) printing error
465 * messages. The attributes are not really implemented. 465 * messages. The attributes are not really implemented.
466 */ 466 */
467 467
468 /* 468 /*
469 * Values for Property list entry flag 469 * Values for Property list entry flag
470 */ 470 */
471 #define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry 471 #define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry
472 by default */ 472 by default */
473 #define PLE_FLAG_MASK 0x1 /* Valid flag values */ 473 #define PLE_FLAG_MASK 0x1 /* Valid flag values */
474 #define PLE_FLAG_ALL -1 /* All flag value */ 474 #define PLE_FLAG_ALL -1 /* All flag value */
475 475
476 struct proplistname_args { 476 struct proplistname_args {
477 unsigned int pl_mask; 477 unsigned int pl_mask;
478 unsigned int pl_numnames; 478 unsigned int pl_numnames;
479 char **pl_names; 479 char **pl_names;
480 }; 480 };
481 481
482 union pl_args { 482 union pl_args {
483 struct setargs { 483 struct setargs {
484 char __user *path; 484 char __user *path;
485 long follow; 485 long follow;
486 long nbytes; 486 long nbytes;
487 char __user *buf; 487 char __user *buf;
488 } set; 488 } set;
489 struct fsetargs { 489 struct fsetargs {
490 long fd; 490 long fd;
491 long nbytes; 491 long nbytes;
492 char __user *buf; 492 char __user *buf;
493 } fset; 493 } fset;
494 struct getargs { 494 struct getargs {
495 char __user *path; 495 char __user *path;
496 long follow; 496 long follow;
497 struct proplistname_args __user *name_args; 497 struct proplistname_args __user *name_args;
498 long nbytes; 498 long nbytes;
499 char __user *buf; 499 char __user *buf;
500 int __user *min_buf_size; 500 int __user *min_buf_size;
501 } get; 501 } get;
502 struct fgetargs { 502 struct fgetargs {
503 long fd; 503 long fd;
504 struct proplistname_args __user *name_args; 504 struct proplistname_args __user *name_args;
505 long nbytes; 505 long nbytes;
506 char __user *buf; 506 char __user *buf;
507 int __user *min_buf_size; 507 int __user *min_buf_size;
508 } fget; 508 } fget;
509 struct delargs { 509 struct delargs {
510 char __user *path; 510 char __user *path;
511 long follow; 511 long follow;
512 struct proplistname_args __user *name_args; 512 struct proplistname_args __user *name_args;
513 } del; 513 } del;
514 struct fdelargs { 514 struct fdelargs {
515 long fd; 515 long fd;
516 struct proplistname_args __user *name_args; 516 struct proplistname_args __user *name_args;
517 } fdel; 517 } fdel;
518 }; 518 };
519 519
520 enum pl_code { 520 enum pl_code {
521 PL_SET = 1, PL_FSET = 2, 521 PL_SET = 1, PL_FSET = 2,
522 PL_GET = 3, PL_FGET = 4, 522 PL_GET = 3, PL_FGET = 4,
523 PL_DEL = 5, PL_FDEL = 6 523 PL_DEL = 5, PL_FDEL = 6
524 }; 524 };
525 525
526 asmlinkage long 526 asmlinkage long
527 osf_proplist_syscall(enum pl_code code, union pl_args __user *args) 527 osf_proplist_syscall(enum pl_code code, union pl_args __user *args)
528 { 528 {
529 long error; 529 long error;
530 int __user *min_buf_size_ptr; 530 int __user *min_buf_size_ptr;
531 531
532 lock_kernel(); 532 lock_kernel();
533 switch (code) { 533 switch (code) {
534 case PL_SET: 534 case PL_SET:
535 if (get_user(error, &args->set.nbytes)) 535 if (get_user(error, &args->set.nbytes))
536 error = -EFAULT; 536 error = -EFAULT;
537 break; 537 break;
538 case PL_FSET: 538 case PL_FSET:
539 if (get_user(error, &args->fset.nbytes)) 539 if (get_user(error, &args->fset.nbytes))
540 error = -EFAULT; 540 error = -EFAULT;
541 break; 541 break;
542 case PL_GET: 542 case PL_GET:
543 error = get_user(min_buf_size_ptr, &args->get.min_buf_size); 543 error = get_user(min_buf_size_ptr, &args->get.min_buf_size);
544 if (error) 544 if (error)
545 break; 545 break;
546 error = put_user(0, min_buf_size_ptr); 546 error = put_user(0, min_buf_size_ptr);
547 break; 547 break;
548 case PL_FGET: 548 case PL_FGET:
549 error = get_user(min_buf_size_ptr, &args->fget.min_buf_size); 549 error = get_user(min_buf_size_ptr, &args->fget.min_buf_size);
550 if (error) 550 if (error)
551 break; 551 break;
552 error = put_user(0, min_buf_size_ptr); 552 error = put_user(0, min_buf_size_ptr);
553 break; 553 break;
554 case PL_DEL: 554 case PL_DEL:
555 case PL_FDEL: 555 case PL_FDEL:
556 error = 0; 556 error = 0;
557 break; 557 break;
558 default: 558 default:
559 error = -EOPNOTSUPP; 559 error = -EOPNOTSUPP;
560 break; 560 break;
561 }; 561 };
562 unlock_kernel(); 562 unlock_kernel();
563 return error; 563 return error;
564 } 564 }
565 565
566 asmlinkage int 566 asmlinkage int
567 osf_sigstack(struct sigstack __user *uss, struct sigstack __user *uoss) 567 osf_sigstack(struct sigstack __user *uss, struct sigstack __user *uoss)
568 { 568 {
569 unsigned long usp = rdusp(); 569 unsigned long usp = rdusp();
570 unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; 570 unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size;
571 unsigned long oss_os = on_sig_stack(usp); 571 unsigned long oss_os = on_sig_stack(usp);
572 int error; 572 int error;
573 573
574 if (uss) { 574 if (uss) {
575 void __user *ss_sp; 575 void __user *ss_sp;
576 576
577 error = -EFAULT; 577 error = -EFAULT;
578 if (get_user(ss_sp, &uss->ss_sp)) 578 if (get_user(ss_sp, &uss->ss_sp))
579 goto out; 579 goto out;
580 580
581 /* If the current stack was set with sigaltstack, don't 581 /* If the current stack was set with sigaltstack, don't
582 swap stacks while we are on it. */ 582 swap stacks while we are on it. */
583 error = -EPERM; 583 error = -EPERM;
584 if (current->sas_ss_sp && on_sig_stack(usp)) 584 if (current->sas_ss_sp && on_sig_stack(usp))
585 goto out; 585 goto out;
586 586
587 /* Since we don't know the extent of the stack, and we don't 587 /* Since we don't know the extent of the stack, and we don't
588 track onstack-ness, but rather calculate it, we must 588 track onstack-ness, but rather calculate it, we must
589 presume a size. Ho hum this interface is lossy. */ 589 presume a size. Ho hum this interface is lossy. */
590 current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; 590 current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
591 current->sas_ss_size = SIGSTKSZ; 591 current->sas_ss_size = SIGSTKSZ;
592 } 592 }
593 593
594 if (uoss) { 594 if (uoss) {
595 error = -EFAULT; 595 error = -EFAULT;
596 if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)) 596 if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))
597 || __put_user(oss_sp, &uoss->ss_sp) 597 || __put_user(oss_sp, &uoss->ss_sp)
598 || __put_user(oss_os, &uoss->ss_onstack)) 598 || __put_user(oss_os, &uoss->ss_onstack))
599 goto out; 599 goto out;
600 } 600 }
601 601
602 error = 0; 602 error = 0;
603 out: 603 out:
604 return error; 604 return error;
605 } 605 }
606 606
607 asmlinkage long 607 asmlinkage long
608 osf_sysinfo(int command, char __user *buf, long count) 608 osf_sysinfo(int command, char __user *buf, long count)
609 { 609 {
610 static char * sysinfo_table[] = { 610 static char * sysinfo_table[] = {
611 system_utsname.sysname, 611 system_utsname.sysname,
612 system_utsname.nodename, 612 system_utsname.nodename,
613 system_utsname.release, 613 system_utsname.release,
614 system_utsname.version, 614 system_utsname.version,
615 system_utsname.machine, 615 system_utsname.machine,
616 "alpha", /* instruction set architecture */ 616 "alpha", /* instruction set architecture */
617 "dummy", /* hardware serial number */ 617 "dummy", /* hardware serial number */
618 "dummy", /* hardware manufacturer */ 618 "dummy", /* hardware manufacturer */
619 "dummy", /* secure RPC domain */ 619 "dummy", /* secure RPC domain */
620 }; 620 };
621 unsigned long offset; 621 unsigned long offset;
622 char *res; 622 char *res;
623 long len, err = -EINVAL; 623 long len, err = -EINVAL;
624 624
625 offset = command-1; 625 offset = command-1;
626 if (offset >= sizeof(sysinfo_table)/sizeof(char *)) { 626 if (offset >= ARRAY_SIZE(sysinfo_table)) {
627 /* Digital UNIX has a few unpublished interfaces here */ 627 /* Digital UNIX has a few unpublished interfaces here */
628 printk("sysinfo(%d)", command); 628 printk("sysinfo(%d)", command);
629 goto out; 629 goto out;
630 } 630 }
631 631
632 down_read(&uts_sem); 632 down_read(&uts_sem);
633 res = sysinfo_table[offset]; 633 res = sysinfo_table[offset];
634 len = strlen(res)+1; 634 len = strlen(res)+1;
635 if (len > count) 635 if (len > count)
636 len = count; 636 len = count;
637 if (copy_to_user(buf, res, len)) 637 if (copy_to_user(buf, res, len))
638 err = -EFAULT; 638 err = -EFAULT;
639 else 639 else
640 err = 0; 640 err = 0;
641 up_read(&uts_sem); 641 up_read(&uts_sem);
642 out: 642 out:
643 return err; 643 return err;
644 } 644 }
645 645
646 asmlinkage unsigned long 646 asmlinkage unsigned long
647 osf_getsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, 647 osf_getsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
648 int __user *start, void __user *arg) 648 int __user *start, void __user *arg)
649 { 649 {
650 unsigned long w; 650 unsigned long w;
651 struct percpu_struct *cpu; 651 struct percpu_struct *cpu;
652 652
653 switch (op) { 653 switch (op) {
654 case GSI_IEEE_FP_CONTROL: 654 case GSI_IEEE_FP_CONTROL:
655 /* Return current software fp control & status bits. */ 655 /* Return current software fp control & status bits. */
656 /* Note that DU doesn't verify available space here. */ 656 /* Note that DU doesn't verify available space here. */
657 657
658 w = current_thread_info()->ieee_state & IEEE_SW_MASK; 658 w = current_thread_info()->ieee_state & IEEE_SW_MASK;
659 w = swcr_update_status(w, rdfpcr()); 659 w = swcr_update_status(w, rdfpcr());
660 if (put_user(w, (unsigned long __user *) buffer)) 660 if (put_user(w, (unsigned long __user *) buffer))
661 return -EFAULT; 661 return -EFAULT;
662 return 0; 662 return 0;
663 663
664 case GSI_IEEE_STATE_AT_SIGNAL: 664 case GSI_IEEE_STATE_AT_SIGNAL:
665 /* 665 /*
666 * Not sure anybody will ever use this weird stuff. These 666 * Not sure anybody will ever use this weird stuff. These
667 * ops can be used (under OSF/1) to set the fpcr that should 667 * ops can be used (under OSF/1) to set the fpcr that should
668 * be used when a signal handler starts executing. 668 * be used when a signal handler starts executing.
669 */ 669 */
670 break; 670 break;
671 671
672 case GSI_UACPROC: 672 case GSI_UACPROC:
673 if (nbytes < sizeof(unsigned int)) 673 if (nbytes < sizeof(unsigned int))
674 return -EINVAL; 674 return -EINVAL;
675 w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK; 675 w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK;
676 if (put_user(w, (unsigned int __user *)buffer)) 676 if (put_user(w, (unsigned int __user *)buffer))
677 return -EFAULT; 677 return -EFAULT;
678 return 1; 678 return 1;
679 679
680 case GSI_PROC_TYPE: 680 case GSI_PROC_TYPE:
681 if (nbytes < sizeof(unsigned long)) 681 if (nbytes < sizeof(unsigned long))
682 return -EINVAL; 682 return -EINVAL;
683 cpu = (struct percpu_struct*) 683 cpu = (struct percpu_struct*)
684 ((char*)hwrpb + hwrpb->processor_offset); 684 ((char*)hwrpb + hwrpb->processor_offset);
685 w = cpu->type; 685 w = cpu->type;
686 if (put_user(w, (unsigned long __user*)buffer)) 686 if (put_user(w, (unsigned long __user*)buffer))
687 return -EFAULT; 687 return -EFAULT;
688 return 1; 688 return 1;
689 689
690 case GSI_GET_HWRPB: 690 case GSI_GET_HWRPB:
691 if (nbytes < sizeof(*hwrpb)) 691 if (nbytes < sizeof(*hwrpb))
692 return -EINVAL; 692 return -EINVAL;
693 if (copy_to_user(buffer, hwrpb, nbytes) != 0) 693 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
694 return -EFAULT; 694 return -EFAULT;
695 return 1; 695 return 1;
696 696
697 default: 697 default:
698 break; 698 break;
699 } 699 }
700 700
701 return -EOPNOTSUPP; 701 return -EOPNOTSUPP;
702 } 702 }
703 703
704 asmlinkage unsigned long 704 asmlinkage unsigned long
705 osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, 705 osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
706 int __user *start, void __user *arg) 706 int __user *start, void __user *arg)
707 { 707 {
708 switch (op) { 708 switch (op) {
709 case SSI_IEEE_FP_CONTROL: { 709 case SSI_IEEE_FP_CONTROL: {
710 unsigned long swcr, fpcr; 710 unsigned long swcr, fpcr;
711 unsigned int *state; 711 unsigned int *state;
712 712
713 /* 713 /*
714 * Alpha Architecture Handbook 4.7.7.3: 714 * Alpha Architecture Handbook 4.7.7.3:
715 * To be fully IEEE compiant, we must track the current IEEE 715 * To be fully IEEE compiant, we must track the current IEEE
716 * exception state in software, because spurrious bits can be 716 * exception state in software, because spurrious bits can be
717 * set in the trap shadow of a software-complete insn. 717 * set in the trap shadow of a software-complete insn.
718 */ 718 */
719 719
720 if (get_user(swcr, (unsigned long __user *)buffer)) 720 if (get_user(swcr, (unsigned long __user *)buffer))
721 return -EFAULT; 721 return -EFAULT;
722 state = &current_thread_info()->ieee_state; 722 state = &current_thread_info()->ieee_state;
723 723
724 /* Update softare trap enable bits. */ 724 /* Update softare trap enable bits. */
725 *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK); 725 *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
726 726
727 /* Update the real fpcr. */ 727 /* Update the real fpcr. */
728 fpcr = rdfpcr() & FPCR_DYN_MASK; 728 fpcr = rdfpcr() & FPCR_DYN_MASK;
729 fpcr |= ieee_swcr_to_fpcr(swcr); 729 fpcr |= ieee_swcr_to_fpcr(swcr);
730 wrfpcr(fpcr); 730 wrfpcr(fpcr);
731 731
732 return 0; 732 return 0;
733 } 733 }
734 734
735 case SSI_IEEE_RAISE_EXCEPTION: { 735 case SSI_IEEE_RAISE_EXCEPTION: {
736 unsigned long exc, swcr, fpcr, fex; 736 unsigned long exc, swcr, fpcr, fex;
737 unsigned int *state; 737 unsigned int *state;
738 738
739 if (get_user(exc, (unsigned long __user *)buffer)) 739 if (get_user(exc, (unsigned long __user *)buffer))
740 return -EFAULT; 740 return -EFAULT;
741 state = &current_thread_info()->ieee_state; 741 state = &current_thread_info()->ieee_state;
742 exc &= IEEE_STATUS_MASK; 742 exc &= IEEE_STATUS_MASK;
743 743
744 /* Update softare trap enable bits. */ 744 /* Update softare trap enable bits. */
745 swcr = (*state & IEEE_SW_MASK) | exc; 745 swcr = (*state & IEEE_SW_MASK) | exc;
746 *state |= exc; 746 *state |= exc;
747 747
748 /* Update the real fpcr. */ 748 /* Update the real fpcr. */
749 fpcr = rdfpcr(); 749 fpcr = rdfpcr();
750 fpcr |= ieee_swcr_to_fpcr(swcr); 750 fpcr |= ieee_swcr_to_fpcr(swcr);
751 wrfpcr(fpcr); 751 wrfpcr(fpcr);
752 752
753 /* If any exceptions set by this call, and are unmasked, 753 /* If any exceptions set by this call, and are unmasked,
754 send a signal. Old exceptions are not signaled. */ 754 send a signal. Old exceptions are not signaled. */
755 fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr; 755 fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr;
756 if (fex) { 756 if (fex) {
757 siginfo_t info; 757 siginfo_t info;
758 int si_code = 0; 758 int si_code = 0;
759 759
760 if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND; 760 if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND;
761 if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES; 761 if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES;
762 if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND; 762 if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND;
763 if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF; 763 if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF;
764 if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV; 764 if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV;
765 if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV; 765 if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV;
766 766
767 info.si_signo = SIGFPE; 767 info.si_signo = SIGFPE;
768 info.si_errno = 0; 768 info.si_errno = 0;
769 info.si_code = si_code; 769 info.si_code = si_code;
770 info.si_addr = NULL; /* FIXME */ 770 info.si_addr = NULL; /* FIXME */
771 send_sig_info(SIGFPE, &info, current); 771 send_sig_info(SIGFPE, &info, current);
772 } 772 }
773 return 0; 773 return 0;
774 } 774 }
775 775
776 case SSI_IEEE_STATE_AT_SIGNAL: 776 case SSI_IEEE_STATE_AT_SIGNAL:
777 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 777 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
778 /* 778 /*
779 * Not sure anybody will ever use this weird stuff. These 779 * Not sure anybody will ever use this weird stuff. These
780 * ops can be used (under OSF/1) to set the fpcr that should 780 * ops can be used (under OSF/1) to set the fpcr that should
781 * be used when a signal handler starts executing. 781 * be used when a signal handler starts executing.
782 */ 782 */
783 break; 783 break;
784 784
785 case SSI_NVPAIRS: { 785 case SSI_NVPAIRS: {
786 unsigned long v, w, i; 786 unsigned long v, w, i;
787 unsigned int old, new; 787 unsigned int old, new;
788 788
789 for (i = 0; i < nbytes; ++i) { 789 for (i = 0; i < nbytes; ++i) {
790 790
791 if (get_user(v, 2*i + (unsigned int __user *)buffer)) 791 if (get_user(v, 2*i + (unsigned int __user *)buffer))
792 return -EFAULT; 792 return -EFAULT;
793 if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer)) 793 if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer))
794 return -EFAULT; 794 return -EFAULT;
795 switch (v) { 795 switch (v) {
796 case SSIN_UACPROC: 796 case SSIN_UACPROC:
797 again: 797 again:
798 old = current_thread_info()->flags; 798 old = current_thread_info()->flags;
799 new = old & ~(UAC_BITMASK << UAC_SHIFT); 799 new = old & ~(UAC_BITMASK << UAC_SHIFT);
800 new = new | (w & UAC_BITMASK) << UAC_SHIFT; 800 new = new | (w & UAC_BITMASK) << UAC_SHIFT;
801 if (cmpxchg(&current_thread_info()->flags, 801 if (cmpxchg(&current_thread_info()->flags,
802 old, new) != old) 802 old, new) != old)
803 goto again; 803 goto again;
804 break; 804 break;
805 805
806 default: 806 default:
807 return -EOPNOTSUPP; 807 return -EOPNOTSUPP;
808 } 808 }
809 } 809 }
810 return 0; 810 return 0;
811 } 811 }
812 812
813 default: 813 default:
814 break; 814 break;
815 } 815 }
816 816
817 return -EOPNOTSUPP; 817 return -EOPNOTSUPP;
818 } 818 }
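The SSIN_UACPROC case above updates the per-thread unaligned-access-control bits with a read/modify/compare-and-swap retry loop, so a racing update to the flags word is never silently overwritten. Below is a minimal standalone sketch of that retry pattern using C11 atomics instead of the kernel's cmpxchg(); the demo_* names and the bit layout are assumptions made only for the example.

#include <stdatomic.h>
#include <stdio.h>

#define DEMO_UAC_SHIFT   6
#define DEMO_UAC_BITMASK 7u

static _Atomic unsigned int demo_flags;

static void demo_set_uac(unsigned int w)
{
	unsigned int old, new;

	do {
		old = atomic_load(&demo_flags);
		/* Rebuild only the UAC field from the snapshot... */
		new = old & ~(DEMO_UAC_BITMASK << DEMO_UAC_SHIFT);
		new |= (w & DEMO_UAC_BITMASK) << DEMO_UAC_SHIFT;
		/* ...and publish it only if nobody changed the word meanwhile. */
	} while (!atomic_compare_exchange_weak(&demo_flags, &old, new));
}

int main(void)
{
	demo_set_uac(3);
	printf("flags = 0x%x\n", (unsigned int)atomic_load(&demo_flags));  /* 0xc0 */
	return 0;
}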
819 819
820 /* Translations due to the fact that OSF's time_t is an int. Which 820 /* Translations due to the fact that OSF's time_t is an int. Which
821 affects all sorts of things, like timeval and itimerval. */ 821 affects all sorts of things, like timeval and itimerval. */
822 822
823 extern struct timezone sys_tz; 823 extern struct timezone sys_tz;
824 824
825 struct timeval32 825 struct timeval32
826 { 826 {
827 int tv_sec, tv_usec; 827 int tv_sec, tv_usec;
828 }; 828 };
829 829
830 struct itimerval32 830 struct itimerval32
831 { 831 {
832 struct timeval32 it_interval; 832 struct timeval32 it_interval;
833 struct timeval32 it_value; 833 struct timeval32 it_value;
834 }; 834 };
835 835
836 static inline long 836 static inline long
837 get_tv32(struct timeval *o, struct timeval32 __user *i) 837 get_tv32(struct timeval *o, struct timeval32 __user *i)
838 { 838 {
839 return (!access_ok(VERIFY_READ, i, sizeof(*i)) || 839 return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
840 (__get_user(o->tv_sec, &i->tv_sec) | 840 (__get_user(o->tv_sec, &i->tv_sec) |
841 __get_user(o->tv_usec, &i->tv_usec))); 841 __get_user(o->tv_usec, &i->tv_usec)));
842 } 842 }
843 843
844 static inline long 844 static inline long
845 put_tv32(struct timeval32 __user *o, struct timeval *i) 845 put_tv32(struct timeval32 __user *o, struct timeval *i)
846 { 846 {
847 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || 847 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
848 (__put_user(i->tv_sec, &o->tv_sec) | 848 (__put_user(i->tv_sec, &o->tv_sec) |
849 __put_user(i->tv_usec, &o->tv_usec))); 849 __put_user(i->tv_usec, &o->tv_usec)));
850 } 850 }
851 851
852 static inline long 852 static inline long
853 get_it32(struct itimerval *o, struct itimerval32 __user *i) 853 get_it32(struct itimerval *o, struct itimerval32 __user *i)
854 { 854 {
855 return (!access_ok(VERIFY_READ, i, sizeof(*i)) || 855 return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
856 (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | 856 (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
857 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | 857 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
858 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | 858 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
859 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); 859 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
860 } 860 }
861 861
862 static inline long 862 static inline long
863 put_it32(struct itimerval32 __user *o, struct itimerval *i) 863 put_it32(struct itimerval32 __user *o, struct itimerval *i)
864 { 864 {
865 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || 865 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
866 (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | 866 (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
867 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | 867 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
868 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | 868 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
869 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec))); 869 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
870 } 870 }
871 871
872 static inline void 872 static inline void
873 jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value) 873 jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value)
874 { 874 {
875 value->tv_usec = (jiffies % HZ) * (1000000L / HZ); 875 value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
876 value->tv_sec = jiffies / HZ; 876 value->tv_sec = jiffies / HZ;
877 } 877 }
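The translation helpers above exist because OSF/1 declares tv_sec and tv_usec as 32-bit ints while the native Alpha structures use 64-bit longs, so each field has to be copied and widened individually rather than block-copied. A userspace sketch of that widening, assuming the layouts shown; the demo_* types merely stand in for timeval32 and the kernel's timeval.

#include <stdio.h>

struct demo_timeval32 { int tv_sec, tv_usec; };   /* OSF/1 layout: 2 x 32 bit */
struct demo_timeval   { long tv_sec, tv_usec; };  /* native layout: 2 x 64 bit */

/* Field-by-field copy, as get_tv32() does with __get_user(). */
static void demo_get_tv32(struct demo_timeval *o, const struct demo_timeval32 *i)
{
	o->tv_sec  = i->tv_sec;    /* int -> long widening */
	o->tv_usec = i->tv_usec;
}

int main(void)
{
	struct demo_timeval32 user = { 1136073600, 250000 };  /* 2006-01-01, .25 s */
	struct demo_timeval kernel;

	demo_get_tv32(&kernel, &user);
	printf("%ld.%06ld\n", kernel.tv_sec, kernel.tv_usec);
	return 0;
}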
878 878
879 asmlinkage int 879 asmlinkage int
880 osf_gettimeofday(struct timeval32 __user *tv, struct timezone __user *tz) 880 osf_gettimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
881 { 881 {
882 if (tv) { 882 if (tv) {
883 struct timeval ktv; 883 struct timeval ktv;
884 do_gettimeofday(&ktv); 884 do_gettimeofday(&ktv);
885 if (put_tv32(tv, &ktv)) 885 if (put_tv32(tv, &ktv))
886 return -EFAULT; 886 return -EFAULT;
887 } 887 }
888 if (tz) { 888 if (tz) {
889 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) 889 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
890 return -EFAULT; 890 return -EFAULT;
891 } 891 }
892 return 0; 892 return 0;
893 } 893 }
894 894
895 asmlinkage int 895 asmlinkage int
896 osf_settimeofday(struct timeval32 __user *tv, struct timezone __user *tz) 896 osf_settimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
897 { 897 {
898 struct timespec kts; 898 struct timespec kts;
899 struct timezone ktz; 899 struct timezone ktz;
900 900
901 if (tv) { 901 if (tv) {
902 if (get_tv32((struct timeval *)&kts, tv)) 902 if (get_tv32((struct timeval *)&kts, tv))
903 return -EFAULT; 903 return -EFAULT;
904 } 904 }
905 if (tz) { 905 if (tz) {
906 if (copy_from_user(&ktz, tz, sizeof(*tz))) 906 if (copy_from_user(&ktz, tz, sizeof(*tz)))
907 return -EFAULT; 907 return -EFAULT;
908 } 908 }
909 909
910 kts.tv_nsec *= 1000; 910 kts.tv_nsec *= 1000;
911 911
912 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); 912 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
913 } 913 }
914 914
915 asmlinkage int 915 asmlinkage int
916 osf_getitimer(int which, struct itimerval32 __user *it) 916 osf_getitimer(int which, struct itimerval32 __user *it)
917 { 917 {
918 struct itimerval kit; 918 struct itimerval kit;
919 int error; 919 int error;
920 920
921 error = do_getitimer(which, &kit); 921 error = do_getitimer(which, &kit);
922 if (!error && put_it32(it, &kit)) 922 if (!error && put_it32(it, &kit))
923 error = -EFAULT; 923 error = -EFAULT;
924 924
925 return error; 925 return error;
926 } 926 }
927 927
928 asmlinkage int 928 asmlinkage int
929 osf_setitimer(int which, struct itimerval32 __user *in, struct itimerval32 __user *out) 929 osf_setitimer(int which, struct itimerval32 __user *in, struct itimerval32 __user *out)
930 { 930 {
931 struct itimerval kin, kout; 931 struct itimerval kin, kout;
932 int error; 932 int error;
933 933
934 if (in) { 934 if (in) {
935 if (get_it32(&kin, in)) 935 if (get_it32(&kin, in))
936 return -EFAULT; 936 return -EFAULT;
937 } else 937 } else
938 memset(&kin, 0, sizeof(kin)); 938 memset(&kin, 0, sizeof(kin));
939 939
940 error = do_setitimer(which, &kin, out ? &kout : NULL); 940 error = do_setitimer(which, &kin, out ? &kout : NULL);
941 if (error || !out) 941 if (error || !out)
942 return error; 942 return error;
943 943
944 if (put_it32(out, &kout)) 944 if (put_it32(out, &kout))
945 return -EFAULT; 945 return -EFAULT;
946 946
947 return 0; 947 return 0;
948 948
949 } 949 }
950 950
951 asmlinkage int 951 asmlinkage int
952 osf_utimes(char __user *filename, struct timeval32 __user *tvs) 952 osf_utimes(char __user *filename, struct timeval32 __user *tvs)
953 { 953 {
954 struct timeval ktvs[2]; 954 struct timeval ktvs[2];
955 955
956 if (tvs) { 956 if (tvs) {
957 if (get_tv32(&ktvs[0], &tvs[0]) || 957 if (get_tv32(&ktvs[0], &tvs[0]) ||
958 get_tv32(&ktvs[1], &tvs[1])) 958 get_tv32(&ktvs[1], &tvs[1]))
959 return -EFAULT; 959 return -EFAULT;
960 } 960 }
961 961
962 return do_utimes(AT_FDCWD, filename, tvs ? ktvs : NULL); 962 return do_utimes(AT_FDCWD, filename, tvs ? ktvs : NULL);
963 } 963 }
964 964
965 #define MAX_SELECT_SECONDS \ 965 #define MAX_SELECT_SECONDS \
966 ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) 966 ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
967 967
968 asmlinkage int 968 asmlinkage int
969 osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, 969 osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
970 struct timeval32 __user *tvp) 970 struct timeval32 __user *tvp)
971 { 971 {
972 fd_set_bits fds; 972 fd_set_bits fds;
973 char *bits; 973 char *bits;
974 size_t size; 974 size_t size;
975 long timeout; 975 long timeout;
976 int ret = -EINVAL; 976 int ret = -EINVAL;
977 struct fdtable *fdt; 977 struct fdtable *fdt;
978 int max_fdset; 978 int max_fdset;
979 979
980 timeout = MAX_SCHEDULE_TIMEOUT; 980 timeout = MAX_SCHEDULE_TIMEOUT;
981 if (tvp) { 981 if (tvp) {
982 time_t sec, usec; 982 time_t sec, usec;
983 983
984 if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) 984 if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
985 || __get_user(sec, &tvp->tv_sec) 985 || __get_user(sec, &tvp->tv_sec)
986 || __get_user(usec, &tvp->tv_usec)) { 986 || __get_user(usec, &tvp->tv_usec)) {
987 ret = -EFAULT; 987 ret = -EFAULT;
988 goto out_nofds; 988 goto out_nofds;
989 } 989 }
990 990
991 if (sec < 0 || usec < 0) 991 if (sec < 0 || usec < 0)
992 goto out_nofds; 992 goto out_nofds;
993 993
994 if ((unsigned long) sec < MAX_SELECT_SECONDS) { 994 if ((unsigned long) sec < MAX_SELECT_SECONDS) {
995 timeout = (usec + 1000000/HZ - 1) / (1000000/HZ); 995 timeout = (usec + 1000000/HZ - 1) / (1000000/HZ);
996 timeout += sec * (unsigned long) HZ; 996 timeout += sec * (unsigned long) HZ;
997 } 997 }
998 } 998 }
999 999
1000 rcu_read_lock(); 1000 rcu_read_lock();
1001 fdt = files_fdtable(current->files); 1001 fdt = files_fdtable(current->files);
1002 max_fdset = fdt->max_fdset; 1002 max_fdset = fdt->max_fdset;
1003 rcu_read_unlock(); 1003 rcu_read_unlock();
1004 if (n < 0 || n > max_fdset) 1004 if (n < 0 || n > max_fdset)
1005 goto out_nofds; 1005 goto out_nofds;
1006 1006
1007 /* 1007 /*
1008 * We need 6 bitmaps (in/out/ex for both incoming and outgoing); 1008 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
1009 * since we use fd_set, we need to allocate memory in units of 1009 * since we use fd_set, we need to allocate memory in units of
1010 * long-words. 1010 * long-words.
1011 */ 1011 */
1012 ret = -ENOMEM; 1012 ret = -ENOMEM;
1013 size = FDS_BYTES(n); 1013 size = FDS_BYTES(n);
1014 bits = kmalloc(6 * size, GFP_KERNEL); 1014 bits = kmalloc(6 * size, GFP_KERNEL);
1015 if (!bits) 1015 if (!bits)
1016 goto out_nofds; 1016 goto out_nofds;
1017 fds.in = (unsigned long *) bits; 1017 fds.in = (unsigned long *) bits;
1018 fds.out = (unsigned long *) (bits + size); 1018 fds.out = (unsigned long *) (bits + size);
1019 fds.ex = (unsigned long *) (bits + 2*size); 1019 fds.ex = (unsigned long *) (bits + 2*size);
1020 fds.res_in = (unsigned long *) (bits + 3*size); 1020 fds.res_in = (unsigned long *) (bits + 3*size);
1021 fds.res_out = (unsigned long *) (bits + 4*size); 1021 fds.res_out = (unsigned long *) (bits + 4*size);
1022 fds.res_ex = (unsigned long *) (bits + 5*size); 1022 fds.res_ex = (unsigned long *) (bits + 5*size);
1023 1023
1024 if ((ret = get_fd_set(n, inp->fds_bits, fds.in)) || 1024 if ((ret = get_fd_set(n, inp->fds_bits, fds.in)) ||
1025 (ret = get_fd_set(n, outp->fds_bits, fds.out)) || 1025 (ret = get_fd_set(n, outp->fds_bits, fds.out)) ||
1026 (ret = get_fd_set(n, exp->fds_bits, fds.ex))) 1026 (ret = get_fd_set(n, exp->fds_bits, fds.ex)))
1027 goto out; 1027 goto out;
1028 zero_fd_set(n, fds.res_in); 1028 zero_fd_set(n, fds.res_in);
1029 zero_fd_set(n, fds.res_out); 1029 zero_fd_set(n, fds.res_out);
1030 zero_fd_set(n, fds.res_ex); 1030 zero_fd_set(n, fds.res_ex);
1031 1031
1032 ret = do_select(n, &fds, &timeout); 1032 ret = do_select(n, &fds, &timeout);
1033 1033
1034 /* OSF does not copy back the remaining time. */ 1034 /* OSF does not copy back the remaining time. */
1035 1035
1036 if (ret < 0) 1036 if (ret < 0)
1037 goto out; 1037 goto out;
1038 if (!ret) { 1038 if (!ret) {
1039 ret = -ERESTARTNOHAND; 1039 ret = -ERESTARTNOHAND;
1040 if (signal_pending(current)) 1040 if (signal_pending(current))
1041 goto out; 1041 goto out;
1042 ret = 0; 1042 ret = 0;
1043 } 1043 }
1044 1044
1045 if (set_fd_set(n, inp->fds_bits, fds.res_in) || 1045 if (set_fd_set(n, inp->fds_bits, fds.res_in) ||
1046 set_fd_set(n, outp->fds_bits, fds.res_out) || 1046 set_fd_set(n, outp->fds_bits, fds.res_out) ||
1047 set_fd_set(n, exp->fds_bits, fds.res_ex)) 1047 set_fd_set(n, exp->fds_bits, fds.res_ex))
1048 ret = -EFAULT; 1048 ret = -EFAULT;
1049 1049
1050 out: 1050 out:
1051 kfree(bits); 1051 kfree(bits);
1052 out_nofds: 1052 out_nofds:
1053 return ret; 1053 return ret;
1054 } 1054 }
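osf_select() above turns the user's timeval into scheduler ticks by rounding the microseconds up to whole ticks and then adding the seconds scaled by HZ, so the wait is never shorter than requested. A worked standalone example of that arithmetic; HZ is assumed to be 1024 here purely for illustration.

#include <stdio.h>

#define DEMO_HZ 1024L

int main(void)
{
	long sec = 2, usec = 1;    /* user asked for 2.000001 seconds */
	long timeout;

	/* Round microseconds up to whole ticks, then add the seconds in ticks. */
	timeout = (usec + 1000000 / DEMO_HZ - 1) / (1000000 / DEMO_HZ);
	timeout += sec * DEMO_HZ;

	printf("%ld ticks\n", timeout);   /* 2049: one extra tick, never early */
	return 0;
}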
1055 1055
1056 struct rusage32 { 1056 struct rusage32 {
1057 struct timeval32 ru_utime; /* user time used */ 1057 struct timeval32 ru_utime; /* user time used */
1058 struct timeval32 ru_stime; /* system time used */ 1058 struct timeval32 ru_stime; /* system time used */
1059 long ru_maxrss; /* maximum resident set size */ 1059 long ru_maxrss; /* maximum resident set size */
1060 long ru_ixrss; /* integral shared memory size */ 1060 long ru_ixrss; /* integral shared memory size */
1061 long ru_idrss; /* integral unshared data size */ 1061 long ru_idrss; /* integral unshared data size */
1062 long ru_isrss; /* integral unshared stack size */ 1062 long ru_isrss; /* integral unshared stack size */
1063 long ru_minflt; /* page reclaims */ 1063 long ru_minflt; /* page reclaims */
1064 long ru_majflt; /* page faults */ 1064 long ru_majflt; /* page faults */
1065 long ru_nswap; /* swaps */ 1065 long ru_nswap; /* swaps */
1066 long ru_inblock; /* block input operations */ 1066 long ru_inblock; /* block input operations */
1067 long ru_oublock; /* block output operations */ 1067 long ru_oublock; /* block output operations */
1068 long ru_msgsnd; /* messages sent */ 1068 long ru_msgsnd; /* messages sent */
1069 long ru_msgrcv; /* messages received */ 1069 long ru_msgrcv; /* messages received */
1070 long ru_nsignals; /* signals received */ 1070 long ru_nsignals; /* signals received */
1071 long ru_nvcsw; /* voluntary context switches */ 1071 long ru_nvcsw; /* voluntary context switches */
1072 long ru_nivcsw; /* involuntary " */ 1072 long ru_nivcsw; /* involuntary " */
1073 }; 1073 };
1074 1074
1075 asmlinkage int 1075 asmlinkage int
1076 osf_getrusage(int who, struct rusage32 __user *ru) 1076 osf_getrusage(int who, struct rusage32 __user *ru)
1077 { 1077 {
1078 struct rusage32 r; 1078 struct rusage32 r;
1079 1079
1080 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) 1080 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
1081 return -EINVAL; 1081 return -EINVAL;
1082 1082
1083 memset(&r, 0, sizeof(r)); 1083 memset(&r, 0, sizeof(r));
1084 switch (who) { 1084 switch (who) {
1085 case RUSAGE_SELF: 1085 case RUSAGE_SELF:
1086 jiffies_to_timeval32(current->utime, &r.ru_utime); 1086 jiffies_to_timeval32(current->utime, &r.ru_utime);
1087 jiffies_to_timeval32(current->stime, &r.ru_stime); 1087 jiffies_to_timeval32(current->stime, &r.ru_stime);
1088 r.ru_minflt = current->min_flt; 1088 r.ru_minflt = current->min_flt;
1089 r.ru_majflt = current->maj_flt; 1089 r.ru_majflt = current->maj_flt;
1090 break; 1090 break;
1091 case RUSAGE_CHILDREN: 1091 case RUSAGE_CHILDREN:
1092 jiffies_to_timeval32(current->signal->cutime, &r.ru_utime); 1092 jiffies_to_timeval32(current->signal->cutime, &r.ru_utime);
1093 jiffies_to_timeval32(current->signal->cstime, &r.ru_stime); 1093 jiffies_to_timeval32(current->signal->cstime, &r.ru_stime);
1094 r.ru_minflt = current->signal->cmin_flt; 1094 r.ru_minflt = current->signal->cmin_flt;
1095 r.ru_majflt = current->signal->cmaj_flt; 1095 r.ru_majflt = current->signal->cmaj_flt;
1096 break; 1096 break;
1097 } 1097 }
1098 1098
1099 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; 1099 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1100 } 1100 }
1101 1101
1102 asmlinkage long 1102 asmlinkage long
1103 osf_wait4(pid_t pid, int __user *ustatus, int options, 1103 osf_wait4(pid_t pid, int __user *ustatus, int options,
1104 struct rusage32 __user *ur) 1104 struct rusage32 __user *ur)
1105 { 1105 {
1106 struct rusage r; 1106 struct rusage r;
1107 long ret, err; 1107 long ret, err;
1108 mm_segment_t old_fs; 1108 mm_segment_t old_fs;
1109 1109
1110 if (!ur) 1110 if (!ur)
1111 return sys_wait4(pid, ustatus, options, NULL); 1111 return sys_wait4(pid, ustatus, options, NULL);
1112 1112
1113 old_fs = get_fs(); 1113 old_fs = get_fs();
1114 1114
1115 set_fs (KERNEL_DS); 1115 set_fs (KERNEL_DS);
1116 ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r); 1116 ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
1117 set_fs (old_fs); 1117 set_fs (old_fs);
1118 1118
1119 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) 1119 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
1120 return -EFAULT; 1120 return -EFAULT;
1121 1121
1122 err = 0; 1122 err = 0;
1123 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); 1123 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
1124 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); 1124 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
1125 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); 1125 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
1126 err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec); 1126 err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec);
1127 err |= __put_user(r.ru_maxrss, &ur->ru_maxrss); 1127 err |= __put_user(r.ru_maxrss, &ur->ru_maxrss);
1128 err |= __put_user(r.ru_ixrss, &ur->ru_ixrss); 1128 err |= __put_user(r.ru_ixrss, &ur->ru_ixrss);
1129 err |= __put_user(r.ru_idrss, &ur->ru_idrss); 1129 err |= __put_user(r.ru_idrss, &ur->ru_idrss);
1130 err |= __put_user(r.ru_isrss, &ur->ru_isrss); 1130 err |= __put_user(r.ru_isrss, &ur->ru_isrss);
1131 err |= __put_user(r.ru_minflt, &ur->ru_minflt); 1131 err |= __put_user(r.ru_minflt, &ur->ru_minflt);
1132 err |= __put_user(r.ru_majflt, &ur->ru_majflt); 1132 err |= __put_user(r.ru_majflt, &ur->ru_majflt);
1133 err |= __put_user(r.ru_nswap, &ur->ru_nswap); 1133 err |= __put_user(r.ru_nswap, &ur->ru_nswap);
1134 err |= __put_user(r.ru_inblock, &ur->ru_inblock); 1134 err |= __put_user(r.ru_inblock, &ur->ru_inblock);
1135 err |= __put_user(r.ru_oublock, &ur->ru_oublock); 1135 err |= __put_user(r.ru_oublock, &ur->ru_oublock);
1136 err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd); 1136 err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd);
1137 err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv); 1137 err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv);
1138 err |= __put_user(r.ru_nsignals, &ur->ru_nsignals); 1138 err |= __put_user(r.ru_nsignals, &ur->ru_nsignals);
1139 err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw); 1139 err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw);
1140 err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw); 1140 err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw);
1141 1141
1142 return err ? err : ret; 1142 return err ? err : ret;
1143 } 1143 }
1144 1144
1145 /* 1145 /*
1146 * I don't know what the parameters are: the first one 1146 * I don't know what the parameters are: the first one
1147 * seems to be a timeval pointer, and I suspect the second 1147 * seems to be a timeval pointer, and I suspect the second
1148 * one is the time remaining.. Ho humm.. No documentation. 1148 * one is the time remaining.. Ho humm.. No documentation.
1149 */ 1149 */
1150 asmlinkage int 1150 asmlinkage int
1151 osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remain) 1151 osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remain)
1152 { 1152 {
1153 struct timeval tmp; 1153 struct timeval tmp;
1154 unsigned long ticks; 1154 unsigned long ticks;
1155 1155
1156 if (get_tv32(&tmp, sleep)) 1156 if (get_tv32(&tmp, sleep))
1157 goto fault; 1157 goto fault;
1158 1158
1159 ticks = timeval_to_jiffies(&tmp); 1159 ticks = timeval_to_jiffies(&tmp);
1160 1160
1161 ticks = schedule_timeout_interruptible(ticks); 1161 ticks = schedule_timeout_interruptible(ticks);
1162 1162
1163 if (remain) { 1163 if (remain) {
1164 jiffies_to_timeval(ticks, &tmp); 1164 jiffies_to_timeval(ticks, &tmp);
1165 if (put_tv32(remain, &tmp)) 1165 if (put_tv32(remain, &tmp))
1166 goto fault; 1166 goto fault;
1167 } 1167 }
1168 1168
1169 return 0; 1169 return 0;
1170 fault: 1170 fault:
1171 return -EFAULT; 1171 return -EFAULT;
1172 } 1172 }
1173 1173
1174 1174
1175 struct timex32 { 1175 struct timex32 {
1176 unsigned int modes; /* mode selector */ 1176 unsigned int modes; /* mode selector */
1177 long offset; /* time offset (usec) */ 1177 long offset; /* time offset (usec) */
1178 long freq; /* frequency offset (scaled ppm) */ 1178 long freq; /* frequency offset (scaled ppm) */
1179 long maxerror; /* maximum error (usec) */ 1179 long maxerror; /* maximum error (usec) */
1180 long esterror; /* estimated error (usec) */ 1180 long esterror; /* estimated error (usec) */
1181 int status; /* clock command/status */ 1181 int status; /* clock command/status */
1182 long constant; /* pll time constant */ 1182 long constant; /* pll time constant */
1183 long precision; /* clock precision (usec) (read only) */ 1183 long precision; /* clock precision (usec) (read only) */
1184 long tolerance; /* clock frequency tolerance (ppm) 1184 long tolerance; /* clock frequency tolerance (ppm)
1185 * (read only) 1185 * (read only)
1186 */ 1186 */
1187 struct timeval32 time; /* (read only) */ 1187 struct timeval32 time; /* (read only) */
1188 long tick; /* (modified) usecs between clock ticks */ 1188 long tick; /* (modified) usecs between clock ticks */
1189 1189
1190 long ppsfreq; /* pps frequency (scaled ppm) (ro) */ 1190 long ppsfreq; /* pps frequency (scaled ppm) (ro) */
1191 long jitter; /* pps jitter (us) (ro) */ 1191 long jitter; /* pps jitter (us) (ro) */
1192 int shift; /* interval duration (s) (shift) (ro) */ 1192 int shift; /* interval duration (s) (shift) (ro) */
1193 long stabil; /* pps stability (scaled ppm) (ro) */ 1193 long stabil; /* pps stability (scaled ppm) (ro) */
1194 long jitcnt; /* jitter limit exceeded (ro) */ 1194 long jitcnt; /* jitter limit exceeded (ro) */
1195 long calcnt; /* calibration intervals (ro) */ 1195 long calcnt; /* calibration intervals (ro) */
1196 long errcnt; /* calibration errors (ro) */ 1196 long errcnt; /* calibration errors (ro) */
1197 long stbcnt; /* stability limit exceeded (ro) */ 1197 long stbcnt; /* stability limit exceeded (ro) */
1198 1198
1199 int :32; int :32; int :32; int :32; 1199 int :32; int :32; int :32; int :32;
1200 int :32; int :32; int :32; int :32; 1200 int :32; int :32; int :32; int :32;
1201 int :32; int :32; int :32; int :32; 1201 int :32; int :32; int :32; int :32;
1202 }; 1202 };
1203 1203
1204 asmlinkage int 1204 asmlinkage int
1205 sys_old_adjtimex(struct timex32 __user *txc_p) 1205 sys_old_adjtimex(struct timex32 __user *txc_p)
1206 { 1206 {
1207 struct timex txc; 1207 struct timex txc;
1208 int ret; 1208 int ret;
1209 1209
1210 /* copy relevant bits of struct timex. */ 1210 /* copy relevant bits of struct timex. */
1211 if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || 1211 if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
1212 copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - 1212 copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
1213 offsetof(struct timex32, time))) 1213 offsetof(struct timex32, time)))
1214 return -EFAULT; 1214 return -EFAULT;
1215 1215
1216 ret = do_adjtimex(&txc); 1216 ret = do_adjtimex(&txc);
1217 if (ret < 0) 1217 if (ret < 0)
1218 return ret; 1218 return ret;
1219 1219
1220 /* copy back to timex32 */ 1220 /* copy back to timex32 */
1221 if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) || 1221 if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) ||
1222 (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) - 1222 (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) -
1223 offsetof(struct timex32, tick))) || 1223 offsetof(struct timex32, tick))) ||
1224 (put_tv32(&txc_p->time, &txc.time))) 1224 (put_tv32(&txc_p->time, &txc.time)))
1225 return -EFAULT; 1225 return -EFAULT;
1226 1226
1227 return ret; 1227 return ret;
1228 } 1228 }
1229 1229
1230 /* Get an address range which is currently unmapped. Similar to the 1230 /* Get an address range which is currently unmapped. Similar to the
1231 generic version except that we know how to honor ADDR_LIMIT_32BIT. */ 1231 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
1232 1232
1233 static unsigned long 1233 static unsigned long
1234 arch_get_unmapped_area_1(unsigned long addr, unsigned long len, 1234 arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
1235 unsigned long limit) 1235 unsigned long limit)
1236 { 1236 {
1237 struct vm_area_struct *vma = find_vma(current->mm, addr); 1237 struct vm_area_struct *vma = find_vma(current->mm, addr);
1238 1238
1239 while (1) { 1239 while (1) {
1240 /* At this point: (!vma || addr < vma->vm_end). */ 1240 /* At this point: (!vma || addr < vma->vm_end). */
1241 if (limit - len < addr) 1241 if (limit - len < addr)
1242 return -ENOMEM; 1242 return -ENOMEM;
1243 if (!vma || addr + len <= vma->vm_start) 1243 if (!vma || addr + len <= vma->vm_start)
1244 return addr; 1244 return addr;
1245 addr = vma->vm_end; 1245 addr = vma->vm_end;
1246 vma = vma->vm_next; 1246 vma = vma->vm_next;
1247 } 1247 }
1248 } 1248 }
1249 1249
1250 unsigned long 1250 unsigned long
1251 arch_get_unmapped_area(struct file *filp, unsigned long addr, 1251 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1252 unsigned long len, unsigned long pgoff, 1252 unsigned long len, unsigned long pgoff,
1253 unsigned long flags) 1253 unsigned long flags)
1254 { 1254 {
1255 unsigned long limit; 1255 unsigned long limit;
1256 1256
1257 /* "32 bit" actually means 31 bit, since pointers sign extend. */ 1257 /* "32 bit" actually means 31 bit, since pointers sign extend. */
1258 if (current->personality & ADDR_LIMIT_32BIT) 1258 if (current->personality & ADDR_LIMIT_32BIT)
1259 limit = 0x80000000; 1259 limit = 0x80000000;
1260 else 1260 else
1261 limit = TASK_SIZE; 1261 limit = TASK_SIZE;
1262 1262
1263 if (len > limit) 1263 if (len > limit)
1264 return -ENOMEM; 1264 return -ENOMEM;
1265 1265
1266 /* First, see if the given suggestion fits. 1266 /* First, see if the given suggestion fits.
1267 1267
1268 The OSF/1 loader (/sbin/loader) relies on us returning an 1268 The OSF/1 loader (/sbin/loader) relies on us returning an
1269 address larger than the requested if one exists, which is 1269 address larger than the requested if one exists, which is
1270 a terribly broken way to program. 1270 a terribly broken way to program.
1271 1271
1272 That said, I can see the use in being able to suggest not 1272 That said, I can see the use in being able to suggest not
1273 merely specific addresses, but regions of memory -- perhaps 1273 merely specific addresses, but regions of memory -- perhaps
1274 this feature should be incorporated into all ports? */ 1274 this feature should be incorporated into all ports? */
1275 1275
1276 if (addr) { 1276 if (addr) {
1277 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); 1277 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
1278 if (addr != (unsigned long) -ENOMEM) 1278 if (addr != (unsigned long) -ENOMEM)
1279 return addr; 1279 return addr;
1280 } 1280 }
1281 1281
1282 /* Next, try allocating at TASK_UNMAPPED_BASE. */ 1282 /* Next, try allocating at TASK_UNMAPPED_BASE. */
1283 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), 1283 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
1284 len, limit); 1284 len, limit);
1285 if (addr != (unsigned long) -ENOMEM) 1285 if (addr != (unsigned long) -ENOMEM)
1286 return addr; 1286 return addr;
1287 1287
1288 /* Finally, try allocating in low memory. */ 1288 /* Finally, try allocating in low memory. */
1289 addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); 1289 addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
1290 1290
1291 return addr; 1291 return addr;
1292 } 1292 }
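arch_get_unmapped_area_1() above is a first-fit scan: starting from the suggested address it walks the address-ordered mappings and returns the first gap big enough for the request, failing once the gap would cross the limit. A toy sketch of the same scan over an invented region list (the kernel walks real vm_area_structs; all demo_* names are made up for the example).

#include <stdio.h>

struct demo_vma { unsigned long start, end; };   /* [start, end) mapping */

static unsigned long
demo_find_gap(const struct demo_vma *vma, int nvma,
              unsigned long addr, unsigned long len, unsigned long limit)
{
	int i;

	for (i = 0; ; i++) {
		if (limit - len < addr)
			return (unsigned long)-1;   /* the kernel returns -ENOMEM */
		if (i >= nvma || addr + len <= vma[i].start)
			return addr;                /* gap found before this mapping */
		addr = vma[i].end;                  /* skip past the mapping */
	}
}

int main(void)
{
	static const struct demo_vma map[] = {
		{ 0x10000, 0x20000 },
		{ 0x20000, 0x60000 },
	};

	/* First hole at or above 0x10000 that can hold 0x8000 bytes: 0x60000. */
	printf("0x%lx\n", demo_find_gap(map, 2, 0x10000, 0x8000, 0x80000000UL));
	return 0;
}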
1293 1293
1294 #ifdef CONFIG_OSF4_COMPAT 1294 #ifdef CONFIG_OSF4_COMPAT
1295 1295
1296 /* Clear top 32 bits of iov_len in the user's buffer for 1296 /* Clear top 32 bits of iov_len in the user's buffer for
1297 compatibility with old versions of OSF/1 where iov_len 1297 compatibility with old versions of OSF/1 where iov_len
1298 was defined as int. */ 1298 was defined as int. */
1299 static int 1299 static int
1300 osf_fix_iov_len(const struct iovec __user *iov, unsigned long count) 1300 osf_fix_iov_len(const struct iovec __user *iov, unsigned long count)
1301 { 1301 {
1302 unsigned long i; 1302 unsigned long i;
1303 1303
1304 for (i = 0 ; i < count ; i++) { 1304 for (i = 0 ; i < count ; i++) {
1305 int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1; 1305 int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1;
1306 1306
1307 if (put_user(0, iov_len_high)) 1307 if (put_user(0, iov_len_high))
1308 return -EFAULT; 1308 return -EFAULT;
1309 } 1309 }
1310 return 0; 1310 return 0;
1311 } 1311 }
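osf_fix_iov_len() above leans on Alpha being 64-bit little-endian: the high 32 bits of each iov_len sit 4 bytes past the low 32, so "(int __user *)&iov[i].iov_len + 1" addresses exactly the half that old OSF/1 binaries leave as garbage. A little-endian userspace sketch of that fixup; illustrative only, not kernel code.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long iov_len = 0xdeadbeef00000010UL;  /* garbage in the top half */
	int zero = 0;

	/* Zero the upper 32 bits: same effect as put_user(0, iov_len_high). */
	memcpy((char *)&iov_len + sizeof(int), &zero, sizeof(int));

	printf("iov_len = 0x%lx\n", iov_len);          /* 0x10 on little-endian */
	return 0;
}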
1312 1312
1313 asmlinkage ssize_t 1313 asmlinkage ssize_t
1314 osf_readv(unsigned long fd, const struct iovec __user * vector, unsigned long count) 1314 osf_readv(unsigned long fd, const struct iovec __user * vector, unsigned long count)
1315 { 1315 {
1316 if (unlikely(personality(current->personality) == PER_OSF4)) 1316 if (unlikely(personality(current->personality) == PER_OSF4))
1317 if (osf_fix_iov_len(vector, count)) 1317 if (osf_fix_iov_len(vector, count))
1318 return -EFAULT; 1318 return -EFAULT;
1319 return sys_readv(fd, vector, count); 1319 return sys_readv(fd, vector, count);
1320 } 1320 }
1321 1321
1322 asmlinkage ssize_t 1322 asmlinkage ssize_t
1323 osf_writev(unsigned long fd, const struct iovec __user * vector, unsigned long count) 1323 osf_writev(unsigned long fd, const struct iovec __user * vector, unsigned long count)
1324 { 1324 {
1325 if (unlikely(personality(current->personality) == PER_OSF4)) 1325 if (unlikely(personality(current->personality) == PER_OSF4))
1326 if (osf_fix_iov_len(vector, count)) 1326 if (osf_fix_iov_len(vector, count))
1327 return -EFAULT; 1327 return -EFAULT;
1328 return sys_writev(fd, vector, count); 1328 return sys_writev(fd, vector, count);
1329 } 1329 }
1330 1330
1331 #endif 1331 #endif
1332 1332
arch/alpha/kernel/setup.c
1 /* 1 /*
2 * linux/arch/alpha/kernel/setup.c 2 * linux/arch/alpha/kernel/setup.c
3 * 3 *
4 * Copyright (C) 1995 Linus Torvalds 4 * Copyright (C) 1995 Linus Torvalds
5 */ 5 */
6 6
7 /* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */ 7 /* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
8 8
9 /* 9 /*
10 * Bootup setup stuff. 10 * Bootup setup stuff.
11 */ 11 */
12 12
13 #include <linux/sched.h> 13 #include <linux/sched.h>
14 #include <linux/kernel.h> 14 #include <linux/kernel.h>
15 #include <linux/mm.h> 15 #include <linux/mm.h>
16 #include <linux/stddef.h> 16 #include <linux/stddef.h>
17 #include <linux/unistd.h> 17 #include <linux/unistd.h>
18 #include <linux/ptrace.h> 18 #include <linux/ptrace.h>
19 #include <linux/slab.h> 19 #include <linux/slab.h>
20 #include <linux/user.h> 20 #include <linux/user.h>
21 #include <linux/a.out.h> 21 #include <linux/a.out.h>
22 #include <linux/screen_info.h> 22 #include <linux/screen_info.h>
23 #include <linux/delay.h> 23 #include <linux/delay.h>
24 #include <linux/config.h> /* CONFIG_ALPHA_LCA etc */ 24 #include <linux/config.h> /* CONFIG_ALPHA_LCA etc */
25 #include <linux/mc146818rtc.h> 25 #include <linux/mc146818rtc.h>
26 #include <linux/console.h> 26 #include <linux/console.h>
27 #include <linux/cpu.h> 27 #include <linux/cpu.h>
28 #include <linux/errno.h> 28 #include <linux/errno.h>
29 #include <linux/init.h> 29 #include <linux/init.h>
30 #include <linux/string.h> 30 #include <linux/string.h>
31 #include <linux/ioport.h> 31 #include <linux/ioport.h>
32 #include <linux/platform_device.h> 32 #include <linux/platform_device.h>
33 #include <linux/bootmem.h> 33 #include <linux/bootmem.h>
34 #include <linux/pci.h> 34 #include <linux/pci.h>
35 #include <linux/seq_file.h> 35 #include <linux/seq_file.h>
36 #include <linux/root_dev.h> 36 #include <linux/root_dev.h>
37 #include <linux/initrd.h> 37 #include <linux/initrd.h>
38 #include <linux/eisa.h> 38 #include <linux/eisa.h>
39 #include <linux/pfn.h> 39 #include <linux/pfn.h>
40 #ifdef CONFIG_MAGIC_SYSRQ 40 #ifdef CONFIG_MAGIC_SYSRQ
41 #include <linux/sysrq.h> 41 #include <linux/sysrq.h>
42 #include <linux/reboot.h> 42 #include <linux/reboot.h>
43 #endif 43 #endif
44 #include <linux/notifier.h> 44 #include <linux/notifier.h>
45 #include <asm/setup.h> 45 #include <asm/setup.h>
46 #include <asm/io.h> 46 #include <asm/io.h>
47 47
48 extern struct atomic_notifier_head panic_notifier_list; 48 extern struct atomic_notifier_head panic_notifier_list;
49 static int alpha_panic_event(struct notifier_block *, unsigned long, void *); 49 static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
50 static struct notifier_block alpha_panic_block = { 50 static struct notifier_block alpha_panic_block = {
51 alpha_panic_event, 51 alpha_panic_event,
52 NULL, 52 NULL,
53 INT_MAX /* try to do it first */ 53 INT_MAX /* try to do it first */
54 }; 54 };
55 55
56 #include <asm/uaccess.h> 56 #include <asm/uaccess.h>
57 #include <asm/pgtable.h> 57 #include <asm/pgtable.h>
58 #include <asm/system.h> 58 #include <asm/system.h>
59 #include <asm/hwrpb.h> 59 #include <asm/hwrpb.h>
60 #include <asm/dma.h> 60 #include <asm/dma.h>
61 #include <asm/io.h> 61 #include <asm/io.h>
62 #include <asm/mmu_context.h> 62 #include <asm/mmu_context.h>
63 #include <asm/console.h> 63 #include <asm/console.h>
64 64
65 #include "proto.h" 65 #include "proto.h"
66 #include "pci_impl.h" 66 #include "pci_impl.h"
67 67
68 68
69 struct hwrpb_struct *hwrpb; 69 struct hwrpb_struct *hwrpb;
70 unsigned long srm_hae; 70 unsigned long srm_hae;
71 71
72 int alpha_l1i_cacheshape; 72 int alpha_l1i_cacheshape;
73 int alpha_l1d_cacheshape; 73 int alpha_l1d_cacheshape;
74 int alpha_l2_cacheshape; 74 int alpha_l2_cacheshape;
75 int alpha_l3_cacheshape; 75 int alpha_l3_cacheshape;
76 76
77 #ifdef CONFIG_VERBOSE_MCHECK 77 #ifdef CONFIG_VERBOSE_MCHECK
78 /* 0=minimum, 1=verbose, 2=all */ 78 /* 0=minimum, 1=verbose, 2=all */
79 /* These can be overridden via the command line, e.g. "verbose_mcheck=2" */ 79 /* These can be overridden via the command line, e.g. "verbose_mcheck=2" */
80 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON; 80 unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
81 #endif 81 #endif
82 82
83 /* Which processor we booted from. */ 83 /* Which processor we booted from. */
84 int boot_cpuid; 84 int boot_cpuid;
85 85
86 /* 86 /*
87 * Using SRM callbacks for initial console output. This works from 87 * Using SRM callbacks for initial console output. This works from
88 * setup_arch() time through the end of time_init(), as those places 88 * setup_arch() time through the end of time_init(), as those places
89 * are under our (Alpha) control. 89 * are under our (Alpha) control.
90 90
91 * "srmcons" specified in the boot command arguments allows us to 91 * "srmcons" specified in the boot command arguments allows us to
92 * see kernel messages during the period of time before the true 92 * see kernel messages during the period of time before the true
93 * console device is "registered" during console_init(). 93 * console device is "registered" during console_init().
94 * As of this version (2.5.59), console_init() will call 94 * As of this version (2.5.59), console_init() will call
95 * disable_early_printk() as the last action before initializing 95 * disable_early_printk() as the last action before initializing
96 * the console drivers. That's the last possible time srmcons can be 96 * the console drivers. That's the last possible time srmcons can be
97 * unregistered without interfering with console behavior. 97 * unregistered without interfering with console behavior.
98 * 98 *
99 * By default, OFF; set it with a bootcommand arg of "srmcons" or 99 * By default, OFF; set it with a bootcommand arg of "srmcons" or
100 * "console=srm". The meaning of these two args is: 100 * "console=srm". The meaning of these two args is:
101 * "srmcons" - early callback prints 101 * "srmcons" - early callback prints
102 * "console=srm" - full callback based console, including early prints 102 * "console=srm" - full callback based console, including early prints
103 */ 103 */
104 int srmcons_output = 0; 104 int srmcons_output = 0;
105 105
106 /* Enforce a memory size limit; useful for testing. By default, none. */ 106 /* Enforce a memory size limit; useful for testing. By default, none. */
107 unsigned long mem_size_limit = 0; 107 unsigned long mem_size_limit = 0;
108 108
109 /* Set AGP GART window size (0 means disabled). */ 109 /* Set AGP GART window size (0 means disabled). */
110 unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE; 110 unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
111 111
112 #ifdef CONFIG_ALPHA_GENERIC 112 #ifdef CONFIG_ALPHA_GENERIC
113 struct alpha_machine_vector alpha_mv; 113 struct alpha_machine_vector alpha_mv;
114 int alpha_using_srm; 114 int alpha_using_srm;
115 #endif 115 #endif
116 116
117 #define N(a) (sizeof(a)/sizeof(a[0]))
118
119 static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long, 117 static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
120 unsigned long); 118 unsigned long);
121 static struct alpha_machine_vector *get_sysvec_byname(const char *); 119 static struct alpha_machine_vector *get_sysvec_byname(const char *);
122 static void get_sysnames(unsigned long, unsigned long, unsigned long, 120 static void get_sysnames(unsigned long, unsigned long, unsigned long,
123 char **, char **); 121 char **, char **);
124 static void determine_cpu_caches (unsigned int); 122 static void determine_cpu_caches (unsigned int);
125 123
126 static char command_line[COMMAND_LINE_SIZE]; 124 static char command_line[COMMAND_LINE_SIZE];
127 125
128 /* 126 /*
129 * The format of "screen_info" is strange, and due to early 127 * The format of "screen_info" is strange, and due to early
130 * i386-setup code. This is just enough to make the console 128 * i386-setup code. This is just enough to make the console
131 * code think we're on a VGA color display. 129 * code think we're on a VGA color display.
132 */ 130 */
133 131
134 struct screen_info screen_info = { 132 struct screen_info screen_info = {
135 .orig_x = 0, 133 .orig_x = 0,
136 .orig_y = 25, 134 .orig_y = 25,
137 .orig_video_cols = 80, 135 .orig_video_cols = 80,
138 .orig_video_lines = 25, 136 .orig_video_lines = 25,
139 .orig_video_isVGA = 1, 137 .orig_video_isVGA = 1,
140 .orig_video_points = 16 138 .orig_video_points = 16
141 }; 139 };
142 140
143 /* 141 /*
144 * The direct map I/O window, if any. This should be the same 142 * The direct map I/O window, if any. This should be the same
145 * for all busses, since it's used by virt_to_bus. 143 * for all busses, since it's used by virt_to_bus.
146 */ 144 */
147 145
148 unsigned long __direct_map_base; 146 unsigned long __direct_map_base;
149 unsigned long __direct_map_size; 147 unsigned long __direct_map_size;
150 148
151 /* 149 /*
152 * Declare all of the machine vectors. 150 * Declare all of the machine vectors.
153 */ 151 */
154 152
155 /* GCC 2.7.2 (on alpha at least) is lame. It does not support either 153 /* GCC 2.7.2 (on alpha at least) is lame. It does not support either
156 __attribute__((weak)) or #pragma weak. Bypass it and talk directly 154 __attribute__((weak)) or #pragma weak. Bypass it and talk directly
157 to the assembler. */ 155 to the assembler. */
158 156
159 #define WEAK(X) \ 157 #define WEAK(X) \
160 extern struct alpha_machine_vector X; \ 158 extern struct alpha_machine_vector X; \
161 asm(".weak "#X) 159 asm(".weak "#X)
162 160
163 WEAK(alcor_mv); 161 WEAK(alcor_mv);
164 WEAK(alphabook1_mv); 162 WEAK(alphabook1_mv);
165 WEAK(avanti_mv); 163 WEAK(avanti_mv);
166 WEAK(cabriolet_mv); 164 WEAK(cabriolet_mv);
167 WEAK(clipper_mv); 165 WEAK(clipper_mv);
168 WEAK(dp264_mv); 166 WEAK(dp264_mv);
169 WEAK(eb164_mv); 167 WEAK(eb164_mv);
170 WEAK(eb64p_mv); 168 WEAK(eb64p_mv);
171 WEAK(eb66_mv); 169 WEAK(eb66_mv);
172 WEAK(eb66p_mv); 170 WEAK(eb66p_mv);
173 WEAK(eiger_mv); 171 WEAK(eiger_mv);
174 WEAK(jensen_mv); 172 WEAK(jensen_mv);
175 WEAK(lx164_mv); 173 WEAK(lx164_mv);
176 WEAK(lynx_mv); 174 WEAK(lynx_mv);
177 WEAK(marvel_ev7_mv); 175 WEAK(marvel_ev7_mv);
178 WEAK(miata_mv); 176 WEAK(miata_mv);
179 WEAK(mikasa_mv); 177 WEAK(mikasa_mv);
180 WEAK(mikasa_primo_mv); 178 WEAK(mikasa_primo_mv);
181 WEAK(monet_mv); 179 WEAK(monet_mv);
182 WEAK(nautilus_mv); 180 WEAK(nautilus_mv);
183 WEAK(noname_mv); 181 WEAK(noname_mv);
184 WEAK(noritake_mv); 182 WEAK(noritake_mv);
185 WEAK(noritake_primo_mv); 183 WEAK(noritake_primo_mv);
186 WEAK(p2k_mv); 184 WEAK(p2k_mv);
187 WEAK(pc164_mv); 185 WEAK(pc164_mv);
188 WEAK(privateer_mv); 186 WEAK(privateer_mv);
189 WEAK(rawhide_mv); 187 WEAK(rawhide_mv);
190 WEAK(ruffian_mv); 188 WEAK(ruffian_mv);
191 WEAK(rx164_mv); 189 WEAK(rx164_mv);
192 WEAK(sable_mv); 190 WEAK(sable_mv);
193 WEAK(sable_gamma_mv); 191 WEAK(sable_gamma_mv);
194 WEAK(shark_mv); 192 WEAK(shark_mv);
195 WEAK(sx164_mv); 193 WEAK(sx164_mv);
196 WEAK(takara_mv); 194 WEAK(takara_mv);
197 WEAK(titan_mv); 195 WEAK(titan_mv);
198 WEAK(webbrick_mv); 196 WEAK(webbrick_mv);
199 WEAK(wildfire_mv); 197 WEAK(wildfire_mv);
200 WEAK(xl_mv); 198 WEAK(xl_mv);
201 WEAK(xlt_mv); 199 WEAK(xlt_mv);
202 200
203 #undef WEAK 201 #undef WEAK
204 202
205 /* 203 /*
206 * I/O resources inherited from PeeCees. Except for perhaps the 204 * I/O resources inherited from PeeCees. Except for perhaps the
207 * turbochannel alphas, everyone has these on some sort of SuperIO chip. 205 * turbochannel alphas, everyone has these on some sort of SuperIO chip.
208 * 206 *
209 * ??? If this becomes less standard, move the struct out into the 207 * ??? If this becomes less standard, move the struct out into the
210 * machine vector. 208 * machine vector.
211 */ 209 */
212 210
213 static void __init 211 static void __init
214 reserve_std_resources(void) 212 reserve_std_resources(void)
215 { 213 {
216 static struct resource standard_io_resources[] = { 214 static struct resource standard_io_resources[] = {
217 { .name = "rtc", .start = -1, .end = -1 }, 215 { .name = "rtc", .start = -1, .end = -1 },
218 { .name = "dma1", .start = 0x00, .end = 0x1f }, 216 { .name = "dma1", .start = 0x00, .end = 0x1f },
219 { .name = "pic1", .start = 0x20, .end = 0x3f }, 217 { .name = "pic1", .start = 0x20, .end = 0x3f },
220 { .name = "timer", .start = 0x40, .end = 0x5f }, 218 { .name = "timer", .start = 0x40, .end = 0x5f },
221 { .name = "keyboard", .start = 0x60, .end = 0x6f }, 219 { .name = "keyboard", .start = 0x60, .end = 0x6f },
222 { .name = "dma page reg", .start = 0x80, .end = 0x8f }, 220 { .name = "dma page reg", .start = 0x80, .end = 0x8f },
223 { .name = "pic2", .start = 0xa0, .end = 0xbf }, 221 { .name = "pic2", .start = 0xa0, .end = 0xbf },
224 { .name = "dma2", .start = 0xc0, .end = 0xdf }, 222 { .name = "dma2", .start = 0xc0, .end = 0xdf },
225 }; 223 };
226 224
227 struct resource *io = &ioport_resource; 225 struct resource *io = &ioport_resource;
228 size_t i; 226 size_t i;
229 227
230 if (hose_head) { 228 if (hose_head) {
231 struct pci_controller *hose; 229 struct pci_controller *hose;
232 for (hose = hose_head; hose; hose = hose->next) 230 for (hose = hose_head; hose; hose = hose->next)
233 if (hose->index == 0) { 231 if (hose->index == 0) {
234 io = hose->io_space; 232 io = hose->io_space;
235 break; 233 break;
236 } 234 }
237 } 235 }
238 236
239 /* Fix up for the Jensen's queer RTC placement. */ 237 /* Fix up for the Jensen's queer RTC placement. */
240 standard_io_resources[0].start = RTC_PORT(0); 238 standard_io_resources[0].start = RTC_PORT(0);
241 standard_io_resources[0].end = RTC_PORT(0) + 0x10; 239 standard_io_resources[0].end = RTC_PORT(0) + 0x10;
242 240
243 for (i = 0; i < N(standard_io_resources); ++i) 241 for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
244 request_resource(io, standard_io_resources+i); 242 request_resource(io, standard_io_resources+i);
245 } 243 }
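This hunk is the point of the patch: the file-local N() macro is dropped in favour of the shared ARRAY_SIZE() from <linux/kernel.h>, which expands to the same sizeof(x)/sizeof(x[0]) ratio. A standalone sketch of the idiom; the macro is re-defined locally only so the example compiles on its own, and the demo_* names are invented.

#include <stdio.h>

/* Same expansion the kernel uses; defined here so the sketch is standalone. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct demo_resource { const char *name; unsigned int start, end; };

int main(void)
{
	static const struct demo_resource res[] = {
		{ "dma1",  0x00, 0x1f },
		{ "pic1",  0x20, 0x3f },
		{ "timer", 0x40, 0x5f },
	};
	size_t i;

	/* Same loop shape as reserve_std_resources() above. */
	for (i = 0; i < ARRAY_SIZE(res); ++i)
		printf("%-5s 0x%02x-0x%02x\n", res[i].name, res[i].start, res[i].end);
	return 0;
}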
246 244
247 #define PFN_MAX PFN_DOWN(0x80000000) 245 #define PFN_MAX PFN_DOWN(0x80000000)
248 #define for_each_mem_cluster(memdesc, cluster, i) \ 246 #define for_each_mem_cluster(memdesc, cluster, i) \
249 for ((cluster) = (memdesc)->cluster, (i) = 0; \ 247 for ((cluster) = (memdesc)->cluster, (i) = 0; \
250 (i) < (memdesc)->numclusters; (i)++, (cluster)++) 248 (i) < (memdesc)->numclusters; (i)++, (cluster)++)
251 249
252 static unsigned long __init 250 static unsigned long __init
253 get_mem_size_limit(char *s) 251 get_mem_size_limit(char *s)
254 { 252 {
255 unsigned long end = 0; 253 unsigned long end = 0;
256 char *from = s; 254 char *from = s;
257 255
258 end = simple_strtoul(from, &from, 0); 256 end = simple_strtoul(from, &from, 0);
259 if ( *from == 'K' || *from == 'k' ) { 257 if ( *from == 'K' || *from == 'k' ) {
260 end = end << 10; 258 end = end << 10;
261 from++; 259 from++;
262 } else if ( *from == 'M' || *from == 'm' ) { 260 } else if ( *from == 'M' || *from == 'm' ) {
263 end = end << 20; 261 end = end << 20;
264 from++; 262 from++;
265 } else if ( *from == 'G' || *from == 'g' ) { 263 } else if ( *from == 'G' || *from == 'g' ) {
266 end = end << 30; 264 end = end << 30;
267 from++; 265 from++;
268 } 266 }
269 return end >> PAGE_SHIFT; /* Return the PFN of the limit. */ 267 return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
270 } 268 }
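get_mem_size_limit() above scales the mem= argument by an optional K/M/G suffix and returns a page-frame count rather than a byte count. A userspace sketch of the same arithmetic, assuming PAGE_SHIFT is 13 (Alpha's 8 KB pages) for the example.

#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SHIFT 13   /* 8 KB pages, as on Alpha */

static unsigned long demo_mem_limit_pfn(const char *s)
{
	char *end;
	unsigned long bytes = strtoul(s, &end, 0);

	switch (*end) {
	case 'K': case 'k': bytes <<= 10; break;
	case 'M': case 'm': bytes <<= 20; break;
	case 'G': case 'g': bytes <<= 30; break;
	}
	return bytes >> DEMO_PAGE_SHIFT;   /* PFN of the limit */
}

int main(void)
{
	printf("%lu pages\n", demo_mem_limit_pfn("512M"));  /* 65536 */
	return 0;
}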
271 269
272 #ifdef CONFIG_BLK_DEV_INITRD 270 #ifdef CONFIG_BLK_DEV_INITRD
273 void * __init 271 void * __init
274 move_initrd(unsigned long mem_limit) 272 move_initrd(unsigned long mem_limit)
275 { 273 {
276 void *start; 274 void *start;
277 unsigned long size; 275 unsigned long size;
278 276
279 size = initrd_end - initrd_start; 277 size = initrd_end - initrd_start;
280 start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0); 278 start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
281 if (!start || __pa(start) + size > mem_limit) { 279 if (!start || __pa(start) + size > mem_limit) {
282 initrd_start = initrd_end = 0; 280 initrd_start = initrd_end = 0;
283 return NULL; 281 return NULL;
284 } 282 }
285 memmove(start, (void *)initrd_start, size); 283 memmove(start, (void *)initrd_start, size);
286 initrd_start = (unsigned long)start; 284 initrd_start = (unsigned long)start;
287 initrd_end = initrd_start + size; 285 initrd_end = initrd_start + size;
288 printk("initrd moved to %p\n", start); 286 printk("initrd moved to %p\n", start);
289 return start; 287 return start;
290 } 288 }
291 #endif 289 #endif
292 290
293 #ifndef CONFIG_DISCONTIGMEM 291 #ifndef CONFIG_DISCONTIGMEM
294 static void __init 292 static void __init
295 setup_memory(void *kernel_end) 293 setup_memory(void *kernel_end)
296 { 294 {
297 struct memclust_struct * cluster; 295 struct memclust_struct * cluster;
298 struct memdesc_struct * memdesc; 296 struct memdesc_struct * memdesc;
299 unsigned long start_kernel_pfn, end_kernel_pfn; 297 unsigned long start_kernel_pfn, end_kernel_pfn;
300 unsigned long bootmap_size, bootmap_pages, bootmap_start; 298 unsigned long bootmap_size, bootmap_pages, bootmap_start;
301 unsigned long start, end; 299 unsigned long start, end;
302 unsigned long i; 300 unsigned long i;
303 301
304 /* Find free clusters, and init and free the bootmem accordingly. */ 302 /* Find free clusters, and init and free the bootmem accordingly. */
305 memdesc = (struct memdesc_struct *) 303 memdesc = (struct memdesc_struct *)
306 (hwrpb->mddt_offset + (unsigned long) hwrpb); 304 (hwrpb->mddt_offset + (unsigned long) hwrpb);
307 305
308 for_each_mem_cluster(memdesc, cluster, i) { 306 for_each_mem_cluster(memdesc, cluster, i) {
309 printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n", 307 printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
310 i, cluster->usage, cluster->start_pfn, 308 i, cluster->usage, cluster->start_pfn,
311 cluster->start_pfn + cluster->numpages); 309 cluster->start_pfn + cluster->numpages);
312 310
313 /* Bit 0 is console/PALcode reserved. Bit 1 is 311 /* Bit 0 is console/PALcode reserved. Bit 1 is
314 non-volatile memory -- we might want to mark 312 non-volatile memory -- we might want to mark
315 this for later. */ 313 this for later. */
316 if (cluster->usage & 3) 314 if (cluster->usage & 3)
317 continue; 315 continue;
318 316
319 end = cluster->start_pfn + cluster->numpages; 317 end = cluster->start_pfn + cluster->numpages;
320 if (end > max_low_pfn) 318 if (end > max_low_pfn)
321 max_low_pfn = end; 319 max_low_pfn = end;
322 } 320 }
323 321
324 /* 322 /*
325 * Except for the NUMA systems (wildfire, marvel) all of the 323 * Except for the NUMA systems (wildfire, marvel) all of the
326 * Alpha systems we run on support 32GB of memory or less. 324 * Alpha systems we run on support 32GB of memory or less.
327 * Since the NUMA systems introduce large holes in memory addressing, 325 * Since the NUMA systems introduce large holes in memory addressing,
328 * we can get into a situation where there is not enough contiguous 326 * we can get into a situation where there is not enough contiguous
329 * memory for the memory map. 327 * memory for the memory map.
330 * 328 *
331 * Limit memory to the first 32GB to limit the NUMA systems to 329 * Limit memory to the first 32GB to limit the NUMA systems to
332 * memory on their first node (wildfire) or 2 (marvel) to avoid 330 * memory on their first node (wildfire) or 2 (marvel) to avoid
333 * not being able to produce the memory map. In order to access 331 * not being able to produce the memory map. In order to access
334 * all of the memory on the NUMA systems, build with discontiguous 332 * all of the memory on the NUMA systems, build with discontiguous
335 * memory support. 333 * memory support.
336 * 334 *
337 * If the user specified a memory limit, let that memory limit stand. 335 * If the user specified a memory limit, let that memory limit stand.
338 */ 336 */
339 if (!mem_size_limit) 337 if (!mem_size_limit)
340 mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT; 338 mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;
341 339
342 if (mem_size_limit && max_low_pfn >= mem_size_limit) 340 if (mem_size_limit && max_low_pfn >= mem_size_limit)
343 { 341 {
344 printk("setup: forcing memory size to %ldK (from %ldK).\n", 342 printk("setup: forcing memory size to %ldK (from %ldK).\n",
345 mem_size_limit << (PAGE_SHIFT - 10), 343 mem_size_limit << (PAGE_SHIFT - 10),
346 max_low_pfn << (PAGE_SHIFT - 10)); 344 max_low_pfn << (PAGE_SHIFT - 10));
347 max_low_pfn = mem_size_limit; 345 max_low_pfn = mem_size_limit;
348 } 346 }
349 347
350 /* Find the bounds of kernel memory. */ 348 /* Find the bounds of kernel memory. */
351 start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS); 349 start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
352 end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end)); 350 end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
353 bootmap_start = -1; 351 bootmap_start = -1;
354 352
355 try_again: 353 try_again:
356 if (max_low_pfn <= end_kernel_pfn) 354 if (max_low_pfn <= end_kernel_pfn)
357 panic("not enough memory to boot"); 355 panic("not enough memory to boot");
358 356
359 /* We need to know how many physically contiguous pages 357 /* We need to know how many physically contiguous pages
360 we'll need for the bootmap. */ 358 we'll need for the bootmap. */
361 bootmap_pages = bootmem_bootmap_pages(max_low_pfn); 359 bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
362 360
363 /* Now find a good region in which to allocate the bootmap. */ 361 /* Now find a good region in which to allocate the bootmap. */
364 for_each_mem_cluster(memdesc, cluster, i) { 362 for_each_mem_cluster(memdesc, cluster, i) {
365 if (cluster->usage & 3) 363 if (cluster->usage & 3)
366 continue; 364 continue;
367 365
368 start = cluster->start_pfn; 366 start = cluster->start_pfn;
369 end = start + cluster->numpages; 367 end = start + cluster->numpages;
370 if (start >= max_low_pfn) 368 if (start >= max_low_pfn)
371 continue; 369 continue;
372 if (end > max_low_pfn) 370 if (end > max_low_pfn)
373 end = max_low_pfn; 371 end = max_low_pfn;
374 if (start < start_kernel_pfn) { 372 if (start < start_kernel_pfn) {
375 if (end > end_kernel_pfn 373 if (end > end_kernel_pfn
376 && end - end_kernel_pfn >= bootmap_pages) { 374 && end - end_kernel_pfn >= bootmap_pages) {
377 bootmap_start = end_kernel_pfn; 375 bootmap_start = end_kernel_pfn;
378 break; 376 break;
379 } else if (end > start_kernel_pfn) 377 } else if (end > start_kernel_pfn)
380 end = start_kernel_pfn; 378 end = start_kernel_pfn;
381 } else if (start < end_kernel_pfn) 379 } else if (start < end_kernel_pfn)
382 start = end_kernel_pfn; 380 start = end_kernel_pfn;
383 if (end - start >= bootmap_pages) { 381 if (end - start >= bootmap_pages) {
384 bootmap_start = start; 382 bootmap_start = start;
385 break; 383 break;
386 } 384 }
387 } 385 }
388 386
389 if (bootmap_start == ~0UL) { 387 if (bootmap_start == ~0UL) {
390 max_low_pfn >>= 1; 388 max_low_pfn >>= 1;
391 goto try_again; 389 goto try_again;
392 } 390 }
393 391
394 /* Allocate the bootmap and mark the whole MM as reserved. */ 392 /* Allocate the bootmap and mark the whole MM as reserved. */
395 bootmap_size = init_bootmem(bootmap_start, max_low_pfn); 393 bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
396 394
397 /* Mark the free regions. */ 395 /* Mark the free regions. */
398 for_each_mem_cluster(memdesc, cluster, i) { 396 for_each_mem_cluster(memdesc, cluster, i) {
399 if (cluster->usage & 3) 397 if (cluster->usage & 3)
400 continue; 398 continue;
401 399
402 start = cluster->start_pfn; 400 start = cluster->start_pfn;
403 end = cluster->start_pfn + cluster->numpages; 401 end = cluster->start_pfn + cluster->numpages;
404 if (start >= max_low_pfn) 402 if (start >= max_low_pfn)
405 continue; 403 continue;
406 if (end > max_low_pfn) 404 if (end > max_low_pfn)
407 end = max_low_pfn; 405 end = max_low_pfn;
408 if (start < start_kernel_pfn) { 406 if (start < start_kernel_pfn) {
409 if (end > end_kernel_pfn) { 407 if (end > end_kernel_pfn) {
410 free_bootmem(PFN_PHYS(start), 408 free_bootmem(PFN_PHYS(start),
411 (PFN_PHYS(start_kernel_pfn) 409 (PFN_PHYS(start_kernel_pfn)
412 - PFN_PHYS(start))); 410 - PFN_PHYS(start)));
413 printk("freeing pages %ld:%ld\n", 411 printk("freeing pages %ld:%ld\n",
414 start, start_kernel_pfn); 412 start, start_kernel_pfn);
415 start = end_kernel_pfn; 413 start = end_kernel_pfn;
416 } else if (end > start_kernel_pfn) 414 } else if (end > start_kernel_pfn)
417 end = start_kernel_pfn; 415 end = start_kernel_pfn;
418 } else if (start < end_kernel_pfn) 416 } else if (start < end_kernel_pfn)
419 start = end_kernel_pfn; 417 start = end_kernel_pfn;
420 if (start >= end) 418 if (start >= end)
421 continue; 419 continue;
422 420
423 free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start)); 421 free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
424 printk("freeing pages %ld:%ld\n", start, end); 422 printk("freeing pages %ld:%ld\n", start, end);
425 } 423 }
426 424
427 /* Reserve the bootmap memory. */ 425 /* Reserve the bootmap memory. */
428 reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size); 426 reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
429 printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size)); 427 printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
430 428
431 #ifdef CONFIG_BLK_DEV_INITRD 429 #ifdef CONFIG_BLK_DEV_INITRD
432 initrd_start = INITRD_START; 430 initrd_start = INITRD_START;
433 if (initrd_start) { 431 if (initrd_start) {
434 initrd_end = initrd_start+INITRD_SIZE; 432 initrd_end = initrd_start+INITRD_SIZE;
435 printk("Initial ramdisk at: 0x%p (%lu bytes)\n", 433 printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
436 (void *) initrd_start, INITRD_SIZE); 434 (void *) initrd_start, INITRD_SIZE);
437 435
438 if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) { 436 if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
439 if (!move_initrd(PFN_PHYS(max_low_pfn))) 437 if (!move_initrd(PFN_PHYS(max_low_pfn)))
440 printk("initrd extends beyond end of memory " 438 printk("initrd extends beyond end of memory "
441 "(0x%08lx > 0x%p)\ndisabling initrd\n", 439 "(0x%08lx > 0x%p)\ndisabling initrd\n",
442 initrd_end, 440 initrd_end,
443 phys_to_virt(PFN_PHYS(max_low_pfn))); 441 phys_to_virt(PFN_PHYS(max_low_pfn)));
444 } else { 442 } else {
445 reserve_bootmem(virt_to_phys((void *)initrd_start), 443 reserve_bootmem(virt_to_phys((void *)initrd_start),
446 INITRD_SIZE); 444 INITRD_SIZE);
447 } 445 }
448 } 446 }
449 #endif /* CONFIG_BLK_DEV_INITRD */ 447 #endif /* CONFIG_BLK_DEV_INITRD */
450 } 448 }
451 #else 449 #else
452 extern void setup_memory(void *); 450 extern void setup_memory(void *);
453 #endif /* !CONFIG_DISCONTIGMEM */ 451 #endif /* !CONFIG_DISCONTIGMEM */
454 452
455 int __init 453 int __init
456 page_is_ram(unsigned long pfn) 454 page_is_ram(unsigned long pfn)
457 { 455 {
458 struct memclust_struct * cluster; 456 struct memclust_struct * cluster;
459 struct memdesc_struct * memdesc; 457 struct memdesc_struct * memdesc;
460 unsigned long i; 458 unsigned long i;
461 459
462 memdesc = (struct memdesc_struct *) 460 memdesc = (struct memdesc_struct *)
463 (hwrpb->mddt_offset + (unsigned long) hwrpb); 461 (hwrpb->mddt_offset + (unsigned long) hwrpb);
464 for_each_mem_cluster(memdesc, cluster, i) 462 for_each_mem_cluster(memdesc, cluster, i)
465 { 463 {
466 if (pfn >= cluster->start_pfn && 464 if (pfn >= cluster->start_pfn &&
467 pfn < cluster->start_pfn + cluster->numpages) { 465 pfn < cluster->start_pfn + cluster->numpages) {
468 return (cluster->usage & 3) ? 0 : 1; 466 return (cluster->usage & 3) ? 0 : 1;
469 } 467 }
470 } 468 }
471 469
472 return 0; 470 return 0;
473 } 471 }
474 472
475 static int __init 473 static int __init
476 register_cpus(void) 474 register_cpus(void)
477 { 475 {
478 int i; 476 int i;
479 477
480 for_each_possible_cpu(i) { 478 for_each_possible_cpu(i) {
481 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); 479 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
482 if (!p) 480 if (!p)
483 return -ENOMEM; 481 return -ENOMEM;
484 register_cpu(p, i); 482 register_cpu(p, i);
485 } 483 }
486 return 0; 484 return 0;
487 } 485 }
488 486
489 arch_initcall(register_cpus); 487 arch_initcall(register_cpus);
490 488
491 void __init 489 void __init
492 setup_arch(char **cmdline_p) 490 setup_arch(char **cmdline_p)
493 { 491 {
494 extern char _end[]; 492 extern char _end[];
495 493
496 struct alpha_machine_vector *vec = NULL; 494 struct alpha_machine_vector *vec = NULL;
497 struct percpu_struct *cpu; 495 struct percpu_struct *cpu;
498 char *type_name, *var_name, *p; 496 char *type_name, *var_name, *p;
499 void *kernel_end = _end; /* end of kernel */ 497 void *kernel_end = _end; /* end of kernel */
500 char *args = command_line; 498 char *args = command_line;
501 499
502 hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr); 500 hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
503 boot_cpuid = hard_smp_processor_id(); 501 boot_cpuid = hard_smp_processor_id();
504 502
505 /* 503 /*
506 * Pre-process the system type to make sure it will be valid. 504 * Pre-process the system type to make sure it will be valid.
507 * 505 *
508 * This may restore real CABRIO and EB66+ family names, ie 506 * This may restore real CABRIO and EB66+ family names, ie
509 * EB64+ and EB66. 507 * EB64+ and EB66.
510 * 508 *
511 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series) 509 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
512 * and AS1200 (DIGITAL Server 5000 series) have the type as 510 * and AS1200 (DIGITAL Server 5000 series) have the type as
513 * the negative of the real one. 511 * the negative of the real one.
514 */ 512 */
515 if ((long)hwrpb->sys_type < 0) { 513 if ((long)hwrpb->sys_type < 0) {
516 hwrpb->sys_type = -((long)hwrpb->sys_type); 514 hwrpb->sys_type = -((long)hwrpb->sys_type);
517 hwrpb_update_checksum(hwrpb); 515 hwrpb_update_checksum(hwrpb);
518 } 516 }
519 517
520 /* Register a call for panic conditions. */ 518 /* Register a call for panic conditions. */
521 atomic_notifier_chain_register(&panic_notifier_list, 519 atomic_notifier_chain_register(&panic_notifier_list,
522 &alpha_panic_block); 520 &alpha_panic_block);
523 521
524 #ifdef CONFIG_ALPHA_GENERIC 522 #ifdef CONFIG_ALPHA_GENERIC
525 /* Assume that we've booted from SRM if we haven't booted from MILO. 523 /* Assume that we've booted from SRM if we haven't booted from MILO.
526 Detect the latter by looking for "MILO" in the system serial nr. */ 524 Detect the latter by looking for "MILO" in the system serial nr. */
527 alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0; 525 alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
528 #endif 526 #endif
529 527
530 /* If we are using SRM, we want to allow callbacks 528 /* If we are using SRM, we want to allow callbacks
531 as early as possible, so do this NOW, and then 529 as early as possible, so do this NOW, and then
532 they should work immediately thereafter. 530 they should work immediately thereafter.
533 */ 531 */
534 kernel_end = callback_init(kernel_end); 532 kernel_end = callback_init(kernel_end);
535 533
536 /* 534 /*
537 * Locate the command line. 535 * Locate the command line.
538 */ 536 */
539 /* Hack for Jensen... since we're restricted to 8 or 16 chars for 537 /* Hack for Jensen... since we're restricted to 8 or 16 chars for
540 boot flags depending on the boot mode, we need some shorthand. 538 boot flags depending on the boot mode, we need some shorthand.
541 This should do for installation. */ 539 This should do for installation. */
542 if (strcmp(COMMAND_LINE, "INSTALL") == 0) { 540 if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
543 strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line); 541 strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
544 } else { 542 } else {
545 strlcpy(command_line, COMMAND_LINE, sizeof command_line); 543 strlcpy(command_line, COMMAND_LINE, sizeof command_line);
546 } 544 }
547 strcpy(saved_command_line, command_line); 545 strcpy(saved_command_line, command_line);
548 *cmdline_p = command_line; 546 *cmdline_p = command_line;
549 547
550 /* 548 /*
551 * Process command-line arguments. 549 * Process command-line arguments.
552 */ 550 */
553 while ((p = strsep(&args, " \t")) != NULL) { 551 while ((p = strsep(&args, " \t")) != NULL) {
554 if (!*p) continue; 552 if (!*p) continue;
555 if (strncmp(p, "alpha_mv=", 9) == 0) { 553 if (strncmp(p, "alpha_mv=", 9) == 0) {
556 vec = get_sysvec_byname(p+9); 554 vec = get_sysvec_byname(p+9);
557 continue; 555 continue;
558 } 556 }
559 if (strncmp(p, "cycle=", 6) == 0) { 557 if (strncmp(p, "cycle=", 6) == 0) {
560 est_cycle_freq = simple_strtol(p+6, NULL, 0); 558 est_cycle_freq = simple_strtol(p+6, NULL, 0);
561 continue; 559 continue;
562 } 560 }
563 if (strncmp(p, "mem=", 4) == 0) { 561 if (strncmp(p, "mem=", 4) == 0) {
564 mem_size_limit = get_mem_size_limit(p+4); 562 mem_size_limit = get_mem_size_limit(p+4);
565 continue; 563 continue;
566 } 564 }
567 if (strncmp(p, "srmcons", 7) == 0) { 565 if (strncmp(p, "srmcons", 7) == 0) {
568 srmcons_output |= 1; 566 srmcons_output |= 1;
569 continue; 567 continue;
570 } 568 }
571 if (strncmp(p, "console=srm", 11) == 0) { 569 if (strncmp(p, "console=srm", 11) == 0) {
572 srmcons_output |= 2; 570 srmcons_output |= 2;
573 continue; 571 continue;
574 } 572 }
575 if (strncmp(p, "gartsize=", 9) == 0) { 573 if (strncmp(p, "gartsize=", 9) == 0) {
576 alpha_agpgart_size = 574 alpha_agpgart_size =
577 get_mem_size_limit(p+9) << PAGE_SHIFT; 575 get_mem_size_limit(p+9) << PAGE_SHIFT;
578 continue; 576 continue;
579 } 577 }
580 #ifdef CONFIG_VERBOSE_MCHECK 578 #ifdef CONFIG_VERBOSE_MCHECK
581 if (strncmp(p, "verbose_mcheck=", 15) == 0) { 579 if (strncmp(p, "verbose_mcheck=", 15) == 0) {
582 alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0); 580 alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
583 continue; 581 continue;
584 } 582 }
585 #endif 583 #endif
586 } 584 }
587 585
588 /* Replace the command line, now that we've killed it with strsep. */ 586 /* Replace the command line, now that we've killed it with strsep. */
589 strcpy(command_line, saved_command_line); 587 strcpy(command_line, saved_command_line);
590 588
591 /* If we want SRM console printk echoing early, do it now. */ 589 /* If we want SRM console printk echoing early, do it now. */
592 if (alpha_using_srm && srmcons_output) { 590 if (alpha_using_srm && srmcons_output) {
593 register_srm_console(); 591 register_srm_console();
594 592
595 /* 593 /*
596 * If "console=srm" was specified, clear the srmcons_output 594 * If "console=srm" was specified, clear the srmcons_output
597 * flag now so that time.c won't unregister_srm_console 595 * flag now so that time.c won't unregister_srm_console
598 */ 596 */
599 if (srmcons_output & 2) 597 if (srmcons_output & 2)
600 srmcons_output = 0; 598 srmcons_output = 0;
601 } 599 }
602 600
603 #ifdef CONFIG_MAGIC_SYSRQ 601 #ifdef CONFIG_MAGIC_SYSRQ
604 /* If we're using SRM, make sysrq-b halt back to the prom, 602 /* If we're using SRM, make sysrq-b halt back to the prom,
605 not auto-reboot. */ 603 not auto-reboot. */
606 if (alpha_using_srm) { 604 if (alpha_using_srm) {
607 struct sysrq_key_op *op = __sysrq_get_key_op('b'); 605 struct sysrq_key_op *op = __sysrq_get_key_op('b');
608 op->handler = (void *) machine_halt; 606 op->handler = (void *) machine_halt;
609 } 607 }
610 #endif 608 #endif
611 609
612 /* 610 /*
613 * Identify and reconfigure for the current system. 611 * Identify and reconfigure for the current system.
614 */ 612 */
615 cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset); 613 cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
616 614
617 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation, 615 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
618 cpu->type, &type_name, &var_name); 616 cpu->type, &type_name, &var_name);
619 if (*var_name == '0') 617 if (*var_name == '0')
620 var_name = ""; 618 var_name = "";
621 619
622 if (!vec) { 620 if (!vec) {
623 vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation, 621 vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
624 cpu->type); 622 cpu->type);
625 } 623 }
626 624
627 if (!vec) { 625 if (!vec) {
628 panic("Unsupported system type: %s%s%s (%ld %ld)\n", 626 panic("Unsupported system type: %s%s%s (%ld %ld)\n",
629 type_name, (*var_name ? " variation " : ""), var_name, 627 type_name, (*var_name ? " variation " : ""), var_name,
630 hwrpb->sys_type, hwrpb->sys_variation); 628 hwrpb->sys_type, hwrpb->sys_variation);
631 } 629 }
632 if (vec != &alpha_mv) { 630 if (vec != &alpha_mv) {
633 alpha_mv = *vec; 631 alpha_mv = *vec;
634 } 632 }
635 633
636 printk("Booting " 634 printk("Booting "
637 #ifdef CONFIG_ALPHA_GENERIC 635 #ifdef CONFIG_ALPHA_GENERIC
638 "GENERIC " 636 "GENERIC "
639 #endif 637 #endif
640 "on %s%s%s using machine vector %s from %s\n", 638 "on %s%s%s using machine vector %s from %s\n",
641 type_name, (*var_name ? " variation " : ""), 639 type_name, (*var_name ? " variation " : ""),
642 var_name, alpha_mv.vector_name, 640 var_name, alpha_mv.vector_name,
643 (alpha_using_srm ? "SRM" : "MILO")); 641 (alpha_using_srm ? "SRM" : "MILO"));
644 642
645 printk("Major Options: " 643 printk("Major Options: "
646 #ifdef CONFIG_SMP 644 #ifdef CONFIG_SMP
647 "SMP " 645 "SMP "
648 #endif 646 #endif
649 #ifdef CONFIG_ALPHA_EV56 647 #ifdef CONFIG_ALPHA_EV56
650 "EV56 " 648 "EV56 "
651 #endif 649 #endif
652 #ifdef CONFIG_ALPHA_EV67 650 #ifdef CONFIG_ALPHA_EV67
653 "EV67 " 651 "EV67 "
654 #endif 652 #endif
655 #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS 653 #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
656 "LEGACY_START " 654 "LEGACY_START "
657 #endif 655 #endif
658 #ifdef CONFIG_VERBOSE_MCHECK 656 #ifdef CONFIG_VERBOSE_MCHECK
659 "VERBOSE_MCHECK " 657 "VERBOSE_MCHECK "
660 #endif 658 #endif
661 659
662 #ifdef CONFIG_DISCONTIGMEM 660 #ifdef CONFIG_DISCONTIGMEM
663 "DISCONTIGMEM " 661 "DISCONTIGMEM "
664 #ifdef CONFIG_NUMA 662 #ifdef CONFIG_NUMA
665 "NUMA " 663 "NUMA "
666 #endif 664 #endif
667 #endif 665 #endif
668 666
669 #ifdef CONFIG_DEBUG_SPINLOCK 667 #ifdef CONFIG_DEBUG_SPINLOCK
670 "DEBUG_SPINLOCK " 668 "DEBUG_SPINLOCK "
671 #endif 669 #endif
672 #ifdef CONFIG_MAGIC_SYSRQ 670 #ifdef CONFIG_MAGIC_SYSRQ
673 "MAGIC_SYSRQ " 671 "MAGIC_SYSRQ "
674 #endif 672 #endif
675 "\n"); 673 "\n");
676 674
677 printk("Command line: %s\n", command_line); 675 printk("Command line: %s\n", command_line);
678 676
679 /* 677 /*
680 * Sync up the HAE. 678 * Sync up the HAE.
681 * Save the SRM's current value for restoration. 679 * Save the SRM's current value for restoration.
682 */ 680 */
683 srm_hae = *alpha_mv.hae_register; 681 srm_hae = *alpha_mv.hae_register;
684 __set_hae(alpha_mv.hae_cache); 682 __set_hae(alpha_mv.hae_cache);
685 683
686 /* Reset enable correctable error reports. */ 684 /* Reset enable correctable error reports. */
687 wrmces(0x7); 685 wrmces(0x7);
688 686
689 /* Find our memory. */ 687 /* Find our memory. */
690 setup_memory(kernel_end); 688 setup_memory(kernel_end);
691 689
692 /* First guess at cpu cache sizes. Do this before init_arch. */ 690 /* First guess at cpu cache sizes. Do this before init_arch. */
693 determine_cpu_caches(cpu->type); 691 determine_cpu_caches(cpu->type);
694 692
695 /* Initialize the machine. Usually has to do with setting up 693 /* Initialize the machine. Usually has to do with setting up
696 DMA windows and the like. */ 694 DMA windows and the like. */
697 if (alpha_mv.init_arch) 695 if (alpha_mv.init_arch)
698 alpha_mv.init_arch(); 696 alpha_mv.init_arch();
699 697
700 /* Reserve standard resources. */ 698 /* Reserve standard resources. */
701 reserve_std_resources(); 699 reserve_std_resources();
702 700
703 /* 701 /*
704 * Give us a default console. TGA users will see nothing until 702 * Give us a default console. TGA users will see nothing until
705 * chr_dev_init is called, rather late in the boot sequence. 703 * chr_dev_init is called, rather late in the boot sequence.
706 */ 704 */
707 705
708 #ifdef CONFIG_VT 706 #ifdef CONFIG_VT
709 #if defined(CONFIG_VGA_CONSOLE) 707 #if defined(CONFIG_VGA_CONSOLE)
710 conswitchp = &vga_con; 708 conswitchp = &vga_con;
711 #elif defined(CONFIG_DUMMY_CONSOLE) 709 #elif defined(CONFIG_DUMMY_CONSOLE)
712 conswitchp = &dummy_con; 710 conswitchp = &dummy_con;
713 #endif 711 #endif
714 #endif 712 #endif
715 713
716 /* Default root filesystem to sda2. */ 714 /* Default root filesystem to sda2. */
717 ROOT_DEV = Root_SDA2; 715 ROOT_DEV = Root_SDA2;
718 716
719 #ifdef CONFIG_EISA 717 #ifdef CONFIG_EISA
720 /* FIXME: only set this when we actually have EISA in this box? */ 718 /* FIXME: only set this when we actually have EISA in this box? */
721 EISA_bus = 1; 719 EISA_bus = 1;
722 #endif 720 #endif
723 721
724 /* 722 /*
725 * Check ASN in HWRPB for validity, report if bad. 723 * Check ASN in HWRPB for validity, report if bad.
726 * FIXME: how was this failing? Should we trust it instead, 724 * FIXME: how was this failing? Should we trust it instead,
727 * and copy the value into alpha_mv.max_asn? 725 * and copy the value into alpha_mv.max_asn?
728 */ 726 */
729 727
730 if (hwrpb->max_asn != MAX_ASN) { 728 if (hwrpb->max_asn != MAX_ASN) {
731 printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn); 729 printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
732 } 730 }
733 731
734 /* 732 /*
735 * Identify the flock of penguins. 733 * Identify the flock of penguins.
736 */ 734 */
737 735
738 #ifdef CONFIG_SMP 736 #ifdef CONFIG_SMP
739 setup_smp(); 737 setup_smp();
740 #endif 738 #endif
741 paging_init(); 739 paging_init();
742 } 740 }
743 741
744 void __init 742 void __init
745 disable_early_printk(void) 743 disable_early_printk(void)
746 { 744 {
747 if (alpha_using_srm && srmcons_output) { 745 if (alpha_using_srm && srmcons_output) {
748 unregister_srm_console(); 746 unregister_srm_console();
749 srmcons_output = 0; 747 srmcons_output = 0;
750 } 748 }
751 } 749 }
752 750
753 static char sys_unknown[] = "Unknown"; 751 static char sys_unknown[] = "Unknown";
754 static char systype_names[][16] = { 752 static char systype_names[][16] = {
755 "0", 753 "0",
756 "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen", 754 "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
757 "Pelican", "Morgan", "Sable", "Medulla", "Noname", 755 "Pelican", "Morgan", "Sable", "Medulla", "Noname",
758 "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind", 756 "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
759 "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1", 757 "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
760 "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake", 758 "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
761 "Cortex", "29", "Miata", "XXM", "Takara", "Yukon", 759 "Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
762 "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel" 760 "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
763 }; 761 };
764 762
765 static char unofficial_names[][8] = {"100", "Ruffian"}; 763 static char unofficial_names[][8] = {"100", "Ruffian"};
766 764
767 static char api_names[][16] = {"200", "Nautilus"}; 765 static char api_names[][16] = {"200", "Nautilus"};
768 766
769 static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"}; 767 static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
770 static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4}; 768 static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
771 769
772 static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"}; 770 static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
773 static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2}; 771 static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};
774 772
775 static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"}; 773 static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
776 static int eb64p_indices[] = {0,0,1,2}; 774 static int eb64p_indices[] = {0,0,1,2};
777 775
778 static char eb66_names[][8] = {"EB66", "EB66+"}; 776 static char eb66_names[][8] = {"EB66", "EB66+"};
779 static int eb66_indices[] = {0,0,1}; 777 static int eb66_indices[] = {0,0,1};
780 778
781 static char marvel_names[][16] = { 779 static char marvel_names[][16] = {
782 "Marvel/EV7" 780 "Marvel/EV7"
783 }; 781 };
784 static int marvel_indices[] = { 0 }; 782 static int marvel_indices[] = { 0 };
785 783
786 static char rawhide_names[][16] = { 784 static char rawhide_names[][16] = {
787 "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci" 785 "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
788 }; 786 };
789 static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4}; 787 static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};
790 788
791 static char titan_names[][16] = { 789 static char titan_names[][16] = {
792 "DEFAULT", "Privateer", "Falcon", "Granite" 790 "DEFAULT", "Privateer", "Falcon", "Granite"
793 }; 791 };
794 static int titan_indices[] = {0,1,2,2,3}; 792 static int titan_indices[] = {0,1,2,2,3};
795 793
796 static char tsunami_names[][16] = { 794 static char tsunami_names[][16] = {
797 "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper", 795 "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
798 "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne", 796 "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
799 "Flying Clipper", "Shark" 797 "Flying Clipper", "Shark"
800 }; 798 };
801 static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12}; 799 static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
802 800
803 static struct alpha_machine_vector * __init 801 static struct alpha_machine_vector * __init
804 get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu) 802 get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
805 { 803 {
806 static struct alpha_machine_vector *systype_vecs[] __initdata = 804 static struct alpha_machine_vector *systype_vecs[] __initdata =
807 { 805 {
808 NULL, /* 0 */ 806 NULL, /* 0 */
809 NULL, /* ADU */ 807 NULL, /* ADU */
810 NULL, /* Cobra */ 808 NULL, /* Cobra */
811 NULL, /* Ruby */ 809 NULL, /* Ruby */
812 NULL, /* Flamingo */ 810 NULL, /* Flamingo */
813 NULL, /* Mannequin */ 811 NULL, /* Mannequin */
814 &jensen_mv, 812 &jensen_mv,
815 NULL, /* Pelican */ 813 NULL, /* Pelican */
816 NULL, /* Morgan */ 814 NULL, /* Morgan */
817 NULL, /* Sable -- see below. */ 815 NULL, /* Sable -- see below. */
818 NULL, /* Medulla */ 816 NULL, /* Medulla */
819 &noname_mv, 817 &noname_mv,
820 NULL, /* Turbolaser */ 818 NULL, /* Turbolaser */
821 &avanti_mv, 819 &avanti_mv,
822 NULL, /* Mustang */ 820 NULL, /* Mustang */
823 NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */ 821 NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */
824 NULL, /* Tradewind */ 822 NULL, /* Tradewind */
825 NULL, /* Mikasa -- see below. */ 823 NULL, /* Mikasa -- see below. */
826 NULL, /* EB64 */ 824 NULL, /* EB64 */
827 NULL, /* EB66 -- see variation. */ 825 NULL, /* EB66 -- see variation. */
828 NULL, /* EB64+ -- see variation. */ 826 NULL, /* EB64+ -- see variation. */
829 &alphabook1_mv, 827 &alphabook1_mv,
830 &rawhide_mv, 828 &rawhide_mv,
831 NULL, /* K2 */ 829 NULL, /* K2 */
832 &lynx_mv, /* Lynx */ 830 &lynx_mv, /* Lynx */
833 &xl_mv, 831 &xl_mv,
834 NULL, /* EB164 -- see variation. */ 832 NULL, /* EB164 -- see variation. */
835 NULL, /* Noritake -- see below. */ 833 NULL, /* Noritake -- see below. */
836 NULL, /* Cortex */ 834 NULL, /* Cortex */
837 NULL, /* 29 */ 835 NULL, /* 29 */
838 &miata_mv, 836 &miata_mv,
839 NULL, /* XXM */ 837 NULL, /* XXM */
840 &takara_mv, 838 &takara_mv,
841 NULL, /* Yukon */ 839 NULL, /* Yukon */
842 NULL, /* Tsunami -- see variation. */ 840 NULL, /* Tsunami -- see variation. */
843 &wildfire_mv, /* Wildfire */ 841 &wildfire_mv, /* Wildfire */
844 NULL, /* CUSCO */ 842 NULL, /* CUSCO */
845 &eiger_mv, /* Eiger */ 843 &eiger_mv, /* Eiger */
846 NULL, /* Titan */ 844 NULL, /* Titan */
847 NULL, /* Marvel */ 845 NULL, /* Marvel */
848 }; 846 };
849 847
850 static struct alpha_machine_vector *unofficial_vecs[] __initdata = 848 static struct alpha_machine_vector *unofficial_vecs[] __initdata =
851 { 849 {
852 NULL, /* 100 */ 850 NULL, /* 100 */
853 &ruffian_mv, 851 &ruffian_mv,
854 }; 852 };
855 853
856 static struct alpha_machine_vector *api_vecs[] __initdata = 854 static struct alpha_machine_vector *api_vecs[] __initdata =
857 { 855 {
858 NULL, /* 200 */ 856 NULL, /* 200 */
859 &nautilus_mv, 857 &nautilus_mv,
860 }; 858 };
861 859
862 static struct alpha_machine_vector *alcor_vecs[] __initdata = 860 static struct alpha_machine_vector *alcor_vecs[] __initdata =
863 { 861 {
864 &alcor_mv, &xlt_mv, &xlt_mv 862 &alcor_mv, &xlt_mv, &xlt_mv
865 }; 863 };
866 864
867 static struct alpha_machine_vector *eb164_vecs[] __initdata = 865 static struct alpha_machine_vector *eb164_vecs[] __initdata =
868 { 866 {
869 &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv 867 &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
870 }; 868 };
871 869
872 static struct alpha_machine_vector *eb64p_vecs[] __initdata = 870 static struct alpha_machine_vector *eb64p_vecs[] __initdata =
873 { 871 {
874 &eb64p_mv, 872 &eb64p_mv,
875 &cabriolet_mv, 873 &cabriolet_mv,
876 &cabriolet_mv /* AlphaPCI64 */ 874 &cabriolet_mv /* AlphaPCI64 */
877 }; 875 };
878 876
879 static struct alpha_machine_vector *eb66_vecs[] __initdata = 877 static struct alpha_machine_vector *eb66_vecs[] __initdata =
880 { 878 {
881 &eb66_mv, 879 &eb66_mv,
882 &eb66p_mv 880 &eb66p_mv
883 }; 881 };
884 882
885 static struct alpha_machine_vector *marvel_vecs[] __initdata = 883 static struct alpha_machine_vector *marvel_vecs[] __initdata =
886 { 884 {
887 &marvel_ev7_mv, 885 &marvel_ev7_mv,
888 }; 886 };
889 887
890 static struct alpha_machine_vector *titan_vecs[] __initdata = 888 static struct alpha_machine_vector *titan_vecs[] __initdata =
891 { 889 {
892 &titan_mv, /* default */ 890 &titan_mv, /* default */
893 &privateer_mv, /* privateer */ 891 &privateer_mv, /* privateer */
894 &titan_mv, /* falcon */ 892 &titan_mv, /* falcon */
895 &privateer_mv, /* granite */ 893 &privateer_mv, /* granite */
896 }; 894 };
897 895
898 static struct alpha_machine_vector *tsunami_vecs[] __initdata = 896 static struct alpha_machine_vector *tsunami_vecs[] __initdata =
899 { 897 {
900 NULL, 898 NULL,
901 &dp264_mv, /* dp264 */ 899 &dp264_mv, /* dp264 */
902 &dp264_mv, /* warhol */ 900 &dp264_mv, /* warhol */
903 &dp264_mv, /* windjammer */ 901 &dp264_mv, /* windjammer */
904 &monet_mv, /* monet */ 902 &monet_mv, /* monet */
905 &clipper_mv, /* clipper */ 903 &clipper_mv, /* clipper */
906 &dp264_mv, /* goldrush */ 904 &dp264_mv, /* goldrush */
907 &webbrick_mv, /* webbrick */ 905 &webbrick_mv, /* webbrick */
908 &dp264_mv, /* catamaran */ 906 &dp264_mv, /* catamaran */
909 NULL, /* brisbane? */ 907 NULL, /* brisbane? */
910 NULL, /* melbourne? */ 908 NULL, /* melbourne? */
911 NULL, /* flying clipper? */ 909 NULL, /* flying clipper? */
912 &shark_mv, /* shark */ 910 &shark_mv, /* shark */
913 }; 911 };
914 912
915 /* ??? Do we need to distinguish between Rawhides? */ 913 /* ??? Do we need to distinguish between Rawhides? */
916 914
917 struct alpha_machine_vector *vec; 915 struct alpha_machine_vector *vec;
918 916
919 /* Search the system tables first... */ 917 /* Search the system tables first... */
920 vec = NULL; 918 vec = NULL;
921 if (type < N(systype_vecs)) { 919 if (type < ARRAY_SIZE(systype_vecs)) {
922 vec = systype_vecs[type]; 920 vec = systype_vecs[type];
923 } else if ((type > ST_API_BIAS) && 921 } else if ((type > ST_API_BIAS) &&
924 (type - ST_API_BIAS) < N(api_vecs)) { 922 (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) {
925 vec = api_vecs[type - ST_API_BIAS]; 923 vec = api_vecs[type - ST_API_BIAS];
926 } else if ((type > ST_UNOFFICIAL_BIAS) && 924 } else if ((type > ST_UNOFFICIAL_BIAS) &&
927 (type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) { 925 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) {
928 vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS]; 926 vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
929 } 927 }
930 928
931 /* If we've not found one, try for a variation. */ 929 /* If we've not found one, try for a variation. */
932 930
933 if (!vec) { 931 if (!vec) {
934 /* Member ID is a bit-field. */ 932 /* Member ID is a bit-field. */
935 unsigned long member = (variation >> 10) & 0x3f; 933 unsigned long member = (variation >> 10) & 0x3f;
936 934
937 cpu &= 0xffffffff; /* make it usable */ 935 cpu &= 0xffffffff; /* make it usable */
938 936
939 switch (type) { 937 switch (type) {
940 case ST_DEC_ALCOR: 938 case ST_DEC_ALCOR:
941 if (member < N(alcor_indices)) 939 if (member < ARRAY_SIZE(alcor_indices))
942 vec = alcor_vecs[alcor_indices[member]]; 940 vec = alcor_vecs[alcor_indices[member]];
943 break; 941 break;
944 case ST_DEC_EB164: 942 case ST_DEC_EB164:
945 if (member < N(eb164_indices)) 943 if (member < ARRAY_SIZE(eb164_indices))
946 vec = eb164_vecs[eb164_indices[member]]; 944 vec = eb164_vecs[eb164_indices[member]];
947 /* PC164 may show as EB164 variation with EV56 CPU, 945 /* PC164 may show as EB164 variation with EV56 CPU,
948 but, since no true EB164 had anything but EV5... */ 946 but, since no true EB164 had anything but EV5... */
949 if (vec == &eb164_mv && cpu == EV56_CPU) 947 if (vec == &eb164_mv && cpu == EV56_CPU)
950 vec = &pc164_mv; 948 vec = &pc164_mv;
951 break; 949 break;
952 case ST_DEC_EB64P: 950 case ST_DEC_EB64P:
953 if (member < N(eb64p_indices)) 951 if (member < ARRAY_SIZE(eb64p_indices))
954 vec = eb64p_vecs[eb64p_indices[member]]; 952 vec = eb64p_vecs[eb64p_indices[member]];
955 break; 953 break;
956 case ST_DEC_EB66: 954 case ST_DEC_EB66:
957 if (member < N(eb66_indices)) 955 if (member < ARRAY_SIZE(eb66_indices))
958 vec = eb66_vecs[eb66_indices[member]]; 956 vec = eb66_vecs[eb66_indices[member]];
959 break; 957 break;
960 case ST_DEC_MARVEL: 958 case ST_DEC_MARVEL:
961 if (member < N(marvel_indices)) 959 if (member < ARRAY_SIZE(marvel_indices))
962 vec = marvel_vecs[marvel_indices[member]]; 960 vec = marvel_vecs[marvel_indices[member]];
963 break; 961 break;
964 case ST_DEC_TITAN: 962 case ST_DEC_TITAN:
965 vec = titan_vecs[0]; /* default */ 963 vec = titan_vecs[0]; /* default */
966 if (member < N(titan_indices)) 964 if (member < ARRAY_SIZE(titan_indices))
967 vec = titan_vecs[titan_indices[member]]; 965 vec = titan_vecs[titan_indices[member]];
968 break; 966 break;
969 case ST_DEC_TSUNAMI: 967 case ST_DEC_TSUNAMI:
970 if (member < N(tsunami_indices)) 968 if (member < ARRAY_SIZE(tsunami_indices))
971 vec = tsunami_vecs[tsunami_indices[member]]; 969 vec = tsunami_vecs[tsunami_indices[member]];
972 break; 970 break;
973 case ST_DEC_1000: 971 case ST_DEC_1000:
974 if (cpu == EV5_CPU || cpu == EV56_CPU) 972 if (cpu == EV5_CPU || cpu == EV56_CPU)
975 vec = &mikasa_primo_mv; 973 vec = &mikasa_primo_mv;
976 else 974 else
977 vec = &mikasa_mv; 975 vec = &mikasa_mv;
978 break; 976 break;
979 case ST_DEC_NORITAKE: 977 case ST_DEC_NORITAKE:
980 if (cpu == EV5_CPU || cpu == EV56_CPU) 978 if (cpu == EV5_CPU || cpu == EV56_CPU)
981 vec = &noritake_primo_mv; 979 vec = &noritake_primo_mv;
982 else 980 else
983 vec = &noritake_mv; 981 vec = &noritake_mv;
984 break; 982 break;
985 case ST_DEC_2100_A500: 983 case ST_DEC_2100_A500:
986 if (cpu == EV5_CPU || cpu == EV56_CPU) 984 if (cpu == EV5_CPU || cpu == EV56_CPU)
987 vec = &sable_gamma_mv; 985 vec = &sable_gamma_mv;
988 else 986 else
989 vec = &sable_mv; 987 vec = &sable_mv;
990 break; 988 break;
991 } 989 }
992 } 990 }
993 return vec; 991 return vec;
994 } 992 }
995 993
996 static struct alpha_machine_vector * __init 994 static struct alpha_machine_vector * __init
997 get_sysvec_byname(const char *name) 995 get_sysvec_byname(const char *name)
998 { 996 {
999 static struct alpha_machine_vector *all_vecs[] __initdata = 997 static struct alpha_machine_vector *all_vecs[] __initdata =
1000 { 998 {
1001 &alcor_mv, 999 &alcor_mv,
1002 &alphabook1_mv, 1000 &alphabook1_mv,
1003 &avanti_mv, 1001 &avanti_mv,
1004 &cabriolet_mv, 1002 &cabriolet_mv,
1005 &clipper_mv, 1003 &clipper_mv,
1006 &dp264_mv, 1004 &dp264_mv,
1007 &eb164_mv, 1005 &eb164_mv,
1008 &eb64p_mv, 1006 &eb64p_mv,
1009 &eb66_mv, 1007 &eb66_mv,
1010 &eb66p_mv, 1008 &eb66p_mv,
1011 &eiger_mv, 1009 &eiger_mv,
1012 &jensen_mv, 1010 &jensen_mv,
1013 &lx164_mv, 1011 &lx164_mv,
1014 &lynx_mv, 1012 &lynx_mv,
1015 &miata_mv, 1013 &miata_mv,
1016 &mikasa_mv, 1014 &mikasa_mv,
1017 &mikasa_primo_mv, 1015 &mikasa_primo_mv,
1018 &monet_mv, 1016 &monet_mv,
1019 &nautilus_mv, 1017 &nautilus_mv,
1020 &noname_mv, 1018 &noname_mv,
1021 &noritake_mv, 1019 &noritake_mv,
1022 &noritake_primo_mv, 1020 &noritake_primo_mv,
1023 &p2k_mv, 1021 &p2k_mv,
1024 &pc164_mv, 1022 &pc164_mv,
1025 &privateer_mv, 1023 &privateer_mv,
1026 &rawhide_mv, 1024 &rawhide_mv,
1027 &ruffian_mv, 1025 &ruffian_mv,
1028 &rx164_mv, 1026 &rx164_mv,
1029 &sable_mv, 1027 &sable_mv,
1030 &sable_gamma_mv, 1028 &sable_gamma_mv,
1031 &shark_mv, 1029 &shark_mv,
1032 &sx164_mv, 1030 &sx164_mv,
1033 &takara_mv, 1031 &takara_mv,
1034 &webbrick_mv, 1032 &webbrick_mv,
1035 &wildfire_mv, 1033 &wildfire_mv,
1036 &xl_mv, 1034 &xl_mv,
1037 &xlt_mv 1035 &xlt_mv
1038 }; 1036 };
1039 1037
1040 size_t i; 1038 size_t i;
1041 1039
1042 for (i = 0; i < N(all_vecs); ++i) { 1040 for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) {
1043 struct alpha_machine_vector *mv = all_vecs[i]; 1041 struct alpha_machine_vector *mv = all_vecs[i];
1044 if (strcasecmp(mv->vector_name, name) == 0) 1042 if (strcasecmp(mv->vector_name, name) == 0)
1045 return mv; 1043 return mv;
1046 } 1044 }
1047 return NULL; 1045 return NULL;
1048 } 1046 }
1049 1047
1050 static void 1048 static void
1051 get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu, 1049 get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
1052 char **type_name, char **variation_name) 1050 char **type_name, char **variation_name)
1053 { 1051 {
1054 unsigned long member; 1052 unsigned long member;
1055 1053
1056 /* If not in the tables, make it UNKNOWN, 1054 /* If not in the tables, make it UNKNOWN,
1057 else set type name to family */ 1055 else set type name to family */
1058 if (type < N(systype_names)) { 1056 if (type < ARRAY_SIZE(systype_names)) {
1059 *type_name = systype_names[type]; 1057 *type_name = systype_names[type];
1060 } else if ((type > ST_API_BIAS) && 1058 } else if ((type > ST_API_BIAS) &&
1061 (type - ST_API_BIAS) < N(api_names)) { 1059 (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) {
1062 *type_name = api_names[type - ST_API_BIAS]; 1060 *type_name = api_names[type - ST_API_BIAS];
1063 } else if ((type > ST_UNOFFICIAL_BIAS) && 1061 } else if ((type > ST_UNOFFICIAL_BIAS) &&
1064 (type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) { 1062 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) {
1065 *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS]; 1063 *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
1066 } else { 1064 } else {
1067 *type_name = sys_unknown; 1065 *type_name = sys_unknown;
1068 *variation_name = sys_unknown; 1066 *variation_name = sys_unknown;
1069 return; 1067 return;
1070 } 1068 }
1071 1069
1072 /* Set variation to "0"; if variation is zero, done. */ 1070 /* Set variation to "0"; if variation is zero, done. */
1073 *variation_name = systype_names[0]; 1071 *variation_name = systype_names[0];
1074 if (variation == 0) { 1072 if (variation == 0) {
1075 return; 1073 return;
1076 } 1074 }
1077 1075
1078 member = (variation >> 10) & 0x3f; /* member ID is a bit-field */ 1076 member = (variation >> 10) & 0x3f; /* member ID is a bit-field */
1079 1077
1080 cpu &= 0xffffffff; /* make it usable */ 1078 cpu &= 0xffffffff; /* make it usable */
1081 1079
1082 switch (type) { /* select by family */ 1080 switch (type) { /* select by family */
1083 default: /* default to variation "0" for now */ 1081 default: /* default to variation "0" for now */
1084 break; 1082 break;
1085 case ST_DEC_EB164: 1083 case ST_DEC_EB164:
1086 if (member < N(eb164_indices)) 1084 if (member < ARRAY_SIZE(eb164_indices))
1087 *variation_name = eb164_names[eb164_indices[member]]; 1085 *variation_name = eb164_names[eb164_indices[member]];
1088 /* PC164 may show as EB164 variation, but with EV56 CPU, 1086 /* PC164 may show as EB164 variation, but with EV56 CPU,
1089 so, since no true EB164 had anything but EV5... */ 1087 so, since no true EB164 had anything but EV5... */
1090 if (eb164_indices[member] == 0 && cpu == EV56_CPU) 1088 if (eb164_indices[member] == 0 && cpu == EV56_CPU)
1091 *variation_name = eb164_names[1]; /* make it PC164 */ 1089 *variation_name = eb164_names[1]; /* make it PC164 */
1092 break; 1090 break;
1093 case ST_DEC_ALCOR: 1091 case ST_DEC_ALCOR:
1094 if (member < N(alcor_indices)) 1092 if (member < ARRAY_SIZE(alcor_indices))
1095 *variation_name = alcor_names[alcor_indices[member]]; 1093 *variation_name = alcor_names[alcor_indices[member]];
1096 break; 1094 break;
1097 case ST_DEC_EB64P: 1095 case ST_DEC_EB64P:
1098 if (member < N(eb64p_indices)) 1096 if (member < ARRAY_SIZE(eb64p_indices))
1099 *variation_name = eb64p_names[eb64p_indices[member]]; 1097 *variation_name = eb64p_names[eb64p_indices[member]];
1100 break; 1098 break;
1101 case ST_DEC_EB66: 1099 case ST_DEC_EB66:
1102 if (member < N(eb66_indices)) 1100 if (member < ARRAY_SIZE(eb66_indices))
1103 *variation_name = eb66_names[eb66_indices[member]]; 1101 *variation_name = eb66_names[eb66_indices[member]];
1104 break; 1102 break;
1105 case ST_DEC_MARVEL: 1103 case ST_DEC_MARVEL:
1106 if (member < N(marvel_indices)) 1104 if (member < ARRAY_SIZE(marvel_indices))
1107 *variation_name = marvel_names[marvel_indices[member]]; 1105 *variation_name = marvel_names[marvel_indices[member]];
1108 break; 1106 break;
1109 case ST_DEC_RAWHIDE: 1107 case ST_DEC_RAWHIDE:
1110 if (member < N(rawhide_indices)) 1108 if (member < ARRAY_SIZE(rawhide_indices))
1111 *variation_name = rawhide_names[rawhide_indices[member]]; 1109 *variation_name = rawhide_names[rawhide_indices[member]];
1112 break; 1110 break;
1113 case ST_DEC_TITAN: 1111 case ST_DEC_TITAN:
1114 *variation_name = titan_names[0]; /* default */ 1112 *variation_name = titan_names[0]; /* default */
1115 if (member < N(titan_indices)) 1113 if (member < ARRAY_SIZE(titan_indices))
1116 *variation_name = titan_names[titan_indices[member]]; 1114 *variation_name = titan_names[titan_indices[member]];
1117 break; 1115 break;
1118 case ST_DEC_TSUNAMI: 1116 case ST_DEC_TSUNAMI:
1119 if (member < N(tsunami_indices)) 1117 if (member < ARRAY_SIZE(tsunami_indices))
1120 *variation_name = tsunami_names[tsunami_indices[member]]; 1118 *variation_name = tsunami_names[tsunami_indices[member]];
1121 break; 1119 break;
1122 } 1120 }
1123 } 1121 }
1124 1122
1125 /* 1123 /*
1126 * A change was made to the HWRPB via an ECO and the following code 1124 * A change was made to the HWRPB via an ECO and the following code
1127 * tracks a part of the ECO. In HWRPB versions less than 5, the ECO 1125 * tracks a part of the ECO. In HWRPB versions less than 5, the ECO
1128 * was not implemented in the console firmware. If it's revision 5 or 1126 * was not implemented in the console firmware. If it's revision 5 or
1129 * greater we can get the name of the platform as an ASCII string from 1127 * greater we can get the name of the platform as an ASCII string from
1130 * the HWRPB. That's what this function does. It checks the revision 1128 * the HWRPB. That's what this function does. It checks the revision
1131 * level and if the string is in the HWRPB it returns the address of 1129 * level and if the string is in the HWRPB it returns the address of
1132 * the string--a pointer to the name of the platform. 1130 * the string--a pointer to the name of the platform.
1133 * 1131 *
1134 * Returns: 1132 * Returns:
1135 * - Pointer to an ASCII string if it's in the HWRPB 1133 * - Pointer to an ASCII string if it's in the HWRPB
1136 * - Pointer to a blank string if the data is not in the HWRPB. 1134 * - Pointer to a blank string if the data is not in the HWRPB.
1137 */ 1135 */
1138 1136
1139 static char * 1137 static char *
1140 platform_string(void) 1138 platform_string(void)
1141 { 1139 {
1142 struct dsr_struct *dsr; 1140 struct dsr_struct *dsr;
1143 static char unk_system_string[] = "N/A"; 1141 static char unk_system_string[] = "N/A";
1144 1142
1145 /* Go to the console for the string pointer. 1143 /* Go to the console for the string pointer.
1146 * If the rpb_vers is not 5 or greater the rpb 1144 * If the rpb_vers is not 5 or greater the rpb
1147 * is old and does not have this data in it. 1145 * is old and does not have this data in it.
1148 */ 1146 */
1149 if (hwrpb->revision < 5) 1147 if (hwrpb->revision < 5)
1150 return (unk_system_string); 1148 return (unk_system_string);
1151 else { 1149 else {
1152 /* The Dynamic System Recognition struct 1150 /* The Dynamic System Recognition struct
1153 * has the system platform name starting 1151 * has the system platform name starting
1154 * after the character count of the string. 1152 * after the character count of the string.
1155 */ 1153 */
1156 dsr = ((struct dsr_struct *) 1154 dsr = ((struct dsr_struct *)
1157 ((char *)hwrpb + hwrpb->dsr_offset)); 1155 ((char *)hwrpb + hwrpb->dsr_offset));
1158 return ((char *)dsr + (dsr->sysname_off + 1156 return ((char *)dsr + (dsr->sysname_off +
1159 sizeof(long))); 1157 sizeof(long)));
1160 } 1158 }
1161 } 1159 }
1162 1160
1163 static int 1161 static int
1164 get_nr_processors(struct percpu_struct *cpubase, unsigned long num) 1162 get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
1165 { 1163 {
1166 struct percpu_struct *cpu; 1164 struct percpu_struct *cpu;
1167 unsigned long i; 1165 unsigned long i;
1168 int count = 0; 1166 int count = 0;
1169 1167
1170 for (i = 0; i < num; i++) { 1168 for (i = 0; i < num; i++) {
1171 cpu = (struct percpu_struct *) 1169 cpu = (struct percpu_struct *)
1172 ((char *)cpubase + i*hwrpb->processor_size); 1170 ((char *)cpubase + i*hwrpb->processor_size);
1173 if ((cpu->flags & 0x1cc) == 0x1cc) 1171 if ((cpu->flags & 0x1cc) == 0x1cc)
1174 count++; 1172 count++;
1175 } 1173 }
1176 return count; 1174 return count;
1177 } 1175 }
1178 1176
1179 static void 1177 static void
1180 show_cache_size (struct seq_file *f, const char *which, int shape) 1178 show_cache_size (struct seq_file *f, const char *which, int shape)
1181 { 1179 {
1182 if (shape == -1) 1180 if (shape == -1)
1183 seq_printf (f, "%s\t\t: n/a\n", which); 1181 seq_printf (f, "%s\t\t: n/a\n", which);
1184 else if (shape == 0) 1182 else if (shape == 0)
1185 seq_printf (f, "%s\t\t: unknown\n", which); 1183 seq_printf (f, "%s\t\t: unknown\n", which);
1186 else 1184 else
1187 seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n", 1185 seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
1188 which, shape >> 10, shape & 15, 1186 which, shape >> 10, shape & 15,
1189 1 << ((shape >> 4) & 15)); 1187 1 << ((shape >> 4) & 15));
1190 } 1188 }
1191 1189
1192 static int 1190 static int
1193 show_cpuinfo(struct seq_file *f, void *slot) 1191 show_cpuinfo(struct seq_file *f, void *slot)
1194 { 1192 {
1195 extern struct unaligned_stat { 1193 extern struct unaligned_stat {
1196 unsigned long count, va, pc; 1194 unsigned long count, va, pc;
1197 } unaligned[2]; 1195 } unaligned[2];
1198 1196
1199 static char cpu_names[][8] = { 1197 static char cpu_names[][8] = {
1200 "EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56", 1198 "EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
1201 "EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL", 1199 "EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
1202 "EV68CX", "EV7", "EV79", "EV69" 1200 "EV68CX", "EV7", "EV79", "EV69"
1203 }; 1201 };
1204 1202
1205 struct percpu_struct *cpu = slot; 1203 struct percpu_struct *cpu = slot;
1206 unsigned int cpu_index; 1204 unsigned int cpu_index;
1207 char *cpu_name; 1205 char *cpu_name;
1208 char *systype_name; 1206 char *systype_name;
1209 char *sysvariation_name; 1207 char *sysvariation_name;
1210 int nr_processors; 1208 int nr_processors;
1211 1209
1212 cpu_index = (unsigned) (cpu->type - 1); 1210 cpu_index = (unsigned) (cpu->type - 1);
1213 cpu_name = "Unknown"; 1211 cpu_name = "Unknown";
1214 if (cpu_index < N(cpu_names)) 1212 if (cpu_index < ARRAY_SIZE(cpu_names))
1215 cpu_name = cpu_names[cpu_index]; 1213 cpu_name = cpu_names[cpu_index];
1216 1214
1217 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation, 1215 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
1218 cpu->type, &systype_name, &sysvariation_name); 1216 cpu->type, &systype_name, &sysvariation_name);
1219 1217
1220 nr_processors = get_nr_processors(cpu, hwrpb->nr_processors); 1218 nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
1221 1219
1222 seq_printf(f, "cpu\t\t\t: Alpha\n" 1220 seq_printf(f, "cpu\t\t\t: Alpha\n"
1223 "cpu model\t\t: %s\n" 1221 "cpu model\t\t: %s\n"
1224 "cpu variation\t\t: %ld\n" 1222 "cpu variation\t\t: %ld\n"
1225 "cpu revision\t\t: %ld\n" 1223 "cpu revision\t\t: %ld\n"
1226 "cpu serial number\t: %s\n" 1224 "cpu serial number\t: %s\n"
1227 "system type\t\t: %s\n" 1225 "system type\t\t: %s\n"
1228 "system variation\t: %s\n" 1226 "system variation\t: %s\n"
1229 "system revision\t\t: %ld\n" 1227 "system revision\t\t: %ld\n"
1230 "system serial number\t: %s\n" 1228 "system serial number\t: %s\n"
1231 "cycle frequency [Hz]\t: %lu %s\n" 1229 "cycle frequency [Hz]\t: %lu %s\n"
1232 "timer frequency [Hz]\t: %lu.%02lu\n" 1230 "timer frequency [Hz]\t: %lu.%02lu\n"
1233 "page size [bytes]\t: %ld\n" 1231 "page size [bytes]\t: %ld\n"
1234 "phys. address bits\t: %ld\n" 1232 "phys. address bits\t: %ld\n"
1235 "max. addr. space #\t: %ld\n" 1233 "max. addr. space #\t: %ld\n"
1236 "BogoMIPS\t\t: %lu.%02lu\n" 1234 "BogoMIPS\t\t: %lu.%02lu\n"
1237 "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n" 1235 "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1238 "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n" 1236 "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1239 "platform string\t\t: %s\n" 1237 "platform string\t\t: %s\n"
1240 "cpus detected\t\t: %d\n", 1238 "cpus detected\t\t: %d\n",
1241 cpu_name, cpu->variation, cpu->revision, 1239 cpu_name, cpu->variation, cpu->revision,
1242 (char*)cpu->serial_no, 1240 (char*)cpu->serial_no,
1243 systype_name, sysvariation_name, hwrpb->sys_revision, 1241 systype_name, sysvariation_name, hwrpb->sys_revision,
1244 (char*)hwrpb->ssn, 1242 (char*)hwrpb->ssn,
1245 est_cycle_freq ? : hwrpb->cycle_freq, 1243 est_cycle_freq ? : hwrpb->cycle_freq,
1246 est_cycle_freq ? "est." : "", 1244 est_cycle_freq ? "est." : "",
1247 hwrpb->intr_freq / 4096, 1245 hwrpb->intr_freq / 4096,
1248 (100 * hwrpb->intr_freq / 4096) % 100, 1246 (100 * hwrpb->intr_freq / 4096) % 100,
1249 hwrpb->pagesize, 1247 hwrpb->pagesize,
1250 hwrpb->pa_bits, 1248 hwrpb->pa_bits,
1251 hwrpb->max_asn, 1249 hwrpb->max_asn,
1252 loops_per_jiffy / (500000/HZ), 1250 loops_per_jiffy / (500000/HZ),
1253 (loops_per_jiffy / (5000/HZ)) % 100, 1251 (loops_per_jiffy / (5000/HZ)) % 100,
1254 unaligned[0].count, unaligned[0].pc, unaligned[0].va, 1252 unaligned[0].count, unaligned[0].pc, unaligned[0].va,
1255 unaligned[1].count, unaligned[1].pc, unaligned[1].va, 1253 unaligned[1].count, unaligned[1].pc, unaligned[1].va,
1256 platform_string(), nr_processors); 1254 platform_string(), nr_processors);
1257 1255
1258 #ifdef CONFIG_SMP 1256 #ifdef CONFIG_SMP
1259 seq_printf(f, "cpus active\t\t: %d\n" 1257 seq_printf(f, "cpus active\t\t: %d\n"
1260 "cpu active mask\t\t: %016lx\n", 1258 "cpu active mask\t\t: %016lx\n",
1261 num_online_cpus(), cpus_addr(cpu_possible_map)[0]); 1259 num_online_cpus(), cpus_addr(cpu_possible_map)[0]);
1262 #endif 1260 #endif
1263 1261
1264 show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape); 1262 show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
1265 show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape); 1263 show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
1266 show_cache_size (f, "L2 cache", alpha_l2_cacheshape); 1264 show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
1267 show_cache_size (f, "L3 cache", alpha_l3_cacheshape); 1265 show_cache_size (f, "L3 cache", alpha_l3_cacheshape);
1268 1266
1269 return 0; 1267 return 0;
1270 } 1268 }
1271 1269
1272 static int __init 1270 static int __init
1273 read_mem_block(int *addr, int stride, int size) 1271 read_mem_block(int *addr, int stride, int size)
1274 { 1272 {
1275 long nloads = size / stride, cnt, tmp; 1273 long nloads = size / stride, cnt, tmp;
1276 1274
1277 __asm__ __volatile__( 1275 __asm__ __volatile__(
1278 " rpcc %0\n" 1276 " rpcc %0\n"
1279 "1: ldl %3,0(%2)\n" 1277 "1: ldl %3,0(%2)\n"
1280 " subq %1,1,%1\n" 1278 " subq %1,1,%1\n"
1281 /* Next two XORs introduce an explicit data dependency between 1279 /* Next two XORs introduce an explicit data dependency between
1282 consecutive loads in the loop, which will give us true load 1280 consecutive loads in the loop, which will give us true load
1283 latency. */ 1281 latency. */
1284 " xor %3,%2,%2\n" 1282 " xor %3,%2,%2\n"
1285 " xor %3,%2,%2\n" 1283 " xor %3,%2,%2\n"
1286 " addq %2,%4,%2\n" 1284 " addq %2,%4,%2\n"
1287 " bne %1,1b\n" 1285 " bne %1,1b\n"
1288 " rpcc %3\n" 1286 " rpcc %3\n"
1289 " subl %3,%0,%0\n" 1287 " subl %3,%0,%0\n"
1290 : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp) 1288 : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
1291 : "r" (stride), "1" (nloads), "2" (addr)); 1289 : "r" (stride), "1" (nloads), "2" (addr));
1292 1290
1293 return cnt / (size / stride); 1291 return cnt / (size / stride);
1294 } 1292 }
1295 1293
1296 #define CSHAPE(totalsize, linesize, assoc) \ 1294 #define CSHAPE(totalsize, linesize, assoc) \
1297 ((totalsize & ~0xff) | (linesize << 4) | assoc) 1295 ((totalsize & ~0xff) | (linesize << 4) | assoc)
1298 1296
1299 /* ??? EV5 supports up to 64M, but did the systems with more than 1297 /* ??? EV5 supports up to 64M, but did the systems with more than
1300 16M of BCACHE ever exist? */ 1298 16M of BCACHE ever exist? */
1301 #define MAX_BCACHE_SIZE 16*1024*1024 1299 #define MAX_BCACHE_SIZE 16*1024*1024
1302 1300
1303 /* Note that the offchip caches are direct mapped on all Alphas. */ 1301 /* Note that the offchip caches are direct mapped on all Alphas. */
1304 static int __init 1302 static int __init
1305 external_cache_probe(int minsize, int width) 1303 external_cache_probe(int minsize, int width)
1306 { 1304 {
1307 int cycles, prev_cycles = 1000000; 1305 int cycles, prev_cycles = 1000000;
1308 int stride = 1 << width; 1306 int stride = 1 << width;
1309 long size = minsize, maxsize = MAX_BCACHE_SIZE * 2; 1307 long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;
1310 1308
1311 if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT) 1309 if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
1312 maxsize = 1 << (floor_log2(max_low_pfn + 1) + PAGE_SHIFT); 1310 maxsize = 1 << (floor_log2(max_low_pfn + 1) + PAGE_SHIFT);
1313 1311
1314 /* Get the first block cached. */ 1312 /* Get the first block cached. */
1315 read_mem_block(__va(0), stride, size); 1313 read_mem_block(__va(0), stride, size);
1316 1314
1317 while (size < maxsize) { 1315 while (size < maxsize) {
1318 /* Get an average load latency in cycles. */ 1316 /* Get an average load latency in cycles. */
1319 cycles = read_mem_block(__va(0), stride, size); 1317 cycles = read_mem_block(__va(0), stride, size);
1320 if (cycles > prev_cycles * 2) { 1318 if (cycles > prev_cycles * 2) {
1321 /* Fine, we exceed the cache. */ 1319 /* Fine, we exceed the cache. */
1322 printk("%ldK Bcache detected; load hit latency %d " 1320 printk("%ldK Bcache detected; load hit latency %d "
1323 "cycles, load miss latency %d cycles\n", 1321 "cycles, load miss latency %d cycles\n",
1324 size >> 11, prev_cycles, cycles); 1322 size >> 11, prev_cycles, cycles);
1325 return CSHAPE(size >> 1, width, 1); 1323 return CSHAPE(size >> 1, width, 1);
1326 } 1324 }
1327 /* Try to get the next block cached. */ 1325 /* Try to get the next block cached. */
1328 read_mem_block(__va(size), stride, size); 1326 read_mem_block(__va(size), stride, size);
1329 prev_cycles = cycles; 1327 prev_cycles = cycles;
1330 size <<= 1; 1328 size <<= 1;
1331 } 1329 }
1332 return -1; /* No BCACHE found. */ 1330 return -1; /* No BCACHE found. */
1333 } 1331 }
1334 1332
1335 static void __init 1333 static void __init
1336 determine_cpu_caches (unsigned int cpu_type) 1334 determine_cpu_caches (unsigned int cpu_type)
1337 { 1335 {
1338 int L1I, L1D, L2, L3; 1336 int L1I, L1D, L2, L3;
1339 1337
1340 switch (cpu_type) { 1338 switch (cpu_type) {
1341 case EV4_CPU: 1339 case EV4_CPU:
1342 case EV45_CPU: 1340 case EV45_CPU:
1343 { 1341 {
1344 if (cpu_type == EV4_CPU) 1342 if (cpu_type == EV4_CPU)
1345 L1I = CSHAPE(8*1024, 5, 1); 1343 L1I = CSHAPE(8*1024, 5, 1);
1346 else 1344 else
1347 L1I = CSHAPE(16*1024, 5, 1); 1345 L1I = CSHAPE(16*1024, 5, 1);
1348 L1D = L1I; 1346 L1D = L1I;
1349 L3 = -1; 1347 L3 = -1;
1350 1348
1351 /* BIU_CTL is a write-only Abox register. PALcode has a 1349 /* BIU_CTL is a write-only Abox register. PALcode has a
1352 shadow copy, and may be available from some versions 1350 shadow copy, and may be available from some versions
1353 of the CSERVE PALcall. If we can get it, then 1351 of the CSERVE PALcall. If we can get it, then
1354 1352
1355 unsigned long biu_ctl, size; 1353 unsigned long biu_ctl, size;
1356 size = 128*1024 * (1 << ((biu_ctl >> 28) & 7)); 1354 size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
1357 L2 = CSHAPE (size, 5, 1); 1355 L2 = CSHAPE (size, 5, 1);
1358 1356
1359 Unfortunately, we can't rely on that. 1357 Unfortunately, we can't rely on that.
1360 */ 1358 */
1361 L2 = external_cache_probe(128*1024, 5); 1359 L2 = external_cache_probe(128*1024, 5);
1362 break; 1360 break;
1363 } 1361 }
1364 1362
1365 case LCA4_CPU: 1363 case LCA4_CPU:
1366 { 1364 {
1367 unsigned long car, size; 1365 unsigned long car, size;
1368 1366
1369 L1I = L1D = CSHAPE(8*1024, 5, 1); 1367 L1I = L1D = CSHAPE(8*1024, 5, 1);
1370 L3 = -1; 1368 L3 = -1;
1371 1369
1372 car = *(vuip) phys_to_virt (0x120000078UL); 1370 car = *(vuip) phys_to_virt (0x120000078UL);
1373 size = 64*1024 * (1 << ((car >> 5) & 7)); 1371 size = 64*1024 * (1 << ((car >> 5) & 7));
1374 /* No typo -- 8 byte cacheline size. Whodathunk. */ 1372 /* No typo -- 8 byte cacheline size. Whodathunk. */
1375 L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1); 1373 L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
1376 break; 1374 break;
1377 } 1375 }
1378 1376
1379 case EV5_CPU: 1377 case EV5_CPU:
1380 case EV56_CPU: 1378 case EV56_CPU:
1381 { 1379 {
1382 unsigned long sc_ctl, width; 1380 unsigned long sc_ctl, width;
1383 1381
1384 L1I = L1D = CSHAPE(8*1024, 5, 1); 1382 L1I = L1D = CSHAPE(8*1024, 5, 1);
1385 1383
1386 /* Check the line size of the Scache. */ 1384 /* Check the line size of the Scache. */
1387 sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL); 1385 sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
1388 width = sc_ctl & 0x1000 ? 6 : 5; 1386 width = sc_ctl & 0x1000 ? 6 : 5;
1389 L2 = CSHAPE (96*1024, width, 3); 1387 L2 = CSHAPE (96*1024, width, 3);
1390 1388
1391 /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode 1389 /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode
1392 has a shadow copy, and may be available from some versions 1390 has a shadow copy, and may be available from some versions
1393 of the CSERVE PALcall. If we can get it, then 1391 of the CSERVE PALcall. If we can get it, then
1394 1392
1395 unsigned long bc_control, bc_config, size; 1393 unsigned long bc_control, bc_config, size;
1396 size = 1024*1024 * (1 << ((bc_config & 7) - 1)); 1394 size = 1024*1024 * (1 << ((bc_config & 7) - 1));
1397 L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1); 1395 L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);
1398 1396
1399 Unfortunately, we can't rely on that. 1397 Unfortunately, we can't rely on that.
1400 */ 1398 */
1401 L3 = external_cache_probe(1024*1024, width); 1399 L3 = external_cache_probe(1024*1024, width);
1402 break; 1400 break;
1403 } 1401 }
1404 1402
1405 case PCA56_CPU: 1403 case PCA56_CPU:
1406 case PCA57_CPU: 1404 case PCA57_CPU:
1407 { 1405 {
1408 unsigned long cbox_config, size; 1406 unsigned long cbox_config, size;
1409 1407
1410 if (cpu_type == PCA56_CPU) { 1408 if (cpu_type == PCA56_CPU) {
1411 L1I = CSHAPE(16*1024, 6, 1); 1409 L1I = CSHAPE(16*1024, 6, 1);
1412 L1D = CSHAPE(8*1024, 5, 1); 1410 L1D = CSHAPE(8*1024, 5, 1);
1413 } else { 1411 } else {
1414 L1I = CSHAPE(32*1024, 6, 2); 1412 L1I = CSHAPE(32*1024, 6, 2);
1415 L1D = CSHAPE(16*1024, 5, 1); 1413 L1D = CSHAPE(16*1024, 5, 1);
1416 } 1414 }
1417 L3 = -1; 1415 L3 = -1;
1418 1416
1419 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); 1417 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
1420 size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); 1418 size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
1421 1419
1422 #if 0 1420 #if 0
1423 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1); 1421 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
1424 #else 1422 #else
1425 L2 = external_cache_probe(512*1024, 6); 1423 L2 = external_cache_probe(512*1024, 6);
1426 #endif 1424 #endif
1427 break; 1425 break;
1428 } 1426 }
1429 1427
1430 case EV6_CPU: 1428 case EV6_CPU:
1431 case EV67_CPU: 1429 case EV67_CPU:
1432 case EV68CB_CPU: 1430 case EV68CB_CPU:
1433 case EV68AL_CPU: 1431 case EV68AL_CPU:
1434 case EV68CX_CPU: 1432 case EV68CX_CPU:
1435 case EV69_CPU: 1433 case EV69_CPU:
1436 L1I = L1D = CSHAPE(64*1024, 6, 2); 1434 L1I = L1D = CSHAPE(64*1024, 6, 2);
1437 L2 = external_cache_probe(1024*1024, 6); 1435 L2 = external_cache_probe(1024*1024, 6);
1438 L3 = -1; 1436 L3 = -1;
1439 break; 1437 break;
1440 1438
1441 case EV7_CPU: 1439 case EV7_CPU:
1442 case EV79_CPU: 1440 case EV79_CPU:
1443 L1I = L1D = CSHAPE(64*1024, 6, 2); 1441 L1I = L1D = CSHAPE(64*1024, 6, 2);
1444 L2 = CSHAPE(7*1024*1024/4, 6, 7); 1442 L2 = CSHAPE(7*1024*1024/4, 6, 7);
1445 L3 = -1; 1443 L3 = -1;
1446 break; 1444 break;
1447 1445
1448 default: 1446 default:
1449 /* Nothing known about this cpu type. */ 1447 /* Nothing known about this cpu type. */
1450 L1I = L1D = L2 = L3 = 0; 1448 L1I = L1D = L2 = L3 = 0;
1451 break; 1449 break;
1452 } 1450 }
1453 1451
1454 alpha_l1i_cacheshape = L1I; 1452 alpha_l1i_cacheshape = L1I;
1455 alpha_l1d_cacheshape = L1D; 1453 alpha_l1d_cacheshape = L1D;
1456 alpha_l2_cacheshape = L2; 1454 alpha_l2_cacheshape = L2;
1457 alpha_l3_cacheshape = L3; 1455 alpha_l3_cacheshape = L3;
1458 } 1456 }
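The cache shapes assigned above are consumed as packed integers. Assuming the CSHAPE() macro defined earlier in setup.c (not visible in this hunk) packs them as (size & ~0xff) | (log2(linesize) << 4) | associativity, a small user-space decode of the EV6 entry CSHAPE(64*1024, 6, 2) would look like this sketch:

        /* Hedged sketch: assumes CSHAPE() packs a shape as
         * (size & ~0xff) | (log2(linesize) << 4) | associativity,
         * which is how the values above appear to be built.
         */
        #include <stdio.h>

        static void decode_cshape(long shape)
        {
                if (shape <= 0) {
                        printf("no cache (%ld)\n", shape);
                        return;
                }
                printf("size=%ldK linesize=%d assoc=%ld\n",
                       (shape & ~0xffL) / 1024,       /* total size in KiB */
                       1 << ((shape >> 4) & 0xf),     /* line size in bytes */
                       shape & 0xf);                  /* associativity */
        }

        int main(void)
        {
                long ev6_l1 = ((64 * 1024) & ~0xff) | (6 << 4) | 2;  /* CSHAPE(64K, 6, 2) */
                decode_cshape(ev6_l1);  /* size=64K linesize=64 assoc=2 */
                return 0;
        }

Under that assumption the EV6 L1 entry reads back as a 64 KB, 64-byte-line, 2-way cache, which matches the case labels above.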
1459 1457
1460 /* 1458 /*
1461 * We show only CPU #0 info. 1459 * We show only CPU #0 info.
1462 */ 1460 */
1463 static void * 1461 static void *
1464 c_start(struct seq_file *f, loff_t *pos) 1462 c_start(struct seq_file *f, loff_t *pos)
1465 { 1463 {
1466 return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset; 1464 return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset;
1467 } 1465 }
1468 1466
1469 static void * 1467 static void *
1470 c_next(struct seq_file *f, void *v, loff_t *pos) 1468 c_next(struct seq_file *f, void *v, loff_t *pos)
1471 { 1469 {
1472 return NULL; 1470 return NULL;
1473 } 1471 }
1474 1472
1475 static void 1473 static void
1476 c_stop(struct seq_file *f, void *v) 1474 c_stop(struct seq_file *f, void *v)
1477 { 1475 {
1478 } 1476 }
1479 1477
1480 struct seq_operations cpuinfo_op = { 1478 struct seq_operations cpuinfo_op = {
1481 .start = c_start, 1479 .start = c_start,
1482 .next = c_next, 1480 .next = c_next,
1483 .stop = c_stop, 1481 .stop = c_stop,
1484 .show = show_cpuinfo, 1482 .show = show_cpuinfo,
1485 }; 1483 };
1486 1484
1487 1485
1488 static int 1486 static int
1489 alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr) 1487 alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1490 { 1488 {
1491 #if 1 1489 #if 1
1492 /* FIXME FIXME FIXME */ 1490 /* FIXME FIXME FIXME */
1493 /* If we are using SRM and serial console, just hard halt here. */ 1491 /* If we are using SRM and serial console, just hard halt here. */
1494 if (alpha_using_srm && srmcons_output) 1492 if (alpha_using_srm && srmcons_output)
1495 __halt(); 1493 __halt();
1496 #endif 1494 #endif
1497 return NOTIFY_DONE; 1495 return NOTIFY_DONE;
1498 } 1496 }
1499 1497
1500 static __init int add_pcspkr(void) 1498 static __init int add_pcspkr(void)
1501 { 1499 {
1502 struct platform_device *pd; 1500 struct platform_device *pd;
1503 int ret; 1501 int ret;
1504 1502
1505 pd = platform_device_alloc("pcspkr", -1); 1503 pd = platform_device_alloc("pcspkr", -1);
1506 if (!pd) 1504 if (!pd)
1507 return -ENOMEM; 1505 return -ENOMEM;
1508 1506
1509 ret = platform_device_add(pd); 1507 ret = platform_device_add(pd);
1510 if (ret) 1508 if (ret)
1511 platform_device_put(pd); 1509 platform_device_put(pd);
1512 1510
1513 return ret; 1511 return ret;
1514 } 1512 }
1515 device_initcall(add_pcspkr); 1513 device_initcall(add_pcspkr);
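The pcspkr hook above follows the usual alloc/add ownership rule: if platform_device_add() fails the device was never registered, so platform_device_put() releases the reference taken by platform_device_alloc(); after a successful add the core owns the device and the symmetric teardown would be platform_device_unregister(). A minimal sketch of the same pattern with a hypothetical device name ("example-dev" is not part of this patch):

        #include <linux/init.h>
        #include <linux/errno.h>
        #include <linux/platform_device.h>

        static int __init add_example_dev(void)
        {
                struct platform_device *pd;
                int ret;

                pd = platform_device_alloc("example-dev", -1);
                if (!pd)
                        return -ENOMEM;

                ret = platform_device_add(pd);
                if (ret)
                        platform_device_put(pd);   /* never added: drop our reference */
                return ret;
        }
        device_initcall(add_example_dev);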
1516 1514
arch/alpha/kernel/sys_ruffian.c
1 /* 1 /*
2 * linux/arch/alpha/kernel/sys_ruffian.c 2 * linux/arch/alpha/kernel/sys_ruffian.c
3 * 3 *
4 * Copyright (C) 1995 David A Rusling 4 * Copyright (C) 1995 David A Rusling
5 * Copyright (C) 1996 Jay A Estabrook 5 * Copyright (C) 1996 Jay A Estabrook
6 * Copyright (C) 1998, 1999, 2000 Richard Henderson 6 * Copyright (C) 1998, 1999, 2000 Richard Henderson
7 * 7 *
8 * Code supporting the RUFFIAN. 8 * Code supporting the RUFFIAN.
9 */ 9 */
10 10
11 #include <linux/kernel.h> 11 #include <linux/kernel.h>
12 #include <linux/types.h> 12 #include <linux/types.h>
13 #include <linux/mm.h> 13 #include <linux/mm.h>
14 #include <linux/sched.h> 14 #include <linux/sched.h>
15 #include <linux/pci.h> 15 #include <linux/pci.h>
16 #include <linux/ioport.h> 16 #include <linux/ioport.h>
17 #include <linux/init.h> 17 #include <linux/init.h>
18 18
19 #include <asm/ptrace.h> 19 #include <asm/ptrace.h>
20 #include <asm/system.h> 20 #include <asm/system.h>
21 #include <asm/dma.h> 21 #include <asm/dma.h>
22 #include <asm/irq.h> 22 #include <asm/irq.h>
23 #include <asm/mmu_context.h> 23 #include <asm/mmu_context.h>
24 #include <asm/io.h> 24 #include <asm/io.h>
25 #include <asm/pgtable.h> 25 #include <asm/pgtable.h>
26 #include <asm/core_cia.h> 26 #include <asm/core_cia.h>
27 #include <asm/tlbflush.h> 27 #include <asm/tlbflush.h>
28 #include <asm/8253pit.h> 28 #include <asm/8253pit.h>
29 29
30 #include "proto.h" 30 #include "proto.h"
31 #include "irq_impl.h" 31 #include "irq_impl.h"
32 #include "pci_impl.h" 32 #include "pci_impl.h"
33 #include "machvec_impl.h" 33 #include "machvec_impl.h"
34 34
35 35
36 static void __init 36 static void __init
37 ruffian_init_irq(void) 37 ruffian_init_irq(void)
38 { 38 {
39 /* Invert 6&7 for i82371 */ 39 /* Invert 6&7 for i82371 */
40 *(vulp)PYXIS_INT_HILO = 0x000000c0UL; mb(); 40 *(vulp)PYXIS_INT_HILO = 0x000000c0UL; mb();
41 *(vulp)PYXIS_INT_CNFG = 0x00002064UL; mb(); /* all clear */ 41 *(vulp)PYXIS_INT_CNFG = 0x00002064UL; mb(); /* all clear */
42 42
43 outb(0x11,0xA0); 43 outb(0x11,0xA0);
44 outb(0x08,0xA1); 44 outb(0x08,0xA1);
45 outb(0x02,0xA1); 45 outb(0x02,0xA1);
46 outb(0x01,0xA1); 46 outb(0x01,0xA1);
47 outb(0xFF,0xA1); 47 outb(0xFF,0xA1);
48 48
49 outb(0x11,0x20); 49 outb(0x11,0x20);
50 outb(0x00,0x21); 50 outb(0x00,0x21);
51 outb(0x04,0x21); 51 outb(0x04,0x21);
52 outb(0x01,0x21); 52 outb(0x01,0x21);
53 outb(0xFF,0x21); 53 outb(0xFF,0x21);
54 54
55 /* Finish writing the 82C59A PIC Operation Control Words */ 55 /* Finish writing the 82C59A PIC Operation Control Words */
56 outb(0x20,0xA0); 56 outb(0x20,0xA0);
57 outb(0x20,0x20); 57 outb(0x20,0x20);
58 58
59 init_i8259a_irqs(); 59 init_i8259a_irqs();
60 60
61 /* Not interested in the bogus interrupts (0,3,6), 61 /* Not interested in the bogus interrupts (0,3,6),
62 NMI (1), HALT (2), flash (5), or 21142 (8). */ 62 NMI (1), HALT (2), flash (5), or 21142 (8). */
63 init_pyxis_irqs(0x16f0000); 63 init_pyxis_irqs(0x16f0000);
64 64
65 common_init_isa_dma(); 65 common_init_isa_dma();
66 } 66 }
67 67
68 #define RUFFIAN_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) 68 #define RUFFIAN_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
69 69
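RUFFIAN_LATCH rounds the PIT divisor to the nearest count rather than truncating. With the usual values for this era (PIT_TICK_RATE = 1193180 Hz and HZ = 1024 on Alpha; neither constant appears in this diff, so treat both as assumptions), the latch works out as follows:

        /* Hedged arithmetic check of RUFFIAN_LATCH with assumed constants. */
        #include <stdio.h>

        #define PIT_TICK_RATE   1193180         /* assumed 8254 input clock, Hz */
        #define HZ              1024            /* assumed Alpha tick rate */
        #define RUFFIAN_LATCH   ((PIT_TICK_RATE + HZ / 2) / HZ)

        int main(void)
        {
                printf("latch = %d\n", RUFFIAN_LATCH);                  /* 1165 */
                printf("actual tick = %.3f Hz\n",
                       (double)PIT_TICK_RATE / RUFFIAN_LATCH);          /* ~1024.2 Hz */
                return 0;
        }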
70 static void __init 70 static void __init
71 ruffian_init_rtc(void) 71 ruffian_init_rtc(void)
72 { 72 {
73 /* Ruffian does not have the RTC connected to the CPU timer 73 /* Ruffian does not have the RTC connected to the CPU timer
74 interrupt. Instead, it uses the PIT connected to IRQ 0. */ 74 interrupt. Instead, it uses the PIT connected to IRQ 0. */
75 75
76 /* Setup interval timer. */ 76 /* Setup interval timer. */
77 outb(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */ 77 outb(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */
78 outb(RUFFIAN_LATCH & 0xff, 0x40); /* LSB */ 78 outb(RUFFIAN_LATCH & 0xff, 0x40); /* LSB */
79 outb(RUFFIAN_LATCH >> 8, 0x40); /* MSB */ 79 outb(RUFFIAN_LATCH >> 8, 0x40); /* MSB */
80 80
81 outb(0xb6, 0x43); /* pit counter 2: speaker */ 81 outb(0xb6, 0x43); /* pit counter 2: speaker */
82 outb(0x31, 0x42); 82 outb(0x31, 0x42);
83 outb(0x13, 0x42); 83 outb(0x13, 0x42);
84 84
85 setup_irq(0, &timer_irqaction); 85 setup_irq(0, &timer_irqaction);
86 } 86 }
87 87
88 static void 88 static void
89 ruffian_kill_arch (int mode) 89 ruffian_kill_arch (int mode)
90 { 90 {
91 cia_kill_arch(mode); 91 cia_kill_arch(mode);
92 #if 0 92 #if 0
93 /* This only causes re-entry to ARCSBIOS */ 93 /* This only causes re-entry to ARCSBIOS */
94 /* Perhaps this works for other PYXIS as well? */ 94 /* Perhaps this works for other PYXIS as well? */
95 *(vuip) PYXIS_RESET = 0x0000dead; 95 *(vuip) PYXIS_RESET = 0x0000dead;
96 mb(); 96 mb();
97 #endif 97 #endif
98 } 98 }
99 99
100 /* 100 /*
101 * Interrupt routing: 101 * Interrupt routing:
102 * 102 *
103 * Primary bus 103 * Primary bus
104 * IdSel INTA INTB INTC INTD 104 * IdSel INTA INTB INTC INTD
105 * 21052 13 - - - - 105 * 21052 13 - - - -
106 * SIO 14 23 - - - 106 * SIO 14 23 - - -
107 * 21143 15 44 - - - 107 * 21143 15 44 - - -
108 * Slot 0 17 43 42 41 40 108 * Slot 0 17 43 42 41 40
109 * 109 *
110 * Secondary bus 110 * Secondary bus
111 * IdSel INTA INTB INTC INTD 111 * IdSel INTA INTB INTC INTD
112 * Slot 0 8 (18) 19 18 17 16 112 * Slot 0 8 (18) 19 18 17 16
113 * Slot 1 9 (19) 31 30 29 28 113 * Slot 1 9 (19) 31 30 29 28
114 * Slot 2 10 (20) 27 26 25 24 114 * Slot 2 10 (20) 27 26 25 24
115 * Slot 3 11 (21) 39 38 37 36 115 * Slot 3 11 (21) 39 38 37 36
116 * Slot 4 12 (22) 35 34 33 32 116 * Slot 4 12 (22) 35 34 33 32
117 * 53c875 13 (23) 20 - - - 117 * 53c875 13 (23) 20 - - -
118 * 118 *
119 */ 119 */
120 120
121 static int __init 121 static int __init
122 ruffian_map_irq(struct pci_dev *dev, u8 slot, u8 pin) 122 ruffian_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
123 { 123 {
124 static char irq_tab[11][5] __initdata = { 124 static char irq_tab[11][5] __initdata = {
125 /*INT INTA INTB INTC INTD */ 125 /*INT INTA INTB INTC INTD */
126 {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */ 126 {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */
127 {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */ 127 {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */
128 {44, 44, 44, 44, 44}, /* IdSel 15, 21143 */ 128 {44, 44, 44, 44, 44}, /* IdSel 15, 21143 */
129 {-1, -1, -1, -1, -1}, /* IdSel 16, none */ 129 {-1, -1, -1, -1, -1}, /* IdSel 16, none */
130 {43, 43, 42, 41, 40}, /* IdSel 17, 64-bit slot */ 130 {43, 43, 42, 41, 40}, /* IdSel 17, 64-bit slot */
131 /* the next 6 are actually on PCI bus 1, across the bridge */ 131 /* the next 6 are actually on PCI bus 1, across the bridge */
132 {19, 19, 18, 17, 16}, /* IdSel 8, slot 0 */ 132 {19, 19, 18, 17, 16}, /* IdSel 8, slot 0 */
133 {31, 31, 30, 29, 28}, /* IdSel 9, slot 1 */ 133 {31, 31, 30, 29, 28}, /* IdSel 9, slot 1 */
134 {27, 27, 26, 25, 24}, /* IdSel 10, slot 2 */ 134 {27, 27, 26, 25, 24}, /* IdSel 10, slot 2 */
135 {39, 39, 38, 37, 36}, /* IdSel 11, slot 3 */ 135 {39, 39, 38, 37, 36}, /* IdSel 11, slot 3 */
136 {35, 35, 34, 33, 32}, /* IdSel 12, slot 4 */ 136 {35, 35, 34, 33, 32}, /* IdSel 12, slot 4 */
137 {20, 20, 20, 20, 20}, /* IdSel 13, 53c875 */ 137 {20, 20, 20, 20, 20}, /* IdSel 13, 53c875 */
138 }; 138 };
139 const long min_idsel = 13, max_idsel = 23, irqs_per_slot = 5; 139 const long min_idsel = 13, max_idsel = 23, irqs_per_slot = 5;
140 return COMMON_TABLE_LOOKUP; 140 return COMMON_TABLE_LOOKUP;
141 } 141 }
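COMMON_TABLE_LOOKUP comes from pci_impl.h and is not part of this diff; judging by the min_idsel/max_idsel/irqs_per_slot locals it consumes, it presumably bounds-checks the IdSel and pin and then indexes irq_tab[slot - min_idsel][pin]. A user-space sketch under that assumption, using an excerpt of the table above:

        #include <stdio.h>

        /* Hedged sketch of what COMMON_TABLE_LOOKUP is assumed to do. */
        static long table_lookup(const char irq_tab[][5], long min_idsel,
                                 long max_idsel, long irqs_per_slot,
                                 long slot, long pin)
        {
                if (slot < min_idsel || slot > max_idsel || pin >= irqs_per_slot)
                        return -1;
                return irq_tab[slot - min_idsel][pin];
        }

        int main(void)
        {
                /* Rows for IdSel 13..15 only, excerpted from the table above. */
                static const char irq_tab[3][5] = {
                        {-1, -1, -1, -1, -1},   /* IdSel 13, 21052 */
                        {-1, -1, -1, -1, -1},   /* IdSel 14, SIO   */
                        {44, 44, 44, 44, 44},   /* IdSel 15, 21143 */
                };
                /* 21143 at IdSel 15, INTA is pin 1 -> row 2, column 1 -> IRQ 44 */
                printf("%ld\n", table_lookup(irq_tab, 13, 15, 5, 15, 1));
                return 0;
        }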
142 142
143 static u8 __init 143 static u8 __init
144 ruffian_swizzle(struct pci_dev *dev, u8 *pinp) 144 ruffian_swizzle(struct pci_dev *dev, u8 *pinp)
145 { 145 {
146 int slot, pin = *pinp; 146 int slot, pin = *pinp;
147 147
148 if (dev->bus->number == 0) { 148 if (dev->bus->number == 0) {
149 slot = PCI_SLOT(dev->devfn); 149 slot = PCI_SLOT(dev->devfn);
150 } 150 }
151 /* Check for the built-in bridge. */ 151 /* Check for the built-in bridge. */
152 else if (PCI_SLOT(dev->bus->self->devfn) == 13) { 152 else if (PCI_SLOT(dev->bus->self->devfn) == 13) {
153 slot = PCI_SLOT(dev->devfn) + 10; 153 slot = PCI_SLOT(dev->devfn) + 10;
154 } 154 }
155 else 155 else
156 { 156 {
157 /* Must be a card-based bridge. */ 157 /* Must be a card-based bridge. */
158 do { 158 do {
159 if (PCI_SLOT(dev->bus->self->devfn) == 13) { 159 if (PCI_SLOT(dev->bus->self->devfn) == 13) {
160 slot = PCI_SLOT(dev->devfn) + 10; 160 slot = PCI_SLOT(dev->devfn) + 10;
161 break; 161 break;
162 } 162 }
163 pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); 163 pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
164 164
165 /* Move up the chain of bridges. */ 165 /* Move up the chain of bridges. */
166 dev = dev->bus->self; 166 dev = dev->bus->self;
167 /* Slot of the next bridge. */ 167 /* Slot of the next bridge. */
168 slot = PCI_SLOT(dev->devfn); 168 slot = PCI_SLOT(dev->devfn);
169 } while (dev->bus->self); 169 } while (dev->bus->self);
170 } 170 }
171 *pinp = pin; 171 *pinp = pin;
172 return slot; 172 return slot;
173 } 173 }
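bridge_swizzle() is also defined outside this diff; it is assumed here to be the standard PCI-to-PCI bridge interrupt rotation, ((pin - 1) + slot) % 4 + 1 with INTA..INTD numbered 1..4. A sketch of one step of the walk above:

        #include <stdio.h>

        /* Assumed behaviour of bridge_swizzle(): standard bridge pin rotation. */
        static unsigned char swizzle(unsigned char pin, unsigned char slot)
        {
                return (((pin - 1) + slot) % 4) + 1;
        }

        int main(void)
        {
                /* A device in slot 3 behind a card-based bridge asserting INTA (1)
                 * appears at the bridge as INTD (4); the loop above repeats this
                 * for every bridge until it reaches the built-in one at slot 13.
                 */
                printf("%d\n", swizzle(1, 3));  /* prints 4 */
                return 0;
        }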
174 174
175 #ifdef BUILDING_FOR_MILO 175 #ifdef BUILDING_FOR_MILO
176 /* 176 /*
177 * The DeskStation Ruffian motherboard firmware does not place 177 * The DeskStation Ruffian motherboard firmware does not place
178 * the memory size in the PALimpure area. Therefore, we use 178 * the memory size in the PALimpure area. Therefore, we use
179 * the Bank Configuration Registers in PYXIS to obtain the size. 179 * the Bank Configuration Registers in PYXIS to obtain the size.
180 */ 180 */
181 static unsigned long __init 181 static unsigned long __init
182 ruffian_get_bank_size(unsigned long offset) 182 ruffian_get_bank_size(unsigned long offset)
183 { 183 {
184 unsigned long bank_addr, bank, ret = 0; 184 unsigned long bank_addr, bank, ret = 0;
185 185
186 /* Valid offsets are: 0x800, 0x840 and 0x880 186 /* Valid offsets are: 0x800, 0x840 and 0x880
187 since Ruffian only uses three banks. */ 187 since Ruffian only uses three banks. */
188 bank_addr = (unsigned long)PYXIS_MCR + offset; 188 bank_addr = (unsigned long)PYXIS_MCR + offset;
189 bank = *(vulp)bank_addr; 189 bank = *(vulp)bank_addr;
190 190
191 /* Check BANK_ENABLE */ 191 /* Check BANK_ENABLE */
192 if (bank & 0x01) { 192 if (bank & 0x01) {
193 static unsigned long size[] __initdata = { 193 static unsigned long size[] __initdata = {
194 0x40000000UL, /* 0x00, 1G */ 194 0x40000000UL, /* 0x00, 1G */
195 0x20000000UL, /* 0x02, 512M */ 195 0x20000000UL, /* 0x02, 512M */
196 0x10000000UL, /* 0x04, 256M */ 196 0x10000000UL, /* 0x04, 256M */
197 0x08000000UL, /* 0x06, 128M */ 197 0x08000000UL, /* 0x06, 128M */
198 0x04000000UL, /* 0x08, 64M */ 198 0x04000000UL, /* 0x08, 64M */
199 0x02000000UL, /* 0x0a, 32M */ 199 0x02000000UL, /* 0x0a, 32M */
200 0x01000000UL, /* 0x0c, 16M */ 200 0x01000000UL, /* 0x0c, 16M */
201 0x00800000UL, /* 0x0e, 8M */ 201 0x00800000UL, /* 0x0e, 8M */
202 0x80000000UL, /* 0x10, 2G */ 202 0x80000000UL, /* 0x10, 2G */
203 }; 203 };
204 204
205 bank = (bank & 0x1e) >> 1; 205 bank = (bank & 0x1e) >> 1;
206 if (bank < sizeof(size)/sizeof(*size)) 206 if (bank < ARRAY_SIZE(size))
207 ret = size[bank]; 207 ret = size[bank];
208 } 208 }
209 209
210 return ret; 210 return ret;
211 } 211 }
212 #endif /* BUILDING_FOR_MILO */ 212 #endif /* BUILDING_FOR_MILO */
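This hunk shows the pattern the whole commit targets: the open-coded sizeof(size)/sizeof(*size) bound check becomes ARRAY_SIZE(size), the kernel.h helper for sizeof(arr)/sizeof((arr)[0]). A user-space walk-through of one made-up bank register value:

        #include <stdio.h>

        /* Element count of a fixed-size array, matching the replaced expression. */
        #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

        int main(void)
        {
                static const unsigned long size[] = {
                        0x40000000UL, 0x20000000UL, 0x10000000UL, 0x08000000UL,
                        0x04000000UL, 0x02000000UL, 0x01000000UL, 0x00800000UL,
                        0x80000000UL,
                };
                unsigned long bank = 0x05;      /* hypothetical BCR value: enabled, code 0x04 */

                if (bank & 0x01) {                              /* BANK_ENABLE */
                        bank = (bank & 0x1e) >> 1;              /* -> index 2 */
                        if (bank < ARRAY_SIZE(size))            /* 2 < 9 */
                                printf("%luM\n", size[bank] >> 20);     /* 256M */
                }
                return 0;
        }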
213 213
214 /* 214 /*
215 * The System Vector 215 * The System Vector
216 */ 216 */
217 217
218 struct alpha_machine_vector ruffian_mv __initmv = { 218 struct alpha_machine_vector ruffian_mv __initmv = {
219 .vector_name = "Ruffian", 219 .vector_name = "Ruffian",
220 DO_EV5_MMU, 220 DO_EV5_MMU,
221 DO_DEFAULT_RTC, 221 DO_DEFAULT_RTC,
222 DO_PYXIS_IO, 222 DO_PYXIS_IO,
223 .machine_check = cia_machine_check, 223 .machine_check = cia_machine_check,
224 .max_isa_dma_address = ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS, 224 .max_isa_dma_address = ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS,
225 .min_io_address = DEFAULT_IO_BASE, 225 .min_io_address = DEFAULT_IO_BASE,
226 .min_mem_address = DEFAULT_MEM_BASE, 226 .min_mem_address = DEFAULT_MEM_BASE,
227 .pci_dac_offset = PYXIS_DAC_OFFSET, 227 .pci_dac_offset = PYXIS_DAC_OFFSET,
228 228
229 .nr_irqs = 48, 229 .nr_irqs = 48,
230 .device_interrupt = pyxis_device_interrupt, 230 .device_interrupt = pyxis_device_interrupt,
231 231
232 .init_arch = pyxis_init_arch, 232 .init_arch = pyxis_init_arch,
233 .init_irq = ruffian_init_irq, 233 .init_irq = ruffian_init_irq,
234 .init_rtc = ruffian_init_rtc, 234 .init_rtc = ruffian_init_rtc,
235 .init_pci = cia_init_pci, 235 .init_pci = cia_init_pci,
236 .kill_arch = ruffian_kill_arch, 236 .kill_arch = ruffian_kill_arch,
237 .pci_map_irq = ruffian_map_irq, 237 .pci_map_irq = ruffian_map_irq,
238 .pci_swizzle = ruffian_swizzle, 238 .pci_swizzle = ruffian_swizzle,
239 }; 239 };
240 ALIAS_MV(ruffian) 240 ALIAS_MV(ruffian)
241 241
arch/alpha/kernel/time.c
1 /* 1 /*
2 * linux/arch/alpha/kernel/time.c 2 * linux/arch/alpha/kernel/time.c
3 * 3 *
4 * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds 4 * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds
5 * 5 *
6 * This file contains the PC-specific time handling details: 6 * This file contains the PC-specific time handling details:
7 * reading the RTC at bootup, etc.. 7 * reading the RTC at bootup, etc..
8 * 1994-07-02 Alan Modra 8 * 1994-07-02 Alan Modra
9 * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime 9 * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
10 * 1995-03-26 Markus Kuhn 10 * 1995-03-26 Markus Kuhn
11 * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887 11 * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
12 * precision CMOS clock update 12 * precision CMOS clock update
13 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 13 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
14 * "A Kernel Model for Precision Timekeeping" by Dave Mills 14 * "A Kernel Model for Precision Timekeeping" by Dave Mills
15 * 1997-01-09 Adrian Sun 15 * 1997-01-09 Adrian Sun
16 * use interval timer if CONFIG_RTC=y 16 * use interval timer if CONFIG_RTC=y
17 * 1997-10-29 John Bowman (bowman@math.ualberta.ca) 17 * 1997-10-29 John Bowman (bowman@math.ualberta.ca)
18 * fixed tick loss calculation in timer_interrupt 18 * fixed tick loss calculation in timer_interrupt
19 * (round system clock to nearest tick instead of truncating) 19 * (round system clock to nearest tick instead of truncating)
20 * fixed algorithm in time_init for getting time from CMOS clock 20 * fixed algorithm in time_init for getting time from CMOS clock
21 * 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net) 21 * 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net)
22 * fixed algorithm in do_gettimeofday() for calculating the precise time 22 * fixed algorithm in do_gettimeofday() for calculating the precise time
23 * from processor cycle counter (now taking lost_ticks into account) 23 * from processor cycle counter (now taking lost_ticks into account)
24 * 2000-08-13 Jan-Benedict Glaw <jbglaw@lug-owl.de> 24 * 2000-08-13 Jan-Benedict Glaw <jbglaw@lug-owl.de>
25 * Fixed time_init to be aware of epoches != 1900. This prevents 25 * Fixed time_init to be aware of epoches != 1900. This prevents
26 * booting up in 2048 for me;) Code is stolen from rtc.c. 26 * booting up in 2048 for me;) Code is stolen from rtc.c.
27 * 2003-06-03 R. Scott Bailey <scott.bailey@eds.com> 27 * 2003-06-03 R. Scott Bailey <scott.bailey@eds.com>
28 * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM 28 * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
29 */ 29 */
30 #include <linux/errno.h> 30 #include <linux/errno.h>
31 #include <linux/module.h> 31 #include <linux/module.h>
32 #include <linux/sched.h> 32 #include <linux/sched.h>
33 #include <linux/kernel.h> 33 #include <linux/kernel.h>
34 #include <linux/param.h> 34 #include <linux/param.h>
35 #include <linux/string.h> 35 #include <linux/string.h>
36 #include <linux/mm.h> 36 #include <linux/mm.h>
37 #include <linux/delay.h> 37 #include <linux/delay.h>
38 #include <linux/ioport.h> 38 #include <linux/ioport.h>
39 #include <linux/irq.h> 39 #include <linux/irq.h>
40 #include <linux/interrupt.h> 40 #include <linux/interrupt.h>
41 #include <linux/init.h> 41 #include <linux/init.h>
42 #include <linux/bcd.h> 42 #include <linux/bcd.h>
43 #include <linux/profile.h> 43 #include <linux/profile.h>
44 44
45 #include <asm/uaccess.h> 45 #include <asm/uaccess.h>
46 #include <asm/io.h> 46 #include <asm/io.h>
47 #include <asm/hwrpb.h> 47 #include <asm/hwrpb.h>
48 #include <asm/8253pit.h> 48 #include <asm/8253pit.h>
49 49
50 #include <linux/mc146818rtc.h> 50 #include <linux/mc146818rtc.h>
51 #include <linux/time.h> 51 #include <linux/time.h>
52 #include <linux/timex.h> 52 #include <linux/timex.h>
53 53
54 #include "proto.h" 54 #include "proto.h"
55 #include "irq_impl.h" 55 #include "irq_impl.h"
56 56
57 extern unsigned long wall_jiffies; /* kernel/timer.c */ 57 extern unsigned long wall_jiffies; /* kernel/timer.c */
58 58
59 static int set_rtc_mmss(unsigned long); 59 static int set_rtc_mmss(unsigned long);
60 60
61 DEFINE_SPINLOCK(rtc_lock); 61 DEFINE_SPINLOCK(rtc_lock);
62 62
63 #define TICK_SIZE (tick_nsec / 1000) 63 #define TICK_SIZE (tick_nsec / 1000)
64 64
65 /* 65 /*
66 * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting 66 * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting
67 * by 48 gives us 16 bits for HZ while keeping the accuracy good even 67 * by 48 gives us 16 bits for HZ while keeping the accuracy good even
68 * for large CPU clock rates. 68 * for large CPU clock rates.
69 */ 69 */
70 #define FIX_SHIFT 48 70 #define FIX_SHIFT 48
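time_init() below sets scaled_ticks_per_cycle = (HZ << FIX_SHIFT) / cycle_freq, i.e. ticks per cycle in 16.48 fixed point. A worked value, assuming HZ = 1024 and a 500 MHz part (illustrative numbers, not from this diff; a 64-bit unsigned long is also assumed, as on Alpha):

        #include <stdio.h>

        #define FIX_SHIFT 48

        int main(void)
        {
                unsigned long hz = 1024, cycle_freq = 500000000UL;   /* assumed */
                unsigned long stpc = (hz << FIX_SHIFT) / cycle_freq;

                printf("scaled_ticks_per_cycle = %lu\n", stpc);      /* 576460752 */
                /* One tick is cycle_freq / hz ~= 488281 cycles; multiplying that
                 * by stpc lands at ~2^48, i.e. one whole tick in 16.48 format.
                 */
                printf("one tick ~= %lu (2^48 = %lu)\n",
                       (cycle_freq / hz) * stpc, 1UL << FIX_SHIFT);
                return 0;
        }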
71 71
72 /* lump static variables together for more efficient access: */ 72 /* lump static variables together for more efficient access: */
73 static struct { 73 static struct {
74 /* cycle counter last time it got invoked */ 74 /* cycle counter last time it got invoked */
75 __u32 last_time; 75 __u32 last_time;
76 /* ticks/cycle * 2^48 */ 76 /* ticks/cycle * 2^48 */
77 unsigned long scaled_ticks_per_cycle; 77 unsigned long scaled_ticks_per_cycle;
78 /* last time the CMOS clock got updated */ 78 /* last time the CMOS clock got updated */
79 time_t last_rtc_update; 79 time_t last_rtc_update;
80 /* partial unused tick */ 80 /* partial unused tick */
81 unsigned long partial_tick; 81 unsigned long partial_tick;
82 } state; 82 } state;
83 83
84 unsigned long est_cycle_freq; 84 unsigned long est_cycle_freq;
85 85
86 86
87 static inline __u32 rpcc(void) 87 static inline __u32 rpcc(void)
88 { 88 {
89 __u32 result; 89 __u32 result;
90 asm volatile ("rpcc %0" : "=r"(result)); 90 asm volatile ("rpcc %0" : "=r"(result));
91 return result; 91 return result;
92 } 92 }
93 93
94 /* 94 /*
95 * Scheduler clock - returns current time in nanosec units. 95 * Scheduler clock - returns current time in nanosec units.
96 * 96 *
97 * Copied from ARM code for expediency... ;-} 97 * Copied from ARM code for expediency... ;-}
98 */ 98 */
99 unsigned long long sched_clock(void) 99 unsigned long long sched_clock(void)
100 { 100 {
101 return (unsigned long long)jiffies * (1000000000 / HZ); 101 return (unsigned long long)jiffies * (1000000000 / HZ);
102 } 102 }
103 103
104 104
105 /* 105 /*
106 * timer_interrupt() needs to keep up the real-time clock, 106 * timer_interrupt() needs to keep up the real-time clock,
107 * as well as call the "do_timer()" routine every clocktick 107 * as well as call the "do_timer()" routine every clocktick
108 */ 108 */
109 irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs) 109 irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs)
110 { 110 {
111 unsigned long delta; 111 unsigned long delta;
112 __u32 now; 112 __u32 now;
113 long nticks; 113 long nticks;
114 114
115 #ifndef CONFIG_SMP 115 #ifndef CONFIG_SMP
116 /* Not SMP, do kernel PC profiling here. */ 116 /* Not SMP, do kernel PC profiling here. */
117 profile_tick(CPU_PROFILING, regs); 117 profile_tick(CPU_PROFILING, regs);
118 #endif 118 #endif
119 119
120 write_seqlock(&xtime_lock); 120 write_seqlock(&xtime_lock);
121 121
122 /* 122 /*
123 * Calculate how many ticks have passed since the last update, 123 * Calculate how many ticks have passed since the last update,
124 * including any previous partial leftover. Save any resulting 124 * including any previous partial leftover. Save any resulting
125 * fraction for the next pass. 125 * fraction for the next pass.
126 */ 126 */
127 now = rpcc(); 127 now = rpcc();
128 delta = now - state.last_time; 128 delta = now - state.last_time;
129 state.last_time = now; 129 state.last_time = now;
130 delta = delta * state.scaled_ticks_per_cycle + state.partial_tick; 130 delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
131 state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1); 131 state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
132 nticks = delta >> FIX_SHIFT; 132 nticks = delta >> FIX_SHIFT;
133 133
134 while (nticks > 0) { 134 while (nticks > 0) {
135 do_timer(regs); 135 do_timer(regs);
136 #ifndef CONFIG_SMP 136 #ifndef CONFIG_SMP
137 update_process_times(user_mode(regs)); 137 update_process_times(user_mode(regs));
138 #endif 138 #endif
139 nticks--; 139 nticks--;
140 } 140 }
141 141
142 /* 142 /*
143 * If we have an externally synchronized Linux clock, then update 143 * If we have an externally synchronized Linux clock, then update
144 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 144 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
145 * called as close as possible to 500 ms before the new second starts. 145 * called as close as possible to 500 ms before the new second starts.
146 */ 146 */
147 if (ntp_synced() 147 if (ntp_synced()
148 && xtime.tv_sec > state.last_rtc_update + 660 148 && xtime.tv_sec > state.last_rtc_update + 660
149 && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2 149 && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2
150 && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) { 150 && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) {
151 int tmp = set_rtc_mmss(xtime.tv_sec); 151 int tmp = set_rtc_mmss(xtime.tv_sec);
152 state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0); 152 state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0);
153 } 153 }
154 154
155 write_sequnlock(&xtime_lock); 155 write_sequnlock(&xtime_lock);
156 return IRQ_HANDLED; 156 return IRQ_HANDLED;
157 } 157 }
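The accumulate-and-carry logic above can be modelled in a few lines of user-space C: whole ticks are whatever lands above bit 48, and the fraction below it is saved in partial_tick for the next interrupt. Same illustrative 1024 Hz / 500 MHz numbers as before:

        #include <stdio.h>

        #define FIX_SHIFT 48

        int main(void)
        {
                unsigned long stpc = (1024UL << FIX_SHIFT) / 500000000UL;
                unsigned long partial_tick = 0, nticks, delta, i;

                for (i = 0; i < 3; i++) {
                        delta = 488281;         /* pretend ~1 tick of cycles elapsed */
                        delta = delta * stpc + partial_tick;
                        partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
                        nticks = delta >> FIX_SHIFT;
                        printf("interrupt %lu: nticks=%lu partial=%lu\n",
                               i, nticks, partial_tick);
                }
                return 0;
        }

Because the fraction is carried forward rather than discarded, an interrupt that arrives fractionally early accounts zero ticks and a later one accounts the missing tick, so nothing is lost over time.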
158 158
159 void 159 void
160 common_init_rtc(void) 160 common_init_rtc(void)
161 { 161 {
162 unsigned char x; 162 unsigned char x;
163 163
164 /* Reset periodic interrupt frequency. */ 164 /* Reset periodic interrupt frequency. */
165 x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f; 165 x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
166 /* Test includes known working values on various platforms 166 /* Test includes known working values on various platforms
167 where 0x26 is wrong; we refuse to change those. */ 167 where 0x26 is wrong; we refuse to change those. */
168 if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) { 168 if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
169 printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x); 169 printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x);
170 CMOS_WRITE(0x26, RTC_FREQ_SELECT); 170 CMOS_WRITE(0x26, RTC_FREQ_SELECT);
171 } 171 }
172 172
173 /* Turn on periodic interrupts. */ 173 /* Turn on periodic interrupts. */
174 x = CMOS_READ(RTC_CONTROL); 174 x = CMOS_READ(RTC_CONTROL);
175 if (!(x & RTC_PIE)) { 175 if (!(x & RTC_PIE)) {
176 printk("Turning on RTC interrupts.\n"); 176 printk("Turning on RTC interrupts.\n");
177 x |= RTC_PIE; 177 x |= RTC_PIE;
178 x &= ~(RTC_AIE | RTC_UIE); 178 x &= ~(RTC_AIE | RTC_UIE);
179 CMOS_WRITE(x, RTC_CONTROL); 179 CMOS_WRITE(x, RTC_CONTROL);
180 } 180 }
181 (void) CMOS_READ(RTC_INTR_FLAGS); 181 (void) CMOS_READ(RTC_INTR_FLAGS);
182 182
183 outb(0x36, 0x43); /* pit counter 0: system timer */ 183 outb(0x36, 0x43); /* pit counter 0: system timer */
184 outb(0x00, 0x40); 184 outb(0x00, 0x40);
185 outb(0x00, 0x40); 185 outb(0x00, 0x40);
186 186
187 outb(0xb6, 0x43); /* pit counter 2: speaker */ 187 outb(0xb6, 0x43); /* pit counter 2: speaker */
188 outb(0x31, 0x42); 188 outb(0x31, 0x42);
189 outb(0x13, 0x42); 189 outb(0x13, 0x42);
190 190
191 init_rtc_irq(); 191 init_rtc_irq();
192 } 192 }
193 193
194 194
195 /* Validate a computed cycle counter result against the known bounds for 195 /* Validate a computed cycle counter result against the known bounds for
196 the given processor core. There's too much brokenness in the way of 196 the given processor core. There's too much brokenness in the way of
197 timing hardware for any one method to work everywhere. :-( 197 timing hardware for any one method to work everywhere. :-(
198 198
199 Return 0 if the result cannot be trusted, otherwise return the argument. */ 199 Return 0 if the result cannot be trusted, otherwise return the argument. */
200 200
201 static unsigned long __init 201 static unsigned long __init
202 validate_cc_value(unsigned long cc) 202 validate_cc_value(unsigned long cc)
203 { 203 {
204 static struct bounds { 204 static struct bounds {
205 unsigned int min, max; 205 unsigned int min, max;
206 } cpu_hz[] __initdata = { 206 } cpu_hz[] __initdata = {
207 [EV3_CPU] = { 50000000, 200000000 }, /* guess */ 207 [EV3_CPU] = { 50000000, 200000000 }, /* guess */
208 [EV4_CPU] = { 100000000, 300000000 }, 208 [EV4_CPU] = { 100000000, 300000000 },
209 [LCA4_CPU] = { 100000000, 300000000 }, /* guess */ 209 [LCA4_CPU] = { 100000000, 300000000 }, /* guess */
210 [EV45_CPU] = { 200000000, 300000000 }, 210 [EV45_CPU] = { 200000000, 300000000 },
211 [EV5_CPU] = { 250000000, 433000000 }, 211 [EV5_CPU] = { 250000000, 433000000 },
212 [EV56_CPU] = { 333000000, 667000000 }, 212 [EV56_CPU] = { 333000000, 667000000 },
213 [PCA56_CPU] = { 400000000, 600000000 }, /* guess */ 213 [PCA56_CPU] = { 400000000, 600000000 }, /* guess */
214 [PCA57_CPU] = { 500000000, 600000000 }, /* guess */ 214 [PCA57_CPU] = { 500000000, 600000000 }, /* guess */
215 [EV6_CPU] = { 466000000, 600000000 }, 215 [EV6_CPU] = { 466000000, 600000000 },
216 [EV67_CPU] = { 600000000, 750000000 }, 216 [EV67_CPU] = { 600000000, 750000000 },
217 [EV68AL_CPU] = { 750000000, 940000000 }, 217 [EV68AL_CPU] = { 750000000, 940000000 },
218 [EV68CB_CPU] = { 1000000000, 1333333333 }, 218 [EV68CB_CPU] = { 1000000000, 1333333333 },
219 /* None of the following are shipping as of 2001-11-01. */ 219 /* None of the following are shipping as of 2001-11-01. */
220 [EV68CX_CPU] = { 1000000000, 1700000000 }, /* guess */ 220 [EV68CX_CPU] = { 1000000000, 1700000000 }, /* guess */
221 [EV69_CPU] = { 1000000000, 1700000000 }, /* guess */ 221 [EV69_CPU] = { 1000000000, 1700000000 }, /* guess */
222 [EV7_CPU] = { 800000000, 1400000000 }, /* guess */ 222 [EV7_CPU] = { 800000000, 1400000000 }, /* guess */
223 [EV79_CPU] = { 1000000000, 2000000000 }, /* guess */ 223 [EV79_CPU] = { 1000000000, 2000000000 }, /* guess */
224 }; 224 };
225 225
226 /* Allow for some drift in the crystal. 10MHz is more than enough. */ 226 /* Allow for some drift in the crystal. 10MHz is more than enough. */
227 const unsigned int deviation = 10000000; 227 const unsigned int deviation = 10000000;
228 228
229 struct percpu_struct *cpu; 229 struct percpu_struct *cpu;
230 unsigned int index; 230 unsigned int index;
231 231
232 cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset); 232 cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset);
233 index = cpu->type & 0xffffffff; 233 index = cpu->type & 0xffffffff;
234 234
235 /* If index out of bounds, no way to validate. */ 235 /* If index out of bounds, no way to validate. */
236 if (index >= sizeof(cpu_hz)/sizeof(cpu_hz[0])) 236 if (index >= ARRAY_SIZE(cpu_hz))
237 return cc; 237 return cc;
238 238
239 /* If index contains no data, no way to validate. */ 239 /* If index contains no data, no way to validate. */
240 if (cpu_hz[index].max == 0) 240 if (cpu_hz[index].max == 0)
241 return cc; 241 return cc;
242 242
243 if (cc < cpu_hz[index].min - deviation 243 if (cc < cpu_hz[index].min - deviation
244 || cc > cpu_hz[index].max + deviation) 244 || cc > cpu_hz[index].max + deviation)
245 return 0; 245 return 0;
246 246
247 return cc; 247 return cc;
248 } 248 }
249 249
250 250
251 /* 251 /*
252 * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from 252 * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from
253 * arch/i386/time.c. 253 * arch/i386/time.c.
254 */ 254 */
255 255
256 #define CALIBRATE_LATCH 0xffff 256 #define CALIBRATE_LATCH 0xffff
257 #define TIMEOUT_COUNT 0x100000 257 #define TIMEOUT_COUNT 0x100000
258 258
259 static unsigned long __init 259 static unsigned long __init
260 calibrate_cc_with_pit(void) 260 calibrate_cc_with_pit(void)
261 { 261 {
262 int cc, count = 0; 262 int cc, count = 0;
263 263
264 /* Set the Gate high, disable speaker */ 264 /* Set the Gate high, disable speaker */
265 outb((inb(0x61) & ~0x02) | 0x01, 0x61); 265 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
266 266
267 /* 267 /*
268 * Now let's take care of CTC channel 2 268 * Now let's take care of CTC channel 2
269 * 269 *
270 * Set the Gate high, program CTC channel 2 for mode 0, 270 * Set the Gate high, program CTC channel 2 for mode 0,
271 * (interrupt on terminal count mode), binary count, 271 * (interrupt on terminal count mode), binary count,
272 * load 5 * LATCH count, (LSB and MSB) to begin countdown. 272 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
273 */ 273 */
274 outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ 274 outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
275 outb(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ 275 outb(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
276 outb(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ 276 outb(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
277 277
278 cc = rpcc(); 278 cc = rpcc();
279 do { 279 do {
280 count++; 280 count++;
281 } while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT); 281 } while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
282 cc = rpcc() - cc; 282 cc = rpcc() - cc;
283 283
284 /* Error: ECTCNEVERSET or ECPUTOOFAST. */ 284 /* Error: ECTCNEVERSET or ECPUTOOFAST. */
285 if (count <= 1 || count == TIMEOUT_COUNT) 285 if (count <= 1 || count == TIMEOUT_COUNT)
286 return 0; 286 return 0;
287 287
288 return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1); 288 return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
289 } 289 }
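The return statement converts "cycles counted while the 8254 ran through CALIBRATE_LATCH + 1 = 65536 of its own ticks" into a frequency. With an assumed PIT_TICK_RATE of 1193180 Hz, a 64-bit long, and a made-up reading of 27.5 million cycles (roughly what a 500 MHz part would produce), the arithmetic looks like this:

        #include <stdio.h>

        #define PIT_TICK_RATE   1193180         /* assumed */
        #define CALIBRATE_LATCH 0xffff

        int main(void)
        {
                long cc = 27500000;             /* made-up cycle-counter delta */
                /* 64-bit long assumed; the product is ~3.3e13. */
                unsigned long hz = ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);

                printf("estimated cycle frequency: %lu Hz\n", hz);   /* ~500.7 MHz */
                return 0;
        }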
290 290
291 /* The Linux interpretation of the CMOS clock register contents: 291 /* The Linux interpretation of the CMOS clock register contents:
292 When the Update-In-Progress (UIP) flag goes from 1 to 0, the 292 When the Update-In-Progress (UIP) flag goes from 1 to 0, the
293 RTC registers show the second which has precisely just started. 293 RTC registers show the second which has precisely just started.
294 Let's hope other operating systems interpret the RTC the same way. */ 294 Let's hope other operating systems interpret the RTC the same way. */
295 295
296 static unsigned long __init 296 static unsigned long __init
297 rpcc_after_update_in_progress(void) 297 rpcc_after_update_in_progress(void)
298 { 298 {
299 do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)); 299 do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
300 do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); 300 do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
301 301
302 return rpcc(); 302 return rpcc();
303 } 303 }
304 304
305 void __init 305 void __init
306 time_init(void) 306 time_init(void)
307 { 307 {
308 unsigned int year, mon, day, hour, min, sec, cc1, cc2, epoch; 308 unsigned int year, mon, day, hour, min, sec, cc1, cc2, epoch;
309 unsigned long cycle_freq, tolerance; 309 unsigned long cycle_freq, tolerance;
310 long diff; 310 long diff;
311 311
312 /* Calibrate CPU clock -- attempt #1. */ 312 /* Calibrate CPU clock -- attempt #1. */
313 if (!est_cycle_freq) 313 if (!est_cycle_freq)
314 est_cycle_freq = validate_cc_value(calibrate_cc_with_pit()); 314 est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());
315 315
316 cc1 = rpcc(); 316 cc1 = rpcc();
317 317
318 /* Calibrate CPU clock -- attempt #2. */ 318 /* Calibrate CPU clock -- attempt #2. */
319 if (!est_cycle_freq) { 319 if (!est_cycle_freq) {
320 cc1 = rpcc_after_update_in_progress(); 320 cc1 = rpcc_after_update_in_progress();
321 cc2 = rpcc_after_update_in_progress(); 321 cc2 = rpcc_after_update_in_progress();
322 est_cycle_freq = validate_cc_value(cc2 - cc1); 322 est_cycle_freq = validate_cc_value(cc2 - cc1);
323 cc1 = cc2; 323 cc1 = cc2;
324 } 324 }
325 325
326 cycle_freq = hwrpb->cycle_freq; 326 cycle_freq = hwrpb->cycle_freq;
327 if (est_cycle_freq) { 327 if (est_cycle_freq) {
328 /* If the given value is within 250 PPM of what we calculated, 328 /* If the given value is within 250 PPM of what we calculated,
329 accept it. Otherwise, use what we found. */ 329 accept it. Otherwise, use what we found. */
330 tolerance = cycle_freq / 4000; 330 tolerance = cycle_freq / 4000;
331 diff = cycle_freq - est_cycle_freq; 331 diff = cycle_freq - est_cycle_freq;
332 if (diff < 0) 332 if (diff < 0)
333 diff = -diff; 333 diff = -diff;
334 if ((unsigned long)diff > tolerance) { 334 if ((unsigned long)diff > tolerance) {
335 cycle_freq = est_cycle_freq; 335 cycle_freq = est_cycle_freq;
336 printk("HWRPB cycle frequency bogus. " 336 printk("HWRPB cycle frequency bogus. "
337 "Estimated %lu Hz\n", cycle_freq); 337 "Estimated %lu Hz\n", cycle_freq);
338 } else { 338 } else {
339 est_cycle_freq = 0; 339 est_cycle_freq = 0;
340 } 340 }
341 } else if (! validate_cc_value (cycle_freq)) { 341 } else if (! validate_cc_value (cycle_freq)) {
342 printk("HWRPB cycle frequency bogus, " 342 printk("HWRPB cycle frequency bogus, "
343 "and unable to estimate a proper value!\n"); 343 "and unable to estimate a proper value!\n");
344 } 344 }
345 345
346 /* From John Bowman <bowman@math.ualberta.ca>: allow the values 346 /* From John Bowman <bowman@math.ualberta.ca>: allow the values
347 to settle, as the Update-In-Progress bit going low isn't good 347 to settle, as the Update-In-Progress bit going low isn't good
348 enough on some hardware. 2ms is our guess; we haven't found 348 enough on some hardware. 2ms is our guess; we haven't found
349 bogomips yet, but this is close on a 500Mhz box. */ 349 bogomips yet, but this is close on a 500Mhz box. */
350 __delay(1000000); 350 __delay(1000000);
351 351
352 sec = CMOS_READ(RTC_SECONDS); 352 sec = CMOS_READ(RTC_SECONDS);
353 min = CMOS_READ(RTC_MINUTES); 353 min = CMOS_READ(RTC_MINUTES);
354 hour = CMOS_READ(RTC_HOURS); 354 hour = CMOS_READ(RTC_HOURS);
355 day = CMOS_READ(RTC_DAY_OF_MONTH); 355 day = CMOS_READ(RTC_DAY_OF_MONTH);
356 mon = CMOS_READ(RTC_MONTH); 356 mon = CMOS_READ(RTC_MONTH);
357 year = CMOS_READ(RTC_YEAR); 357 year = CMOS_READ(RTC_YEAR);
358 358
359 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { 359 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
360 BCD_TO_BIN(sec); 360 BCD_TO_BIN(sec);
361 BCD_TO_BIN(min); 361 BCD_TO_BIN(min);
362 BCD_TO_BIN(hour); 362 BCD_TO_BIN(hour);
363 BCD_TO_BIN(day); 363 BCD_TO_BIN(day);
364 BCD_TO_BIN(mon); 364 BCD_TO_BIN(mon);
365 BCD_TO_BIN(year); 365 BCD_TO_BIN(year);
366 } 366 }
367 367
368 /* PC-like is standard; used for year >= 70 */ 368 /* PC-like is standard; used for year >= 70 */
369 epoch = 1900; 369 epoch = 1900;
370 if (year < 20) 370 if (year < 20)
371 epoch = 2000; 371 epoch = 2000;
372 else if (year >= 20 && year < 48) 372 else if (year >= 20 && year < 48)
373 /* NT epoch */ 373 /* NT epoch */
374 epoch = 1980; 374 epoch = 1980;
375 else if (year >= 48 && year < 70) 375 else if (year >= 48 && year < 70)
376 /* Digital UNIX epoch */ 376 /* Digital UNIX epoch */
377 epoch = 1952; 377 epoch = 1952;
378 378
379 printk(KERN_INFO "Using epoch = %d\n", epoch); 379 printk(KERN_INFO "Using epoch = %d\n", epoch);
380 380
381 if ((year += epoch) < 1970) 381 if ((year += epoch) < 1970)
382 year += 100; 382 year += 100;
383 383
384 xtime.tv_sec = mktime(year, mon, day, hour, min, sec); 384 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
385 xtime.tv_nsec = 0; 385 xtime.tv_nsec = 0;
386 386
387 wall_to_monotonic.tv_sec -= xtime.tv_sec; 387 wall_to_monotonic.tv_sec -= xtime.tv_sec;
388 wall_to_monotonic.tv_nsec = 0; 388 wall_to_monotonic.tv_nsec = 0;
389 389
390 if (HZ > (1<<16)) { 390 if (HZ > (1<<16)) {
391 extern void __you_loose (void); 391 extern void __you_loose (void);
392 __you_loose(); 392 __you_loose();
393 } 393 }
394 394
395 state.last_time = cc1; 395 state.last_time = cc1;
396 state.scaled_ticks_per_cycle 396 state.scaled_ticks_per_cycle
397 = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq; 397 = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
398 state.last_rtc_update = 0; 398 state.last_rtc_update = 0;
399 state.partial_tick = 0L; 399 state.partial_tick = 0L;
400 400
401 /* Startup the timer source. */ 401 /* Startup the timer source. */
402 alpha_mv.init_rtc(); 402 alpha_mv.init_rtc();
403 } 403 }
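The epoch selection in time_init() maps the RTC's two-digit year onto one of three firmware conventions. Pulling just that heuristic into a user-space helper makes the mapping easy to tabulate; the logic below mirrors the shown code and adds nothing new:

        #include <stdio.h>

        static int full_year(int year)
        {
                int epoch = 1900;               /* PC-like is standard; year >= 70 */

                if (year < 20)
                        epoch = 2000;
                else if (year >= 20 && year < 48)
                        epoch = 1980;           /* NT epoch */
                else if (year >= 48 && year < 70)
                        epoch = 1952;           /* Digital UNIX epoch */

                year += epoch;
                if (year < 1970)
                        year += 100;
                return year;
        }

        int main(void)
        {
                /* 05 -> 2005, 25 -> 2005, 55 -> 2007, 99 -> 1999 */
                printf("%d %d %d %d\n",
                       full_year(5), full_year(25), full_year(55), full_year(99));
                return 0;
        }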
404 404
405 /* 405 /*
406 * Use the cycle counter to estimate an displacement from the last time 406 * Use the cycle counter to estimate an displacement from the last time
407 * tick. Unfortunately the Alpha designers made only the low 32-bits of 407 * tick. Unfortunately the Alpha designers made only the low 32-bits of
408 * the cycle counter active, so we overflow on 8.2 seconds on a 500MHz 408 * the cycle counter active, so we overflow on 8.2 seconds on a 500MHz
409 * part. So we can't do the "find absolute time in terms of cycles" thing 409 * part. So we can't do the "find absolute time in terms of cycles" thing
410 * that the other ports do. 410 * that the other ports do.
411 */ 411 */
412 void 412 void
413 do_gettimeofday(struct timeval *tv) 413 do_gettimeofday(struct timeval *tv)
414 { 414 {
415 unsigned long flags; 415 unsigned long flags;
416 unsigned long sec, usec, lost, seq; 416 unsigned long sec, usec, lost, seq;
417 unsigned long delta_cycles, delta_usec, partial_tick; 417 unsigned long delta_cycles, delta_usec, partial_tick;
418 418
419 do { 419 do {
420 seq = read_seqbegin_irqsave(&xtime_lock, flags); 420 seq = read_seqbegin_irqsave(&xtime_lock, flags);
421 421
422 delta_cycles = rpcc() - state.last_time; 422 delta_cycles = rpcc() - state.last_time;
423 sec = xtime.tv_sec; 423 sec = xtime.tv_sec;
424 usec = (xtime.tv_nsec / 1000); 424 usec = (xtime.tv_nsec / 1000);
425 partial_tick = state.partial_tick; 425 partial_tick = state.partial_tick;
426 lost = jiffies - wall_jiffies; 426 lost = jiffies - wall_jiffies;
427 427
428 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 428 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
429 429
430 #ifdef CONFIG_SMP 430 #ifdef CONFIG_SMP
431 /* Until and unless we figure out how to get cpu cycle counters 431 /* Until and unless we figure out how to get cpu cycle counters
432 in sync and keep them there, we can't use the rpcc tricks. */ 432 in sync and keep them there, we can't use the rpcc tricks. */
433 delta_usec = lost * (1000000 / HZ); 433 delta_usec = lost * (1000000 / HZ);
434 #else 434 #else
435 /* 435 /*
436 * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks) 436 * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
437 * = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks) 437 * = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
438 * = cycles * (s_t_p_c) * 15625 / (2**42 * ticks) 438 * = cycles * (s_t_p_c) * 15625 / (2**42 * ticks)
439 * 439 *
440 * which, given a 600MHz cycle and a 1024Hz tick, has a 440 * which, given a 600MHz cycle and a 1024Hz tick, has a
441 * dynamic range of about 1.7e17, which is less than the 441 * dynamic range of about 1.7e17, which is less than the
442 * 1.8e19 in an unsigned long, so we are safe from overflow. 442 * 1.8e19 in an unsigned long, so we are safe from overflow.
443 * 443 *
444 * Round, but with .5 up always, since .5 to even is harder 444 * Round, but with .5 up always, since .5 to even is harder
445 * with no clear gain. 445 * with no clear gain.
446 */ 446 */
447 447
448 delta_usec = (delta_cycles * state.scaled_ticks_per_cycle 448 delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
449 + partial_tick 449 + partial_tick
450 + (lost << FIX_SHIFT)) * 15625; 450 + (lost << FIX_SHIFT)) * 15625;
451 delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2; 451 delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
452 #endif 452 #endif
453 453
454 usec += delta_usec; 454 usec += delta_usec;
455 if (usec >= 1000000) { 455 if (usec >= 1000000) {
456 sec += 1; 456 sec += 1;
457 usec -= 1000000; 457 usec -= 1000000;
458 } 458 }
459 459
460 tv->tv_sec = sec; 460 tv->tv_sec = sec;
461 tv->tv_usec = usec; 461 tv->tv_usec = usec;
462 } 462 }
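The 15625 / 2^42 in the derivation above is simply 10^6 / 2^48 with the common factor 2^6 cancelled (10^6 = 15625 * 64), and the trailing "+ 1) / 2" implements round-half-up. A quick check, assuming HZ = 1024 and a 64-bit unsigned long, that one whole fixed-point tick converts to about 10^6/HZ microseconds:

        #include <stdio.h>

        #define FIX_SHIFT 48

        int main(void)
        {
                unsigned long hz = 1024;                /* assumed */
                unsigned long delta = 1UL << FIX_SHIFT; /* exactly one tick */
                unsigned long usec;

                printf("%d\n", 1000000 == 15625 * (1 << 6));    /* prints 1 */

                usec = delta * 15625;                   /* fits in 64 bits */
                /* divide by 2^41 * HZ, then round: same as /(2^42 * HZ) + 0.5 */
                usec = ((usec / ((1UL << (FIX_SHIFT - 6 - 1)) * hz)) + 1) / 2;
                printf("%lu usec per tick (exact: 1e6/HZ = %.4f)\n",
                       usec, 1000000.0 / hz);           /* 977 vs 976.5625 */
                return 0;
        }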
463 463
464 EXPORT_SYMBOL(do_gettimeofday); 464 EXPORT_SYMBOL(do_gettimeofday);
465 465
466 int 466 int
467 do_settimeofday(struct timespec *tv) 467 do_settimeofday(struct timespec *tv)
468 { 468 {
469 time_t wtm_sec, sec = tv->tv_sec; 469 time_t wtm_sec, sec = tv->tv_sec;
470 long wtm_nsec, nsec = tv->tv_nsec; 470 long wtm_nsec, nsec = tv->tv_nsec;
471 unsigned long delta_nsec; 471 unsigned long delta_nsec;
472 472
473 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 473 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
474 return -EINVAL; 474 return -EINVAL;
475 475
476 write_seqlock_irq(&xtime_lock); 476 write_seqlock_irq(&xtime_lock);
477 477
478 /* The offset that is added into time in do_gettimeofday above 478 /* The offset that is added into time in do_gettimeofday above
479 must be subtracted out here to keep a coherent view of the 479 must be subtracted out here to keep a coherent view of the
480 time. Without this, a full-tick error is possible. */ 480 time. Without this, a full-tick error is possible. */
481 481
482 #ifdef CONFIG_SMP 482 #ifdef CONFIG_SMP
483 delta_nsec = (jiffies - wall_jiffies) * (NSEC_PER_SEC / HZ); 483 delta_nsec = (jiffies - wall_jiffies) * (NSEC_PER_SEC / HZ);
484 #else 484 #else
485 delta_nsec = rpcc() - state.last_time; 485 delta_nsec = rpcc() - state.last_time;
486 delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle 486 delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle
487 + state.partial_tick 487 + state.partial_tick
488 + ((jiffies - wall_jiffies) << FIX_SHIFT)) * 15625; 488 + ((jiffies - wall_jiffies) << FIX_SHIFT)) * 15625;
489 delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2; 489 delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
490 delta_nsec *= 1000; 490 delta_nsec *= 1000;
491 #endif 491 #endif
492 492
493 nsec -= delta_nsec; 493 nsec -= delta_nsec;
494 494
495 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); 495 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
496 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); 496 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
497 497
498 set_normalized_timespec(&xtime, sec, nsec); 498 set_normalized_timespec(&xtime, sec, nsec);
499 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 499 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
500 500
501 ntp_clear(); 501 ntp_clear();
502 502
503 write_sequnlock_irq(&xtime_lock); 503 write_sequnlock_irq(&xtime_lock);
504 clock_was_set(); 504 clock_was_set();
505 return 0; 505 return 0;
506 } 506 }
507 507
508 EXPORT_SYMBOL(do_settimeofday); 508 EXPORT_SYMBOL(do_settimeofday);
509 509
510 510
511 /* 511 /*
512 * In order to set the CMOS clock precisely, set_rtc_mmss has to be 512 * In order to set the CMOS clock precisely, set_rtc_mmss has to be
513 * called 500 ms after the second nowtime has started, because when 513 * called 500 ms after the second nowtime has started, because when
514 * nowtime is written into the registers of the CMOS clock, it will 514 * nowtime is written into the registers of the CMOS clock, it will
515 * jump to the next second precisely 500 ms later. Check the Motorola 515 * jump to the next second precisely 500 ms later. Check the Motorola
516 * MC146818A or Dallas DS12887 data sheet for details. 516 * MC146818A or Dallas DS12887 data sheet for details.
517 * 517 *
518 * BUG: This routine does not handle hour overflow properly; it just 518 * BUG: This routine does not handle hour overflow properly; it just
519 * sets the minutes. Usually you won't notice until after reboot! 519 * sets the minutes. Usually you won't notice until after reboot!
520 */ 520 */
521 521
522 522
523 static int 523 static int
524 set_rtc_mmss(unsigned long nowtime) 524 set_rtc_mmss(unsigned long nowtime)
525 { 525 {
526 int retval = 0; 526 int retval = 0;
527 int real_seconds, real_minutes, cmos_minutes; 527 int real_seconds, real_minutes, cmos_minutes;
528 unsigned char save_control, save_freq_select; 528 unsigned char save_control, save_freq_select;
529 529
530 /* irq are locally disabled here */ 530 /* irq are locally disabled here */
531 spin_lock(&rtc_lock); 531 spin_lock(&rtc_lock);
532 /* Tell the clock it's being set */ 532 /* Tell the clock it's being set */
533 save_control = CMOS_READ(RTC_CONTROL); 533 save_control = CMOS_READ(RTC_CONTROL);
534 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); 534 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
535 535
536 /* Stop and reset prescaler */ 536 /* Stop and reset prescaler */
537 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 537 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
538 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); 538 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
539 539
540 cmos_minutes = CMOS_READ(RTC_MINUTES); 540 cmos_minutes = CMOS_READ(RTC_MINUTES);
541 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 541 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
542 BCD_TO_BIN(cmos_minutes); 542 BCD_TO_BIN(cmos_minutes);
543 543
544 /* 544 /*
545 * since we're only adjusting minutes and seconds, 545 * since we're only adjusting minutes and seconds,
546 * don't interfere with hour overflow. This avoids 546 * don't interfere with hour overflow. This avoids
547 * messing with unknown time zones but requires your 547 * messing with unknown time zones but requires your
548 * RTC not to be off by more than 15 minutes 548 * RTC not to be off by more than 15 minutes
549 */ 549 */
550 real_seconds = nowtime % 60; 550 real_seconds = nowtime % 60;
551 real_minutes = nowtime / 60; 551 real_minutes = nowtime / 60;
552 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) { 552 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) {
553 /* correct for half hour time zone */ 553 /* correct for half hour time zone */
554 real_minutes += 30; 554 real_minutes += 30;
555 } 555 }
556 real_minutes %= 60; 556 real_minutes %= 60;
557 557
558 if (abs(real_minutes - cmos_minutes) < 30) { 558 if (abs(real_minutes - cmos_minutes) < 30) {
559 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { 559 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
560 BIN_TO_BCD(real_seconds); 560 BIN_TO_BCD(real_seconds);
561 BIN_TO_BCD(real_minutes); 561 BIN_TO_BCD(real_minutes);
562 } 562 }
563 CMOS_WRITE(real_seconds,RTC_SECONDS); 563 CMOS_WRITE(real_seconds,RTC_SECONDS);
564 CMOS_WRITE(real_minutes,RTC_MINUTES); 564 CMOS_WRITE(real_minutes,RTC_MINUTES);
565 } else { 565 } else {
566 printk(KERN_WARNING 566 printk(KERN_WARNING
567 "set_rtc_mmss: can't update from %d to %d\n", 567 "set_rtc_mmss: can't update from %d to %d\n",
568 cmos_minutes, real_minutes); 568 cmos_minutes, real_minutes);
569 retval = -1; 569 retval = -1;
570 } 570 }
571 571
572 /* The following flags have to be released exactly in this order, 572 /* The following flags have to be released exactly in this order,
573 * otherwise the DS12887 (popular MC146818A clone with integrated 573 * otherwise the DS12887 (popular MC146818A clone with integrated
574 * battery and quartz) will not reset the oscillator and will not 574 * battery and quartz) will not reset the oscillator and will not
575 * update precisely 500 ms later. You won't find this mentioned in 575 * update precisely 500 ms later. You won't find this mentioned in
576 * the Dallas Semiconductor data sheets, but who believes data 576 * the Dallas Semiconductor data sheets, but who believes data
577 * sheets anyway ... -- Markus Kuhn 577 * sheets anyway ... -- Markus Kuhn
578 */ 578 */
579 CMOS_WRITE(save_control, RTC_CONTROL); 579 CMOS_WRITE(save_control, RTC_CONTROL);
580 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 580 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
581 spin_unlock(&rtc_lock); 581 spin_unlock(&rtc_lock);
582 582
583 return retval; 583 return retval;
584 } 584 }
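The parity test on (abs(diff) + 15) / 30 detects an RTC running in a zone offset by roughly half an hour, so that only minutes and seconds need rewriting. A worked case where the CMOS clock reads :50 while the system clock says :20:

        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                int cmos_minutes = 50;          /* illustrative readings */
                int real_minutes = 20;

                if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
                        real_minutes += 30;     /* correct for half hour zone */
                real_minutes %= 60;

                /* |50 - 50| = 0 < 30, so the write path above would be taken. */
                printf("write %d (diff %d)\n",
                       real_minutes, abs(real_minutes - cmos_minutes));
                return 0;
        }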
585 585