Commit 3ed3bce846abc7ef460104b461cac793e41afe5e

Authored by Matt Domsch
Committed by Linus Torvalds
1 parent 10dbe196a8

[PATCH] ia64: use i386 dmi_scan.c

Enable DMI table parsing on ia64.

Andi Kleen has a patch in his x86_64 tree which enables the use of i386
dmi_scan.c on x86_64.  dmi_scan.c functions are being used by the
drivers/char/ipmi/ipmi_si_intf.c driver for autodetecting the ports or
memory spaces where the IPMI controllers may be found.

This patch adds the equivalent changes for ia64, mirroring what is in the
x86_64 tree.  In addition, I reworked the DMI detection so that on EFI-capable
systems, it uses the efi.smbios pointer to find the table, rather than
brute-force searching from 0xF0000.  On non-EFI systems, it continues the
brute-force search.
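
For reference, the 15-byte "_DMI_" anchor that the new dmi_present()
below parses has the following layout.  This is a sketch only -- the
struct name is illustrative, the kernel code reads the raw byte offsets:

	/* Illustrative view of the _DMI_ entry point checked by
	 * dmi_present(); all multi-byte fields are little-endian. */
	struct dmi_anchor {
		u8  signature[5];	/* "_DMI_" */
		u8  checksum;		/* all 15 bytes sum to 0 mod 256 */
		u16 table_length;	/* buf[6..7]: length of the DMI table */
		u32 table_base;		/* buf[8..11]: physical table address */
		u16 num_structures;	/* buf[12..13]: structure count */
		u8  bcd_revision;	/* buf[14]: e.g. 0x24 means DMI 2.4 */
	} __attribute__((packed));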

My test system, an Intel S870BN4 'Tiger4', aka Dell PowerEdge 7250, with
latest BIOS, does not list the IPMI controller in the ACPI namespace, nor
does it have an ACPI SPMI table.  Note also that currently shipping Dell x8xx
EM64T servers don't have these either, so DMI is the only method for
obtaining the address of the IPMI controller.
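
For illustration, a driver can then walk the saved type 38 records
through the dmi_find_device() interface added below; a minimal sketch
(decode_ipmi_record() is a hypothetical helper, not part of this patch):

	#include <linux/dmi.h>

	static void probe_ipmi_from_dmi(void)
	{
		struct dmi_device *dev = NULL;

		/* dev->device_data is a verbatim copy of the SMBIOS type 38
		 * record saved by dmi_save_ipmi_device(). */
		while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
			decode_ipmi_record(dev->device_data);	/* hypothetical */
	}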

Signed-off-by: Matt Domsch <Matt_Domsch@dell.com>
Acked-by: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 6 changed files with 83 additions and 33 deletions

arch/i386/kernel/dmi_scan.c

 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/dmi.h>
+#include <linux/efi.h>
 #include <linux/bootmem.h>
 #include <linux/slab.h>
 #include <asm/dmi.h>
 
 static char * __init dmi_string(struct dmi_header *dm, u8 s)
 {
 	u8 *bp = ((u8 *) dm) + dm->length;
 	char *str = "";
 
 	if (s) {
 		s--;
 		while (s > 0 && *bp) {
 			bp += strlen(bp) + 1;
 			s--;
 		}
 
 		if (*bp != 0) {
 			str = dmi_alloc(strlen(bp) + 1);
 			if (str != NULL)
 				strcpy(str, bp);
 			else
 				printk(KERN_ERR "dmi_string: out of memory.\n");
 		}
 	}
 
 	return str;
 }
 
 /*
  * We have to be cautious here. We have seen BIOSes with DMI pointers
  * pointing to completely the wrong place for example
  */
 static int __init dmi_table(u32 base, int len, int num,
 			    void (*decode)(struct dmi_header *))
 {
 	u8 *buf, *data;
 	int i = 0;
 
 	buf = dmi_ioremap(base, len);
 	if (buf == NULL)
 		return -1;
 
 	data = buf;
 
 	/*
 	 * Stop when we see all the items the table claimed to have
 	 * OR we run off the end of the table (also happens)
 	 */
 	while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
 		struct dmi_header *dm = (struct dmi_header *)data;
 		/*
 		 * We want to know the total length (formated area and strings)
 		 * before decoding to make sure we won't run off the table in
 		 * dmi_decode or dmi_string
 		 */
 		data += dm->length;
 		while ((data - buf < len - 1) && (data[0] || data[1]))
 			data++;
 		if (data - buf < len - 1)
 			decode(dm);
 		data += 2;
 		i++;
 	}
 	dmi_iounmap(buf, len);
 	return 0;
 }
 
 static int __init dmi_checksum(u8 *buf)
 {
 	u8 sum = 0;
 	int a;
 
 	for (a = 0; a < 15; a++)
 		sum += buf[a];
 
 	return sum == 0;
 }
 
 static char *dmi_ident[DMI_STRING_MAX];
 static LIST_HEAD(dmi_devices);
 
 /*
  * Save a DMI string
  */
 static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
 {
 	char *p, *d = (char*) dm;
 
 	if (dmi_ident[slot])
 		return;
 
 	p = dmi_string(dm, d[string]);
 	if (p == NULL)
 		return;
 
 	dmi_ident[slot] = p;
 }
 
 static void __init dmi_save_devices(struct dmi_header *dm)
 {
 	int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
 	struct dmi_device *dev;
 
 	for (i = 0; i < count; i++) {
 		char *d = (char *)(dm + 1) + (i * 2);
 
 		/* Skip disabled device */
 		if ((*d & 0x80) == 0)
 			continue;
 
 		dev = dmi_alloc(sizeof(*dev));
 		if (!dev) {
 			printk(KERN_ERR "dmi_save_devices: out of memory.\n");
 			break;
 		}
 
 		dev->type = *d++ & 0x7f;
 		dev->name = dmi_string(dm, *d);
 		dev->device_data = NULL;
 
 		list_add(&dev->list, &dmi_devices);
 	}
 }
 
 static void __init dmi_save_ipmi_device(struct dmi_header *dm)
 {
 	struct dmi_device *dev;
 	void * data;
 
 	data = dmi_alloc(dm->length);
 	if (data == NULL) {
 		printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
 		return;
 	}
 
 	memcpy(data, dm, dm->length);
 
 	dev = dmi_alloc(sizeof(*dev));
 	if (!dev) {
 		printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
 		return;
 	}
 
 	dev->type = DMI_DEV_TYPE_IPMI;
 	dev->name = "IPMI controller";
 	dev->device_data = data;
 
 	list_add(&dev->list, &dmi_devices);
 }
 
 /*
  * Process a DMI table entry. Right now all we care about are the BIOS
  * and machine entries. For 2.5 we should pull the smbus controller info
  * out of here.
  */
 static void __init dmi_decode(struct dmi_header *dm)
 {
 	switch(dm->type) {
 	case 0:		/* BIOS Information */
 		dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
 		dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
 		dmi_save_ident(dm, DMI_BIOS_DATE, 8);
 		break;
 	case 1:		/* System Information */
 		dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
 		dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
 		dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
 		dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
 		break;
 	case 2:		/* Base Board Information */
 		dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
 		dmi_save_ident(dm, DMI_BOARD_NAME, 5);
 		dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
 		break;
 	case 10:	/* Onboard Devices Information */
 		dmi_save_devices(dm);
 		break;
 	case 38:	/* IPMI Device Information */
 		dmi_save_ipmi_device(dm);
 	}
 }
 
-void __init dmi_scan_machine(void)
+static int __init dmi_present(char __iomem *p)
 {
 	u8 buf[15];
+	memcpy_fromio(buf, p, 15);
+	if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
+		u16 num = (buf[13] << 8) | buf[12];
+		u16 len = (buf[7] << 8) | buf[6];
+		u32 base = (buf[11] << 24) | (buf[10] << 16) |
+			(buf[9] << 8) | buf[8];
+
+		/*
+		 * DMI version 0.0 means that the real version is taken from
+		 * the SMBIOS version, which we don't know at this point.
+		 */
+		if (buf[14] != 0)
+			printk(KERN_INFO "DMI %d.%d present.\n",
+				buf[14] >> 4, buf[14] & 0xF);
+		else
+			printk(KERN_INFO "DMI present.\n");
+		if (dmi_table(base,len, num, dmi_decode) == 0)
+			return 0;
+	}
+	return 1;
+}
+
+void __init dmi_scan_machine(void)
+{
 	char __iomem *p, *q;
+	int rc;
 
-	/*
-	 * no iounmap() for that ioremap(); it would be a no-op, but it's
-	 * so early in setup that sucker gets confused into doing what
-	 * it shouldn't if we actually call it.
-	 */
-	p = ioremap(0xF0000, 0x10000);
-	if (p == NULL)
-		goto out;
+	if (efi_enabled) {
+		if (!efi.smbios)
+			goto out;
 
-	for (q = p; q < p + 0x10000; q += 16) {
-		memcpy_fromio(buf, q, 15);
-		if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
-			u16 num = (buf[13] << 8) | buf[12];
-			u16 len = (buf[7] << 8) | buf[6];
-			u32 base = (buf[11] << 24) | (buf[10] << 16) |
-				(buf[9] << 8) | buf[8];
+		/* This is called as a core_initcall() because it isn't
+		 * needed during early boot. This also means we can
+		 * iounmap the space when we're done with it.
+		 */
+		p = dmi_ioremap((unsigned long)efi.smbios, 0x10000);
+		if (p == NULL)
+			goto out;
 
-			/*
-			 * DMI version 0.0 means that the real version is taken from
-			 * the SMBIOS version, which we don't know at this point.
-			 */
-			if (buf[14] != 0)
-				printk(KERN_INFO "DMI %d.%d present.\n",
-					buf[14] >> 4, buf[14] & 0xF);
-			else
-				printk(KERN_INFO "DMI present.\n");
+		rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
+		iounmap(p);
+		if (!rc)
+			return;
+	}
+	else {
+		/*
+		 * no iounmap() for that ioremap(); it would be a no-op, but
+		 * it's so early in setup that sucker gets confused into doing
+		 * what it shouldn't if we actually call it.
+		 */
+		p = dmi_ioremap(0xF0000, 0x10000);
+		if (p == NULL)
+			goto out;
 
-			if (dmi_table(base,len, num, dmi_decode) == 0)
+		for (q = p; q < p + 0x10000; q += 16) {
+			rc = dmi_present(q);
+			if (!rc)
 				return;
 		}
 	}
-
 out:	printk(KERN_INFO "DMI not present or invalid.\n");
 }
-
 
 /**
  *	dmi_check_system - check system DMI data
  *	@list: array of dmi_system_id structures to match against
  *
  *	Walk the blacklist table running matching functions until someone
  *	returns non zero or we hit the end. Callback function is called for
  *	each successfull match. Returns the number of matches.
  */
 int dmi_check_system(struct dmi_system_id *list)
 {
 	int i, count = 0;
 	struct dmi_system_id *d = list;
 
 	while (d->ident) {
 		for (i = 0; i < ARRAY_SIZE(d->matches); i++) {
 			int s = d->matches[i].slot;
 			if (s == DMI_NONE)
 				continue;
 			if (dmi_ident[s] && strstr(dmi_ident[s], d->matches[i].substr))
 				continue;
 			/* No match */
 			goto fail;
 		}
 		count++;
 		if (d->callback && d->callback(d))
 			break;
 fail:		d++;
 	}
 
 	return count;
 }
 EXPORT_SYMBOL(dmi_check_system);
 
 /**
  *	dmi_get_system_info - return DMI data value
  *	@field: data index (see enum dmi_filed)
  *
  *	Returns one DMI data value, can be used to perform
  *	complex DMI data checks.
  */
 char *dmi_get_system_info(int field)
 {
 	return dmi_ident[field];
 }
 EXPORT_SYMBOL(dmi_get_system_info);
 
 /**
  *	dmi_find_device - find onboard device by type/name
  *	@type: device type or %DMI_DEV_TYPE_ANY to match all device types
  *	@desc: device name string or %NULL to match all
  *	@from: previous device found in search, or %NULL for new search.
  *
  *	Iterates through the list of known onboard devices. If a device is
  *	found with a matching @vendor and @device, a pointer to its device
  *	structure is returned.  Otherwise, %NULL is returned.
  *	A new search is initiated by passing %NULL to the @from argument.
  *	If @from is not %NULL, searches continue from next device.
  */
 struct dmi_device * dmi_find_device(int type, const char *name,
 				    struct dmi_device *from)
 {
 	struct list_head *d, *head = from ? &from->list : &dmi_devices;
 
 	for(d = head->next; d != &dmi_devices; d = d->next) {
 		struct dmi_device *dev = list_entry(d, struct dmi_device, list);
 
 		if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
 		    ((name == NULL) || (strcmp(dev->name, name) == 0)))
 			return dev;
 	}
 
 	return NULL;
 }
 EXPORT_SYMBOL(dmi_find_device);
 
 /**
  *	dmi_get_year - Return year of a DMI date
  *	@field:	data index (like dmi_get_system_info)
  *
  *	Returns -1 when the field doesn't exist. 0 when it is broken.
  */
 int dmi_get_year(int field)
 {
 	int year;
 	char *s = dmi_get_system_info(field);
 
 	if (!s)
 		return -1;
 	if (*s == '\0')
 		return 0;
 	s = strrchr(s, '/');
 	if (!s)
 		return 0;
 
 	s += 1;
 	year = simple_strtoul(s, NULL, 0);
 	if (year && year < 100) {	/* 2-digit year */
 		year += 1900;
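
For reference, dmi_check_system() above is typically driven by a match
table like the following sketch (the callback and the vendor/product
strings are illustrative, not taken from this patch):

	static int __init tiger4_callback(struct dmi_system_id *d)
	{
		printk(KERN_INFO "%s detected.\n", d->ident);
		return 0;	/* non-zero would stop further matching */
	}

	static struct dmi_system_id __initdata example_table[] = {
		{
			.callback = tiger4_callback,
			.ident    = "Intel Tiger4",
			.matches  = {
				DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
				DMI_MATCH(DMI_PRODUCT_NAME, "S870BN4"),
			},
		},
		{ }	/* terminating entry */
	};

	/* dmi_check_system(example_table) returns the number of matches. */
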
arch/ia64/Kconfig

 #
 # For a description of the syntax of this configuration file,
 # see Documentation/kbuild/kconfig-language.txt.
 #
 
 mainmenu "IA-64 Linux Kernel Configuration"
 
 source "init/Kconfig"
 
 menu "Processor type and features"
 
 config IA64
 	bool
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
 	  the 32-bit X86 line.  The IA-64 Linux project has a home
 	  page at <http://www.linuxia64.org/> and a mailing list at
 	  <linux-ia64@vger.kernel.org>.
 
 config 64BIT
 	bool
 	default y
 
 config MMU
 	bool
 	default y
 
 config SWIOTLB
 	bool
 	default y
 
 config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
 
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
 
 config TIME_INTERPOLATION
 	bool
 	default y
 
+config DMI
+	bool
+	default y
+
 config EFI
 	bool
 	default y
 
 config GENERIC_IOMAP
 	bool
 	default y
 
 config SCHED_NO_NO_OMIT_FRAME_POINTER
 	bool
 	default y
 
 config IA64_UNCACHED_ALLOCATOR
 	bool
 	select GENERIC_ALLOCATOR
 
 config DMA_IS_DMA32
 	bool
 	default y
 
 choice
 	prompt "System type"
 	default IA64_GENERIC
 
 config IA64_GENERIC
 	bool "generic"
 	select ACPI
 	select NUMA
 	select ACPI_NUMA
 	help
 	  This selects the system type of your hardware.  A "generic" kernel
 	  will run on any supported IA-64 system.  However, if you configure
 	  a kernel for your specific system, it will be faster and smaller.
 
 	  generic		For any supported IA-64 system
 	  DIG-compliant		For DIG ("Developer's Interface Guide") compliant systems
 	  HP-zx1/sx1000		For HP systems
 	  HP-zx1/sx1000+swiotlb	For HP systems with (broken) DMA-constrained devices.
 	  SGI-SN2		For SGI Altix systems
 	  Ski-simulator		For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
 
 	  If you don't know what to do, choose "generic".
 
 config IA64_DIG
 	bool "DIG-compliant"
 
 config IA64_HP_ZX1
 	bool "HP-zx1/sx1000"
 	help
 	  Build a kernel that runs on HP zx1 and sx1000 systems.  This adds
 	  support for the HP I/O MMU.
 
 config IA64_HP_ZX1_SWIOTLB
 	bool "HP-zx1/sx1000 with software I/O TLB"
 	help
 	  Build a kernel that runs on HP zx1 and sx1000 systems even when they
 	  have broken PCI devices which cannot DMA to full 32 bits.  Apart
 	  from support for the HP I/O MMU, this includes support for the software
 	  I/O TLB, which allows supporting the broken devices at the expense of
 	  wasting some kernel memory (about 2MB by default).
 
 config IA64_SGI_SN2
 	bool "SGI-SN2"
 	help
 	  Selecting this option will optimize the kernel for use on sn2 based
 	  systems, but the resulting kernel binary will not run on other
 	  types of ia64 systems.  If you have an SGI Altix system, it's safe
 	  to select this option.  If in doubt, select ia64 generic support
 	  instead.
 
 config IA64_HP_SIM
 	bool "Ski-simulator"
 
 endchoice
 
 choice
 	prompt "Processor type"
 	default ITANIUM
 
 config ITANIUM
 	bool "Itanium"
 	help
 	  Select your IA-64 processor type.  The default is Itanium.
 	  This choice is safe for all IA-64 systems, but may not perform
 	  optimally on systems with, say, Itanium 2 or newer processors.
 
 config MCKINLEY
 	bool "Itanium 2"
 	help
 	  Select this to configure for an Itanium 2 (McKinley) processor.
 
 endchoice
 
 choice
 	prompt "Kernel page size"
 	default IA64_PAGE_SIZE_16KB
 
 config IA64_PAGE_SIZE_4KB
 	bool "4KB"
 	help
 	  This lets you select the page size of the kernel.  For best IA-64
 	  performance, a page size of 8KB or 16KB is recommended.  For best
 	  IA-32 compatibility, a page size of 4KB should be selected (the vast
 	  majority of IA-32 binaries work perfectly fine with a larger page
 	  size).  For Itanium 2 or newer systems, a page size of 64KB can also
 	  be selected.
 
 	  4KB		For best IA-32 compatibility
 	  8KB		For best IA-64 performance
 	  16KB		For best IA-64 performance
 	  64KB		Requires Itanium 2 or newer processor.
 
 	  If you don't know what to do, choose 16KB.
 
 config IA64_PAGE_SIZE_8KB
 	bool "8KB"
 
 config IA64_PAGE_SIZE_16KB
 	bool "16KB"
 
 config IA64_PAGE_SIZE_64KB
 	depends on !ITANIUM
 	bool "64KB"
 
 endchoice
 
 choice
 	prompt "Page Table Levels"
 	default PGTABLE_3
 
 config PGTABLE_3
 	bool "3 Levels"
 
 config PGTABLE_4
 	depends on !IA64_PAGE_SIZE_64KB
 	bool "4 Levels"
 
 endchoice
 
 source kernel/Kconfig.hz
 
 config IA64_BRL_EMU
 	bool
 	depends on ITANIUM
 	default y
 
 # align cache-sensitive data to 128 bytes
 config IA64_L1_CACHE_SHIFT
 	int
 	default "7" if MCKINLEY
 	default "6" if ITANIUM
 
 config IA64_CYCLONE
 	bool "Cyclone (EXA) Time Source support"
 	help
 	  Say Y here to enable support for IBM EXA Cyclone time source.
 	  If you're unsure, answer N.
 
 config IOSAPIC
 	bool
 	depends on !IA64_HP_SIM
 	default y
 
 config IA64_SGI_SN_XP
 	tristate "Support communication between SGI SSIs"
 	depends on IA64_GENERIC || IA64_SGI_SN2
 	select IA64_UNCACHED_ALLOCATOR
 	help
 	  An SGI machine can be divided into multiple Single System
 	  Images which act independently of each other and have
 	  hardware based memory protection from the others.  Enabling
 	  this feature will allow for direct communication between SSIs
 	  based on a network adapter and DMA messaging.
 
 config FORCE_MAX_ZONEORDER
 	int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE
 	range 11 17 if !HUGETLB_PAGE
 	default "17" if HUGETLB_PAGE
 	default "11"
 
 config SMP
 	bool "Symmetric multi-processing support"
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N.  If you have a system with more
 	  than one CPU, say Y.
 
 	  If you say N here, the kernel will run on single and multiprocessor
 	  systems, but will use only one CPU of a multiprocessor system.  If
 	  you say Y here, the kernel will run on many, but not all,
 	  single processor systems.  On a single processor system, the kernel
 	  will run faster if you say N here.
 
 	  See also the <file:Documentation/smp.txt> and the SMP-HOWTO
 	  available at <http://www.tldp.org/docs.html#howto>.
 
 	  If you don't know what to do here, say N.
 
 config NR_CPUS
 	int "Maximum number of CPUs (2-1024)"
 	range 2 1024
 	depends on SMP
 	default "64"
 	help
 	  You should set this to the number of CPUs in your system, but
 	  keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but
 	  only use 2 CPUs on a >2 CPU system.  Setting this to a value larger
 	  than 64 will cause the use of a CPU mask array, causing a small
 	  performance hit.
 
 config IA64_NR_NODES
 	int "Maximum number of NODEs (256-1024)" if (IA64_SGI_SN2 || IA64_GENERIC)
 	range 256 1024
 	depends on IA64_SGI_SN2 || IA64_GENERIC
 	default "256"
 	help
 	  This option specifies the maximum number of nodes in your SSI system.
 	  If in doubt, use the default.
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
 	depends on SMP && EXPERIMENTAL
 	select HOTPLUG
 	default n
 	---help---
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu/cpu#.
 	  Say N if you want to disable CPU hotplug.
 
 config SCHED_SMT
 	bool "SMT scheduler support"
 	depends on SMP
 	default off
 	help
 	  Improves the CPU scheduler's decision making when dealing with
 	  Intel IA64 chips with MultiThreading at a cost of slightly increased
 	  overhead in some places. If unsure say N here.
 
 config PERMIT_BSP_REMOVE
 	bool "Support removal of Bootstrap Processor"
 	depends on HOTPLUG_CPU
 	default n
 	---help---
 	  Say Y here if your platform SAL will support removal of BSP with HOTPLUG_CPU
 	  support.
 
 config FORCE_CPEI_RETARGET
 	bool "Force assumption that CPEI can be re-targetted"
 	depends on PERMIT_BSP_REMOVE
 	default n
 	---help---
 	  Say Y if you need to force the assumption that CPEI can be re-targetted to
 	  any cpu in the system. This hint is available via ACPI 3.0 specifications.
 	  Tiger4 systems are capable of re-directing CPEI to any CPU other than BSP.
 	  This option it useful to enable this feature on older BIOS's as well.
 	  You can also enable this by using boot command line option force_cpei=1.
 
 config PREEMPT
 	bool "Preemptible Kernel"
 	help
 	  This option reduces the latency of the kernel when reacting to
 	  real-time or interactive events by allowing a low priority process to
 	  be preempted even if it is in kernel mode executing a system call.
 	  This allows applications to run more reliably even when the system is
 	  under load.
 
 	  Say Y here if you are building a kernel for a desktop, embedded
 	  or real-time system.  Say N if you are unsure.
 
 source "mm/Kconfig"
 
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 
 config ARCH_DISCONTIGMEM_ENABLE
 	def_bool y
 	help
 	  Say Y to support efficient handling of discontiguous physical memory,
 	  for architectures which are either NUMA (Non-Uniform Memory Access)
 	  or have huge holes in the physical address space for other reasons.
 	  See <file:Documentation/vm/numa> for more.
 
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	depends on ARCH_DISCONTIGMEM_ENABLE
 
 config ARCH_DISCONTIGMEM_DEFAULT
 	def_bool y if (IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB)
 	depends on ARCH_DISCONTIGMEM_ENABLE
 
 config NUMA
 	bool "NUMA support"
 	depends on !IA64_HP_SIM && !FLATMEM
 	default y if IA64_SGI_SN2
 	help
 	  Say Y to compile the kernel to support NUMA (Non-Uniform Memory
 	  Access).  This option is for configuring high-end multiprocessor
 	  server systems.  If in doubt, say N.
 
 # VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
 # VIRTUAL_MEM_MAP has been retained for historical reasons.
 config VIRTUAL_MEM_MAP
 	bool "Virtual mem map"
 	depends on !SPARSEMEM
 	default y if !IA64_HP_SIM
 	help
 	  Say Y to compile the kernel with support for a virtual mem map.
 	  This code also only takes effect if a memory hole of greater than
 	  1 Gb is found during boot.  You must turn this option on if you
 	  require the DISCONTIGMEM option for your machine.  If you are
 	  unsure, say Y.
 
 config HOLES_IN_ZONE
 	bool
 	default y if VIRTUAL_MEM_MAP
 
 config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
 config IA32_SUPPORT
 	bool "Support for Linux/x86 binaries"
 	help
 	  IA-64 processors can execute IA-32 (X86) instructions.  By
 	  saying Y here, the kernel will include IA-32 system call
 	  emulation support which makes it possible to transparently
 	  run IA-32 Linux binaries on an IA-64 Linux system.
 	  If in doubt, say Y.
 
 config COMPAT
 	bool
 	depends on IA32_SUPPORT
 	default y
 
 config IA64_MCA_RECOVERY
 	tristate "MCA recovery from errors other than TLB."
 
 config PERFMON
 	bool "Performance monitor support"
 	help
 	  Selects whether support for the IA-64 performance monitor hardware
 	  is included in the kernel.  This makes some kernel data-structures a
 	  little bigger and slows down execution a bit, but it is generally
 	  a good idea to turn this on.  If you're unsure, say Y.
 
 config IA64_PALINFO
 	tristate "/proc/pal support"
 	help
 	  If you say Y here, you are able to get PAL (Processor Abstraction
 	  Layer) information in /proc/pal.  This contains useful information
 	  about the processors in your systems, such as cache and TLB sizes
 	  and the PAL firmware version in use.
 
 	  To use this option, you have to ensure that the "/proc file system
 	  support" (CONFIG_PROC_FS) is enabled, too.
 
 config SGI_SN
 	def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
 
 source "drivers/firmware/Kconfig"
 
 source "fs/Kconfig.binfmt"
 
 endmenu
 
 menu "Power management and ACPI"
 
 source "kernel/power/Kconfig"
 
 source "drivers/acpi/Kconfig"
 
 if PM
 
 source "arch/ia64/kernel/cpufreq/Kconfig"
 
 endif
 
 endmenu
 
 if !IA64_HP_SIM
 
 menu "Bus options (PCI, PCMCIA)"
 
 config PCI
 	bool "PCI support"
 	help
 	  Real IA-64 machines all have PCI/PCI-X/PCI Express busses.  Say Y
 	  here unless you are using a simulator without PCI support.
 
 config PCI_DOMAINS
 	bool
 	default PCI
 
 source "drivers/pci/Kconfig"
 
 source "drivers/pci/hotplug/Kconfig"
 
 source "drivers/pcmcia/Kconfig"
 
 endmenu
 
 endif
 
 source "net/Kconfig"
 
 source "drivers/Kconfig"
 
 source "fs/Kconfig"
 
 source "lib/Kconfig"
 
 #
 # Use the generic interrupt handling code in kernel/irq/:
 #
 config GENERIC_HARDIRQS
 	bool
 	default y
 
 config GENERIC_IRQ_PROBE
 	bool
 	default y
 
 config GENERIC_PENDING_IRQ
 	bool
 	depends on GENERIC_HARDIRQS && SMP
 	default y
 
 source "arch/ia64/hp/sim/Kconfig"
 
 menu "Instrumentation Support"
 	depends on EXPERIMENTAL
 
 source "arch/ia64/oprofile/Kconfig"
 
 config KPROBES
 	bool "Kprobes (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && MODULES
 	help
 	  Kprobes allows you to trap at almost any kernel address and
 	  execute a callback function.  register_kprobe() establishes
 	  a probepoint and specifies the callback.  Kprobes is useful
 	  for kernel debugging, non-intrusive instrumentation and testing.
 	  If in doubt, say "N".
 endmenu
 
 source "arch/ia64/Kconfig.debug"
 
 source "security/Kconfig"
 
 source "crypto/Kconfig"
 
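With CONFIG_DMI now always set on ia64, generic code can query the
identification strings collected by dmi_scan_machine() directly; a
minimal sketch using the exported helpers from the file above:

	#include <linux/dmi.h>

	static void __init report_board(void)
	{
		char *vendor = dmi_get_system_info(DMI_SYS_VENDOR);
		char *product = dmi_get_system_info(DMI_PRODUCT_NAME);

		/* Either pointer may be NULL if the table lacked the field. */
		if (vendor && product)
			printk(KERN_INFO "Board: %s %s\n", vendor, product);
	}
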
arch/ia64/kernel/Makefile

 #
 # Makefile for the linux kernel.
 #
 
 extra-y	:= head.o init_task.o vmlinux.lds
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o	\
 	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o		\
 	 salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
-	 unwind.o mca.o mca_asm.o topology.o
+	 unwind.o mca.o mca_asm.o topology.o dmi_scan.o
 
 obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
 obj-$(CONFIG_IA64_GENERIC)	+= acpi-ext.o
 obj-$(CONFIG_IA64_HP_ZX1)	+= acpi-ext.o
 obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
 obj-y				+= acpi-processor.o
 endif
 
 obj-$(CONFIG_IA64_PALINFO)	+= palinfo.o
 obj-$(CONFIG_IOSAPIC)		+= iosapic.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_SMP)		+= smp.o smpboot.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_PERFMON)		+= perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE)	+= cyclone.o
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
 obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)	+= uncached.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
+dmi_scan-y			+= ../../i386/kernel/dmi_scan.o
 
 # The gate DSO image is built using a special linker script.
 targets += gate.so gate-syms.o
 
 extra-y += gate.so gate-syms.o gate.lds gate.o
 
 # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state.
 CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
 
 CPPFLAGS_gate.lds := -P -C -U$(ARCH)
 
 quiet_cmd_gate = GATE $@
       cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
 
 GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1
 $(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
 	$(call if_changed,gate)
 
 $(obj)/built-in.o: $(obj)/gate-syms.o
 $(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o
 
 GATECFLAGS_gate-syms.o = -r
 $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
 	$(call if_changed,gate)
 
 # gate-data.o contains the gate DSO image as data in section .data.gate.
 # We must build gate.so before we can assemble it.
 # Note: kbuild does not track this dependency due to usage of .incbin
 $(obj)/gate-data.o: $(obj)/gate.so
 
arch/ia64/kernel/setup.c
1 /* 1 /*
2 * Architecture-specific setup. 2 * Architecture-specific setup.
3 * 3 *
4 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co 4 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com> 5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * Stephane Eranian <eranian@hpl.hp.com> 6 * Stephane Eranian <eranian@hpl.hp.com>
7 * Copyright (C) 2000, 2004 Intel Corp 7 * Copyright (C) 2000, 2004 Intel Corp
8 * Rohit Seth <rohit.seth@intel.com> 8 * Rohit Seth <rohit.seth@intel.com>
9 * Suresh Siddha <suresh.b.siddha@intel.com> 9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Gordon Jin <gordon.jin@intel.com> 10 * Gordon Jin <gordon.jin@intel.com>
11 * Copyright (C) 1999 VA Linux Systems 11 * Copyright (C) 1999 VA Linux Systems
12 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> 12 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
13 * 13 *
14 * 12/26/04 S.Siddha, G.Jin, R.Seth 14 * 12/26/04 S.Siddha, G.Jin, R.Seth
15 * Add multi-threading and multi-core detection 15 * Add multi-threading and multi-core detection
16 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo(). 16 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
17 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map 17 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
18 * 03/31/00 R.Seth cpu_initialized and current->processor fixes 18 * 03/31/00 R.Seth cpu_initialized and current->processor fixes
19 * 02/04/00 D.Mosberger some more get_cpuinfo fixes... 19 * 02/04/00 D.Mosberger some more get_cpuinfo fixes...
20 * 02/01/00 R.Seth fixed get_cpuinfo for SMP 20 * 02/01/00 R.Seth fixed get_cpuinfo for SMP
21 * 01/07/99 S.Eranian added the support for command line argument 21 * 01/07/99 S.Eranian added the support for command line argument
22 * 06/24/99 W.Drummond added boot_cpu_data. 22 * 06/24/99 W.Drummond added boot_cpu_data.
23 * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()" 23 * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()"
24 */ 24 */
25 #include <linux/config.h> 25 #include <linux/config.h>
26 #include <linux/module.h> 26 #include <linux/module.h>
27 #include <linux/init.h> 27 #include <linux/init.h>
28 28
29 #include <linux/acpi.h> 29 #include <linux/acpi.h>
30 #include <linux/bootmem.h> 30 #include <linux/bootmem.h>
31 #include <linux/console.h> 31 #include <linux/console.h>
32 #include <linux/delay.h> 32 #include <linux/delay.h>
33 #include <linux/kernel.h> 33 #include <linux/kernel.h>
34 #include <linux/reboot.h> 34 #include <linux/reboot.h>
35 #include <linux/sched.h> 35 #include <linux/sched.h>
36 #include <linux/seq_file.h> 36 #include <linux/seq_file.h>
37 #include <linux/string.h> 37 #include <linux/string.h>
38 #include <linux/threads.h> 38 #include <linux/threads.h>
39 #include <linux/tty.h> 39 #include <linux/tty.h>
40 #include <linux/dmi.h>
40 #include <linux/serial.h> 41 #include <linux/serial.h>
41 #include <linux/serial_core.h> 42 #include <linux/serial_core.h>
42 #include <linux/efi.h> 43 #include <linux/efi.h>
43 #include <linux/initrd.h> 44 #include <linux/initrd.h>
44 #include <linux/pm.h> 45 #include <linux/pm.h>
45 #include <linux/cpufreq.h> 46 #include <linux/cpufreq.h>
46 47
47 #include <asm/ia32.h> 48 #include <asm/ia32.h>
48 #include <asm/machvec.h> 49 #include <asm/machvec.h>
49 #include <asm/mca.h> 50 #include <asm/mca.h>
50 #include <asm/meminit.h> 51 #include <asm/meminit.h>
51 #include <asm/page.h> 52 #include <asm/page.h>
52 #include <asm/patch.h> 53 #include <asm/patch.h>
53 #include <asm/pgtable.h> 54 #include <asm/pgtable.h>
54 #include <asm/processor.h> 55 #include <asm/processor.h>
55 #include <asm/sal.h> 56 #include <asm/sal.h>
56 #include <asm/sections.h> 57 #include <asm/sections.h>
57 #include <asm/serial.h> 58 #include <asm/serial.h>
58 #include <asm/setup.h> 59 #include <asm/setup.h>
59 #include <asm/smp.h> 60 #include <asm/smp.h>
60 #include <asm/system.h> 61 #include <asm/system.h>
61 #include <asm/unistd.h> 62 #include <asm/unistd.h>
62 #include <asm/system.h> 63 #include <asm/system.h>
63 64
64 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE) 65 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
65 # error "struct cpuinfo_ia64 too big!" 66 # error "struct cpuinfo_ia64 too big!"
66 #endif 67 #endif
67 68
68 #ifdef CONFIG_SMP 69 #ifdef CONFIG_SMP
69 unsigned long __per_cpu_offset[NR_CPUS]; 70 unsigned long __per_cpu_offset[NR_CPUS];
70 EXPORT_SYMBOL(__per_cpu_offset); 71 EXPORT_SYMBOL(__per_cpu_offset);
71 #endif 72 #endif
72 73
73 extern void ia64_setup_printk_clock(void); 74 extern void ia64_setup_printk_clock(void);
74 75
75 DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info); 76 DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
76 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); 77 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
77 DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8); 78 DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
78 unsigned long ia64_cycles_per_usec; 79 unsigned long ia64_cycles_per_usec;
79 struct ia64_boot_param *ia64_boot_param; 80 struct ia64_boot_param *ia64_boot_param;
80 struct screen_info screen_info; 81 struct screen_info screen_info;
81 unsigned long vga_console_iobase; 82 unsigned long vga_console_iobase;
82 unsigned long vga_console_membase; 83 unsigned long vga_console_membase;
83 84
84 static struct resource data_resource = { 85 static struct resource data_resource = {
85 .name = "Kernel data", 86 .name = "Kernel data",
86 .flags = IORESOURCE_BUSY | IORESOURCE_MEM 87 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
87 }; 88 };
88 89
89 static struct resource code_resource = { 90 static struct resource code_resource = {
90 .name = "Kernel code", 91 .name = "Kernel code",
91 .flags = IORESOURCE_BUSY | IORESOURCE_MEM 92 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
92 }; 93 };
93 extern void efi_initialize_iomem_resources(struct resource *, 94 extern void efi_initialize_iomem_resources(struct resource *,
94 struct resource *); 95 struct resource *);
95 extern char _text[], _end[], _etext[]; 96 extern char _text[], _end[], _etext[];
96 97
97 unsigned long ia64_max_cacheline_size; 98 unsigned long ia64_max_cacheline_size;
98 99
99 int dma_get_cache_alignment(void) 100 int dma_get_cache_alignment(void)
100 { 101 {
101 return ia64_max_cacheline_size; 102 return ia64_max_cacheline_size;
102 } 103 }
103 EXPORT_SYMBOL(dma_get_cache_alignment); 104 EXPORT_SYMBOL(dma_get_cache_alignment);
104 105
105 unsigned long ia64_iobase; /* virtual address for I/O accesses */ 106 unsigned long ia64_iobase; /* virtual address for I/O accesses */
106 EXPORT_SYMBOL(ia64_iobase); 107 EXPORT_SYMBOL(ia64_iobase);
107 struct io_space io_space[MAX_IO_SPACES]; 108 struct io_space io_space[MAX_IO_SPACES];
108 EXPORT_SYMBOL(io_space); 109 EXPORT_SYMBOL(io_space);
109 unsigned int num_io_spaces; 110 unsigned int num_io_spaces;
110 111
111 /* 112 /*
112 * "flush_icache_range()" needs to know what processor-dependent stride size to use 113 * "flush_icache_range()" needs to know what processor-dependent stride size to use
113 * when it makes i-cache(s) coherent with d-caches. 114 * when it makes i-cache(s) coherent with d-caches.
114 */ 115 */
115 #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */ 116 #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
116 unsigned long ia64_i_cache_stride_shift = ~0; 117 unsigned long ia64_i_cache_stride_shift = ~0;
117 118
118 /* 119 /*
119 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This 120 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This
120 * mask specifies the address bits that must be 0 in order for two buffers to be 121 * mask specifies the address bits that must be 0 in order for two buffers to be
121 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start 122 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
122 * address of the second buffer must be aligned to (merge_mask+1) in order to be 123 * address of the second buffer must be aligned to (merge_mask+1) in order to be
123 * mergeable). By default, we assume there is no I/O MMU which can merge physically 124 * mergeable). By default, we assume there is no I/O MMU which can merge physically
124 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu 125 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
125 * page-size of 2^64. 126 * page-size of 2^64.
126 */ 127 */
127 unsigned long ia64_max_iommu_merge_mask = ~0UL; 128 unsigned long ia64_max_iommu_merge_mask = ~0UL;
128 EXPORT_SYMBOL(ia64_max_iommu_merge_mask); 129 EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
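/*
 * Worked example (annotation, not in the source): an I/O MMU with a 4KB
 * page size would set this mask to 0xfff, so two buffers merge only when
 * the first ends and the second starts on a 4KB boundary. The ~0UL
 * default demands alignment to 2^64, which no address satisfies, so
 * merging is effectively disabled.
 */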
129 130
130 /* 131 /*
131 * We use a special marker for the end of memory; it occupies the extra (+1) slot 132 * We use a special marker for the end of memory; it occupies the extra (+1) slot
132 */ 133 */
133 struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata; 134 struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
134 int num_rsvd_regions __initdata; 135 int num_rsvd_regions __initdata;
135 136
136 137
137 /* 138 /*
138 * Filter incoming memory segments based on the primitive map created from the boot 139 * Filter incoming memory segments based on the primitive map created from the boot
139 * parameters. Segments contained in the map are removed from the memory ranges. A 140 * parameters. Segments contained in the map are removed from the memory ranges. A
140 * caller-specified function is called with the memory ranges that remain after filtering. 141 * caller-specified function is called with the memory ranges that remain after filtering.
141 * This routine does not assume the incoming segments are sorted. 142 * This routine does not assume the incoming segments are sorted.
142 */ 143 */
143 int __init 144 int __init
144 filter_rsvd_memory (unsigned long start, unsigned long end, void *arg) 145 filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
145 { 146 {
146 unsigned long range_start, range_end, prev_start; 147 unsigned long range_start, range_end, prev_start;
147 void (*func)(unsigned long, unsigned long, int); 148 void (*func)(unsigned long, unsigned long, int);
148 int i; 149 int i;
149 150
150 #if IGNORE_PFN0 151 #if IGNORE_PFN0
151 if (start == PAGE_OFFSET) { 152 if (start == PAGE_OFFSET) {
152 printk(KERN_WARNING "warning: skipping physical page 0\n"); 153 printk(KERN_WARNING "warning: skipping physical page 0\n");
153 start += PAGE_SIZE; 154 start += PAGE_SIZE;
154 if (start >= end) return 0; 155 if (start >= end) return 0;
155 } 156 }
156 #endif 157 #endif
157 /* 158 /*
158 * lowest possible address (walker uses virtual) 159 * lowest possible address (walker uses virtual)
159 */ 160 */
160 prev_start = PAGE_OFFSET; 161 prev_start = PAGE_OFFSET;
161 func = arg; 162 func = arg;
162 163
163 for (i = 0; i < num_rsvd_regions; ++i) { 164 for (i = 0; i < num_rsvd_regions; ++i) {
164 range_start = max(start, prev_start); 165 range_start = max(start, prev_start);
165 range_end = min(end, rsvd_region[i].start); 166 range_end = min(end, rsvd_region[i].start);
166 167
167 if (range_start < range_end) 168 if (range_start < range_end)
168 call_pernode_memory(__pa(range_start), range_end - range_start, func); 169 call_pernode_memory(__pa(range_start), range_end - range_start, func);
169 170
170 /* nothing more available in this segment */ 171 /* nothing more available in this segment */
171 if (range_end == end) return 0; 172 if (range_end == end) return 0;
172 173
173 prev_start = rsvd_region[i].end; 174 prev_start = rsvd_region[i].end;
174 } 175 }
175 /* end of memory marker allows full processing inside loop body */ 176 /* end of memory marker allows full processing inside loop body */
176 return 0; 177 return 0;
177 } 178 }
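/*
 * Worked example (annotation, not in the source): for a segment [S, E)
 * straddling one reserved region [A, B), i.e. S < A < B < E, the loop
 * calls func on [S, A) while visiting that region and on [B, E) while
 * visiting the ~0UL end-of-memory marker; the marker is what lets the
 * tail range be emitted inside the loop body rather than special-cased.
 */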
178 179
179 static void __init 180 static void __init
180 sort_regions (struct rsvd_region *rsvd_region, int max) 181 sort_regions (struct rsvd_region *rsvd_region, int max)
181 { 182 {
182 int j; 183 int j;
183 184
184 /* simple bubble sorting */ 185 /* simple bubble sorting */
185 while (max--) { 186 while (max--) {
186 for (j = 0; j < max; ++j) { 187 for (j = 0; j < max; ++j) {
187 if (rsvd_region[j].start > rsvd_region[j+1].start) { 188 if (rsvd_region[j].start > rsvd_region[j+1].start) {
188 struct rsvd_region tmp; 189 struct rsvd_region tmp;
189 tmp = rsvd_region[j]; 190 tmp = rsvd_region[j];
190 rsvd_region[j] = rsvd_region[j + 1]; 191 rsvd_region[j] = rsvd_region[j + 1];
191 rsvd_region[j + 1] = tmp; 192 rsvd_region[j + 1] = tmp;
192 } 193 }
193 } 194 }
194 } 195 }
195 } 196 }
196 197
197 /* 198 /*
198 * Request address space for all standard resources 199 * Request address space for all standard resources
199 */ 200 */
200 static int __init register_memory(void) 201 static int __init register_memory(void)
201 { 202 {
202 code_resource.start = ia64_tpa(_text); 203 code_resource.start = ia64_tpa(_text);
203 code_resource.end = ia64_tpa(_etext) - 1; 204 code_resource.end = ia64_tpa(_etext) - 1;
204 data_resource.start = ia64_tpa(_etext); 205 data_resource.start = ia64_tpa(_etext);
205 data_resource.end = ia64_tpa(_end) - 1; 206 data_resource.end = ia64_tpa(_end) - 1;
206 efi_initialize_iomem_resources(&code_resource, &data_resource); 207 efi_initialize_iomem_resources(&code_resource, &data_resource);
207 208
208 return 0; 209 return 0;
209 } 210 }
210 211
211 __initcall(register_memory); 212 __initcall(register_memory);
212 213
213 /** 214 /**
214 * reserve_memory - setup reserved memory areas 215 * reserve_memory - setup reserved memory areas
215 * 216 *
216 * Set up the reserved memory areas set aside for the boot parameters, 217 * Set up the reserved memory areas set aside for the boot parameters,
217 * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined, 218 * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
218 * see include/asm-ia64/meminit.h if you need to define more. 219 * see include/asm-ia64/meminit.h if you need to define more.
219 */ 220 */
220 void __init 221 void __init
221 reserve_memory (void) 222 reserve_memory (void)
222 { 223 {
223 int n = 0; 224 int n = 0;
224 225
225 /* 226 /*
226 * none of the entries in this table overlap 227 * none of the entries in this table overlap
227 */ 228 */
228 rsvd_region[n].start = (unsigned long) ia64_boot_param; 229 rsvd_region[n].start = (unsigned long) ia64_boot_param;
229 rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param); 230 rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param);
230 n++; 231 n++;
231 232
232 rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap); 233 rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
233 rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size; 234 rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
234 n++; 235 n++;
235 236
236 rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line); 237 rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
237 rsvd_region[n].end = (rsvd_region[n].start 238 rsvd_region[n].end = (rsvd_region[n].start
238 + strlen(__va(ia64_boot_param->command_line)) + 1); 239 + strlen(__va(ia64_boot_param->command_line)) + 1);
239 n++; 240 n++;
240 241
241 rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START); 242 rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
242 rsvd_region[n].end = (unsigned long) ia64_imva(_end); 243 rsvd_region[n].end = (unsigned long) ia64_imva(_end);
243 n++; 244 n++;
244 245
245 #ifdef CONFIG_BLK_DEV_INITRD 246 #ifdef CONFIG_BLK_DEV_INITRD
246 if (ia64_boot_param->initrd_start) { 247 if (ia64_boot_param->initrd_start) {
247 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); 248 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
248 rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size; 249 rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size;
249 n++; 250 n++;
250 } 251 }
251 #endif 252 #endif
252 253
253 efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); 254 efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
254 n++; 255 n++;
255 256
256 /* end of memory marker */ 257 /* end of memory marker */
257 rsvd_region[n].start = ~0UL; 258 rsvd_region[n].start = ~0UL;
258 rsvd_region[n].end = ~0UL; 259 rsvd_region[n].end = ~0UL;
259 n++; 260 n++;
260 261
261 num_rsvd_regions = n; 262 num_rsvd_regions = n;
262 263
263 sort_regions(rsvd_region, num_rsvd_regions); 264 sort_regions(rsvd_region, num_rsvd_regions);
264 } 265 }
265 266
266 /** 267 /**
267 * find_initrd - get initrd parameters from the boot parameter structure 268 * find_initrd - get initrd parameters from the boot parameter structure
268 * 269 *
269 * Grab the initrd start and end from the boot parameter struct given us by 270 * Grab the initrd start and end from the boot parameter struct given us by
270 * the boot loader. 271 * the boot loader.
271 */ 272 */
272 void __init 273 void __init
273 find_initrd (void) 274 find_initrd (void)
274 { 275 {
275 #ifdef CONFIG_BLK_DEV_INITRD 276 #ifdef CONFIG_BLK_DEV_INITRD
276 if (ia64_boot_param->initrd_start) { 277 if (ia64_boot_param->initrd_start) {
277 initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start); 278 initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
278 initrd_end = initrd_start+ia64_boot_param->initrd_size; 279 initrd_end = initrd_start+ia64_boot_param->initrd_size;
279 280
280 printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n", 281 printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
281 initrd_start, ia64_boot_param->initrd_size); 282 initrd_start, ia64_boot_param->initrd_size);
282 } 283 }
283 #endif 284 #endif
284 } 285 }
285 286
286 static void __init 287 static void __init
287 io_port_init (void) 288 io_port_init (void)
288 { 289 {
289 unsigned long phys_iobase; 290 unsigned long phys_iobase;
290 291
291 /* 292 /*
292 * Set `iobase' based on the EFI memory map or, failing that, the 293 * Set `iobase' based on the EFI memory map or, failing that, the
293 * value firmware left in ar.k0. 294 * value firmware left in ar.k0.
294 * 295 *
295 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute 296 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
296 * the port's virtual address, so ia32_load_state() loads it with a 297 * the port's virtual address, so ia32_load_state() loads it with a
297 * user virtual address. But in ia64 mode, glibc uses the 298 * user virtual address. But in ia64 mode, glibc uses the
298 * *physical* address in ar.k0 to mmap the appropriate area from 299 * *physical* address in ar.k0 to mmap the appropriate area from
299 * /dev/mem, and the inX()/outX() interfaces use MMIO. In both 300 * /dev/mem, and the inX()/outX() interfaces use MMIO. In both
300 * cases, user-mode can only use the legacy 0-64K I/O port space. 301 * cases, user-mode can only use the legacy 0-64K I/O port space.
301 * 302 *
302 * ar.k0 is not involved in kernel I/O port accesses, which can use 303 * ar.k0 is not involved in kernel I/O port accesses, which can use
303 * any of the I/O port spaces and are done via MMIO using the 304 * any of the I/O port spaces and are done via MMIO using the
304 * virtual mmio_base from the appropriate io_space[]. 305 * virtual mmio_base from the appropriate io_space[].
305 */ 306 */
306 phys_iobase = efi_get_iobase(); 307 phys_iobase = efi_get_iobase();
307 if (!phys_iobase) { 308 if (!phys_iobase) {
308 phys_iobase = ia64_get_kr(IA64_KR_IO_BASE); 309 phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
309 printk(KERN_INFO "No I/O port range found in EFI memory map, " 310 printk(KERN_INFO "No I/O port range found in EFI memory map, "
310 "falling back to AR.KR0 (0x%lx)\n", phys_iobase); 311 "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
311 } 312 }
312 ia64_iobase = (unsigned long) ioremap(phys_iobase, 0); 313 ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
313 ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); 314 ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
314 315
315 /* setup legacy IO port space */ 316 /* setup legacy IO port space */
316 io_space[0].mmio_base = ia64_iobase; 317 io_space[0].mmio_base = ia64_iobase;
317 io_space[0].sparse = 1; 318 io_space[0].sparse = 1;
318 num_io_spaces = 1; 319 num_io_spaces = 1;
319 } 320 }
320 321
321 /** 322 /**
322 * early_console_setup - setup debugging console 323 * early_console_setup - setup debugging console
323 * 324 *
324 * Consoles started here require little enough setup that we can start using 325 * Consoles started here require little enough setup that we can start using
325 * them very early in the boot process, either right after the machine 326 * them very early in the boot process, either right after the machine
326 * vector initialization, or even before that, if the drivers can detect their hardware. 327 * vector initialization, or even before that, if the drivers can detect their hardware.
327 * 328 *
328 * Returns non-zero if a console couldn't be set up. 329 * Returns non-zero if a console couldn't be set up.
329 */ 330 */
330 static inline int __init 331 static inline int __init
331 early_console_setup (char *cmdline) 332 early_console_setup (char *cmdline)
332 { 333 {
333 int earlycons = 0; 334 int earlycons = 0;
334 335
335 #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE 336 #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
336 { 337 {
337 extern int sn_serial_console_early_setup(void); 338 extern int sn_serial_console_early_setup(void);
338 if (!sn_serial_console_early_setup()) 339 if (!sn_serial_console_early_setup())
339 earlycons++; 340 earlycons++;
340 } 341 }
341 #endif 342 #endif
342 #ifdef CONFIG_EFI_PCDP 343 #ifdef CONFIG_EFI_PCDP
343 if (!efi_setup_pcdp_console(cmdline)) 344 if (!efi_setup_pcdp_console(cmdline))
344 earlycons++; 345 earlycons++;
345 #endif 346 #endif
346 #ifdef CONFIG_SERIAL_8250_CONSOLE 347 #ifdef CONFIG_SERIAL_8250_CONSOLE
347 if (!early_serial_console_init(cmdline)) 348 if (!early_serial_console_init(cmdline))
348 earlycons++; 349 earlycons++;
349 #endif 350 #endif
350 351
351 return (earlycons) ? 0 : -1; 352 return (earlycons) ? 0 : -1;
352 } 353 }
353 354
354 static inline void 355 static inline void
355 mark_bsp_online (void) 356 mark_bsp_online (void)
356 { 357 {
357 #ifdef CONFIG_SMP 358 #ifdef CONFIG_SMP
358 /* If we register an early console, allow CPU 0 to printk */ 359 /* If we register an early console, allow CPU 0 to printk */
359 cpu_set(smp_processor_id(), cpu_online_map); 360 cpu_set(smp_processor_id(), cpu_online_map);
360 #endif 361 #endif
361 } 362 }
362 363
363 #ifdef CONFIG_SMP 364 #ifdef CONFIG_SMP
364 static void __init 365 static void __init
365 check_for_logical_procs (void) 366 check_for_logical_procs (void)
366 { 367 {
367 pal_logical_to_physical_t info; 368 pal_logical_to_physical_t info;
368 s64 status; 369 s64 status;
369 370
370 status = ia64_pal_logical_to_phys(0, &info); 371 status = ia64_pal_logical_to_phys(0, &info);
371 if (status == -1) { 372 if (status == -1) {
372 printk(KERN_INFO "No logical to physical processor mapping " 373 printk(KERN_INFO "No logical to physical processor mapping "
373 "available\n"); 374 "available\n");
374 return; 375 return;
375 } 376 }
376 if (status) { 377 if (status) {
377 printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n", 378 printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
378 status); 379 status);
379 return; 380 return;
380 } 381 }
381 /* 382 /*
382 * Total number of siblings that the BSP has, though not all of them 383 * Total number of siblings that the BSP has, though not all of them
383 * may have booted successfully. The number that actually booted 384 * may have booted successfully. The number that actually booted
384 * is in info.overview_num_log. 385 * is in info.overview_num_log.
385 */ 386 */
386 smp_num_siblings = info.overview_tpc; 387 smp_num_siblings = info.overview_tpc;
387 smp_num_cpucores = info.overview_cpp; 388 smp_num_cpucores = info.overview_cpp;
388 } 389 }
389 #endif 390 #endif
390 391
391 static __initdata int nomca; 392 static __initdata int nomca;
392 static __init int setup_nomca(char *s) 393 static __init int setup_nomca(char *s)
393 { 394 {
394 nomca = 1; 395 nomca = 1;
395 return 0; 396 return 0;
396 } 397 }
397 early_param("nomca", setup_nomca); 398 early_param("nomca", setup_nomca);
398 399
399 void __init 400 void __init
400 setup_arch (char **cmdline_p) 401 setup_arch (char **cmdline_p)
401 { 402 {
402 unw_init(); 403 unw_init();
403 404
404 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); 405 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
405 406
406 *cmdline_p = __va(ia64_boot_param->command_line); 407 *cmdline_p = __va(ia64_boot_param->command_line);
407 strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE); 408 strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
408 409
409 efi_init(); 410 efi_init();
410 io_port_init(); 411 io_port_init();
411 412
412 parse_early_param(); 413 parse_early_param();
413 414
414 #ifdef CONFIG_IA64_GENERIC 415 #ifdef CONFIG_IA64_GENERIC
415 machvec_init(NULL); 416 machvec_init(NULL);
416 #endif 417 #endif
417 418
418 if (early_console_setup(*cmdline_p) == 0) 419 if (early_console_setup(*cmdline_p) == 0)
419 mark_bsp_online(); 420 mark_bsp_online();
420 421
421 #ifdef CONFIG_ACPI 422 #ifdef CONFIG_ACPI
422 /* Initialize the ACPI boot-time table parser */ 423 /* Initialize the ACPI boot-time table parser */
423 acpi_table_init(); 424 acpi_table_init();
424 # ifdef CONFIG_ACPI_NUMA 425 # ifdef CONFIG_ACPI_NUMA
425 acpi_numa_init(); 426 acpi_numa_init();
426 # endif 427 # endif
427 #else 428 #else
428 # ifdef CONFIG_SMP 429 # ifdef CONFIG_SMP
429 smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */ 430 smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
430 # endif 431 # endif
431 #endif /* CONFIG_ACPI */ 432 #endif /* CONFIG_ACPI */
432 433
433 find_memory(); 434 find_memory();
434 435
435 /* process SAL system table: */ 436 /* process SAL system table: */
436 ia64_sal_init(efi.sal_systab); 437 ia64_sal_init(efi.sal_systab);
437 438
438 ia64_setup_printk_clock(); 439 ia64_setup_printk_clock();
439 440
440 #ifdef CONFIG_SMP 441 #ifdef CONFIG_SMP
441 cpu_physical_id(0) = hard_smp_processor_id(); 442 cpu_physical_id(0) = hard_smp_processor_id();
442 443
443 cpu_set(0, cpu_sibling_map[0]); 444 cpu_set(0, cpu_sibling_map[0]);
444 cpu_set(0, cpu_core_map[0]); 445 cpu_set(0, cpu_core_map[0]);
445 446
446 check_for_logical_procs(); 447 check_for_logical_procs();
447 if (smp_num_cpucores > 1) 448 if (smp_num_cpucores > 1)
448 printk(KERN_INFO 449 printk(KERN_INFO
449 "cpu package is Multi-Core capable: number of cores=%d\n", 450 "cpu package is Multi-Core capable: number of cores=%d\n",
450 smp_num_cpucores); 451 smp_num_cpucores);
451 if (smp_num_siblings > 1) 452 if (smp_num_siblings > 1)
452 printk(KERN_INFO 453 printk(KERN_INFO
453 "cpu package is Multi-Threading capable: number of siblings=%d\n", 454 "cpu package is Multi-Threading capable: number of siblings=%d\n",
454 smp_num_siblings); 455 smp_num_siblings);
455 #endif 456 #endif
456 457
457 cpu_init(); /* initialize the bootstrap CPU */ 458 cpu_init(); /* initialize the bootstrap CPU */
458 mmu_context_init(); /* initialize context_id bitmap */ 459 mmu_context_init(); /* initialize context_id bitmap */
459 460
460 #ifdef CONFIG_ACPI 461 #ifdef CONFIG_ACPI
461 acpi_boot_init(); 462 acpi_boot_init();
462 #endif 463 #endif
463 464
464 #ifdef CONFIG_VT 465 #ifdef CONFIG_VT
465 if (!conswitchp) { 466 if (!conswitchp) {
466 # if defined(CONFIG_DUMMY_CONSOLE) 467 # if defined(CONFIG_DUMMY_CONSOLE)
467 conswitchp = &dummy_con; 468 conswitchp = &dummy_con;
468 # endif 469 # endif
469 # if defined(CONFIG_VGA_CONSOLE) 470 # if defined(CONFIG_VGA_CONSOLE)
470 /* 471 /*
471 * Non-legacy systems may route legacy VGA MMIO range to system 472 * Non-legacy systems may route legacy VGA MMIO range to system
472 * memory. vga_con probes the MMIO hole, so memory looks like 473 * memory. vga_con probes the MMIO hole, so memory looks like
473 * a VGA device to it. The EFI memory map can tell us if it's 474 * a VGA device to it. The EFI memory map can tell us if it's
474 * memory so we can avoid this problem. 475 * memory so we can avoid this problem.
475 */ 476 */
476 if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY) 477 if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
477 conswitchp = &vga_con; 478 conswitchp = &vga_con;
478 # endif 479 # endif
479 } 480 }
480 #endif 481 #endif
481 482
482 /* enable IA-64 Machine Check Abort Handling unless disabled */ 483 /* enable IA-64 Machine Check Abort Handling unless disabled */
483 if (!nomca) 484 if (!nomca)
484 ia64_mca_init(); 485 ia64_mca_init();
485 486
486 platform_setup(cmdline_p); 487 platform_setup(cmdline_p);
487 paging_init(); 488 paging_init();
488 } 489 }
489 490
490 /* 491 /*
491 * Display CPU info for all CPUs. 492 * Display CPU info for all CPUs.
492 */ 493 */
493 static int 494 static int
494 show_cpuinfo (struct seq_file *m, void *v) 495 show_cpuinfo (struct seq_file *m, void *v)
495 { 496 {
496 #ifdef CONFIG_SMP 497 #ifdef CONFIG_SMP
497 # define lpj c->loops_per_jiffy 498 # define lpj c->loops_per_jiffy
498 # define cpunum c->cpu 499 # define cpunum c->cpu
499 #else 500 #else
500 # define lpj loops_per_jiffy 501 # define lpj loops_per_jiffy
501 # define cpunum 0 502 # define cpunum 0
502 #endif 503 #endif
503 static struct { 504 static struct {
504 unsigned long mask; 505 unsigned long mask;
505 const char *feature_name; 506 const char *feature_name;
506 } feature_bits[] = { 507 } feature_bits[] = {
507 { 1UL << 0, "branchlong" }, 508 { 1UL << 0, "branchlong" },
508 { 1UL << 1, "spontaneous deferral"}, 509 { 1UL << 1, "spontaneous deferral"},
509 { 1UL << 2, "16-byte atomic ops" } 510 { 1UL << 2, "16-byte atomic ops" }
510 }; 511 };
511 char family[32], features[128], *cp, sep; 512 char family[32], features[128], *cp, sep;
512 struct cpuinfo_ia64 *c = v; 513 struct cpuinfo_ia64 *c = v;
513 unsigned long mask; 514 unsigned long mask;
514 unsigned long proc_freq; 515 unsigned long proc_freq;
515 int i; 516 int i;
516 517
517 mask = c->features; 518 mask = c->features;
518 519
519 switch (c->family) { 520 switch (c->family) {
520 case 0x07: memcpy(family, "Itanium", 8); break; 521 case 0x07: memcpy(family, "Itanium", 8); break;
521 case 0x1f: memcpy(family, "Itanium 2", 10); break; 522 case 0x1f: memcpy(family, "Itanium 2", 10); break;
522 default: sprintf(family, "%u", c->family); break; 523 default: sprintf(family, "%u", c->family); break;
523 } 524 }
524 525
525 /* build the feature string: */ 526 /* build the feature string: */
526 memcpy(features, " standard", 10); 527 memcpy(features, " standard", 10);
527 cp = features; 528 cp = features;
528 sep = 0; 529 sep = 0;
529 for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) { 530 for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
530 if (mask & feature_bits[i].mask) { 531 if (mask & feature_bits[i].mask) {
531 if (sep) 532 if (sep)
532 *cp++ = sep; 533 *cp++ = sep;
533 sep = ','; 534 sep = ',';
534 *cp++ = ' '; 535 *cp++ = ' ';
535 strcpy(cp, feature_bits[i].feature_name); 536 strcpy(cp, feature_bits[i].feature_name);
536 cp += strlen(feature_bits[i].feature_name); 537 cp += strlen(feature_bits[i].feature_name);
537 mask &= ~feature_bits[i].mask; 538 mask &= ~feature_bits[i].mask;
538 } 539 }
539 } 540 }
540 if (mask) { 541 if (mask) {
541 /* print unknown features as a hex value: */ 542 /* print unknown features as a hex value: */
542 if (sep) 543 if (sep)
543 *cp++ = sep; 544 *cp++ = sep;
544 sprintf(cp, " 0x%lx", mask); 545 sprintf(cp, " 0x%lx", mask);
545 } 546 }
546 547
547 proc_freq = cpufreq_quick_get(cpunum); 548 proc_freq = cpufreq_quick_get(cpunum);
548 if (!proc_freq) 549 if (!proc_freq)
549 proc_freq = c->proc_freq / 1000; 550 proc_freq = c->proc_freq / 1000;
550 551
551 seq_printf(m, 552 seq_printf(m,
552 "processor : %d\n" 553 "processor : %d\n"
553 "vendor : %s\n" 554 "vendor : %s\n"
554 "arch : IA-64\n" 555 "arch : IA-64\n"
555 "family : %s\n" 556 "family : %s\n"
556 "model : %u\n" 557 "model : %u\n"
557 "revision : %u\n" 558 "revision : %u\n"
558 "archrev : %u\n" 559 "archrev : %u\n"
559 "features :%s\n" /* don't change this---it _is_ right! */ 560 "features :%s\n" /* don't change this---it _is_ right! */
560 "cpu number : %lu\n" 561 "cpu number : %lu\n"
561 "cpu regs : %u\n" 562 "cpu regs : %u\n"
562 "cpu MHz : %lu.%06lu\n" 563 "cpu MHz : %lu.%06lu\n"
563 "itc MHz : %lu.%06lu\n" 564 "itc MHz : %lu.%06lu\n"
564 "BogoMIPS : %lu.%02lu\n", 565 "BogoMIPS : %lu.%02lu\n",
565 cpunum, c->vendor, family, c->model, c->revision, c->archrev, 566 cpunum, c->vendor, family, c->model, c->revision, c->archrev,
566 features, c->ppn, c->number, 567 features, c->ppn, c->number,
567 proc_freq / 1000, proc_freq % 1000, 568 proc_freq / 1000, proc_freq % 1000,
568 c->itc_freq / 1000000, c->itc_freq % 1000000, 569 c->itc_freq / 1000000, c->itc_freq % 1000000,
569 lpj*HZ/500000, (lpj*HZ/5000) % 100); 570 lpj*HZ/500000, (lpj*HZ/5000) % 100);
570 #ifdef CONFIG_SMP 571 #ifdef CONFIG_SMP
571 seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum])); 572 seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum]));
572 if (c->threads_per_core > 1 || c->cores_per_socket > 1) 573 if (c->threads_per_core > 1 || c->cores_per_socket > 1)
573 seq_printf(m, 574 seq_printf(m,
574 "physical id: %u\n" 575 "physical id: %u\n"
575 "core id : %u\n" 576 "core id : %u\n"
576 "thread id : %u\n", 577 "thread id : %u\n",
577 c->socket_id, c->core_id, c->thread_id); 578 c->socket_id, c->core_id, c->thread_id);
578 #endif 579 #endif
579 seq_printf(m,"\n"); 580 seq_printf(m,"\n");
580 581
581 return 0; 582 return 0;
582 } 583 }
583 584
584 static void * 585 static void *
585 c_start (struct seq_file *m, loff_t *pos) 586 c_start (struct seq_file *m, loff_t *pos)
586 { 587 {
587 #ifdef CONFIG_SMP 588 #ifdef CONFIG_SMP
588 while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map)) 589 while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
589 ++*pos; 590 ++*pos;
590 #endif 591 #endif
591 return *pos < NR_CPUS ? cpu_data(*pos) : NULL; 592 return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
592 } 593 }
593 594
594 static void * 595 static void *
595 c_next (struct seq_file *m, void *v, loff_t *pos) 596 c_next (struct seq_file *m, void *v, loff_t *pos)
596 { 597 {
597 ++*pos; 598 ++*pos;
598 return c_start(m, pos); 599 return c_start(m, pos);
599 } 600 }
600 601
601 static void 602 static void
602 c_stop (struct seq_file *m, void *v) 603 c_stop (struct seq_file *m, void *v)
603 { 604 {
604 } 605 }
605 606
606 struct seq_operations cpuinfo_op = { 607 struct seq_operations cpuinfo_op = {
607 .start = c_start, 608 .start = c_start,
608 .next = c_next, 609 .next = c_next,
609 .stop = c_stop, 610 .stop = c_stop,
610 .show = show_cpuinfo 611 .show = show_cpuinfo
611 }; 612 };
612 613
613 static void __cpuinit 614 static void __cpuinit
614 identify_cpu (struct cpuinfo_ia64 *c) 615 identify_cpu (struct cpuinfo_ia64 *c)
615 { 616 {
616 union { 617 union {
617 unsigned long bits[5]; 618 unsigned long bits[5];
618 struct { 619 struct {
619 /* id 0 & 1: */ 620 /* id 0 & 1: */
620 char vendor[16]; 621 char vendor[16];
621 622
622 /* id 2 */ 623 /* id 2 */
623 u64 ppn; /* processor serial number */ 624 u64 ppn; /* processor serial number */
624 625
625 /* id 3: */ 626 /* id 3: */
626 unsigned number : 8; 627 unsigned number : 8;
627 unsigned revision : 8; 628 unsigned revision : 8;
628 unsigned model : 8; 629 unsigned model : 8;
629 unsigned family : 8; 630 unsigned family : 8;
630 unsigned archrev : 8; 631 unsigned archrev : 8;
631 unsigned reserved : 24; 632 unsigned reserved : 24;
632 633
633 /* id 4: */ 634 /* id 4: */
634 u64 features; 635 u64 features;
635 } field; 636 } field;
636 } cpuid; 637 } cpuid;
637 pal_vm_info_1_u_t vm1; 638 pal_vm_info_1_u_t vm1;
638 pal_vm_info_2_u_t vm2; 639 pal_vm_info_2_u_t vm2;
639 pal_status_t status; 640 pal_status_t status;
640 unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */ 641 unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */
641 int i; 642 int i;
642 643
643 for (i = 0; i < 5; ++i) 644 for (i = 0; i < 5; ++i)
644 cpuid.bits[i] = ia64_get_cpuid(i); 645 cpuid.bits[i] = ia64_get_cpuid(i);
645 646
646 memcpy(c->vendor, cpuid.field.vendor, 16); 647 memcpy(c->vendor, cpuid.field.vendor, 16);
647 #ifdef CONFIG_SMP 648 #ifdef CONFIG_SMP
648 c->cpu = smp_processor_id(); 649 c->cpu = smp_processor_id();
649 650
650 /* The default values below will be overwritten by identify_siblings() 651 /* The default values below will be overwritten by identify_siblings()
651 * for Multi-Threading/Multi-Core capable CPUs. 652 * for Multi-Threading/Multi-Core capable CPUs.
652 */ 653 */
653 c->threads_per_core = c->cores_per_socket = c->num_log = 1; 654 c->threads_per_core = c->cores_per_socket = c->num_log = 1;
654 c->socket_id = -1; 655 c->socket_id = -1;
655 656
656 identify_siblings(c); 657 identify_siblings(c);
657 #endif 658 #endif
658 c->ppn = cpuid.field.ppn; 659 c->ppn = cpuid.field.ppn;
659 c->number = cpuid.field.number; 660 c->number = cpuid.field.number;
660 c->revision = cpuid.field.revision; 661 c->revision = cpuid.field.revision;
661 c->model = cpuid.field.model; 662 c->model = cpuid.field.model;
662 c->family = cpuid.field.family; 663 c->family = cpuid.field.family;
663 c->archrev = cpuid.field.archrev; 664 c->archrev = cpuid.field.archrev;
664 c->features = cpuid.field.features; 665 c->features = cpuid.field.features;
665 666
666 status = ia64_pal_vm_summary(&vm1, &vm2); 667 status = ia64_pal_vm_summary(&vm1, &vm2);
667 if (status == PAL_STATUS_SUCCESS) { 668 if (status == PAL_STATUS_SUCCESS) {
668 impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb; 669 impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
669 phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size; 670 phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
670 } 671 }
671 c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1)); 672 c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
672 c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); 673 c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
673 } 674 }
674 675
675 void 676 void
676 setup_per_cpu_areas (void) 677 setup_per_cpu_areas (void)
677 { 678 {
678 /* start_kernel() requires this... */ 679 /* start_kernel() requires this... */
679 #ifdef CONFIG_ACPI_HOTPLUG_CPU 680 #ifdef CONFIG_ACPI_HOTPLUG_CPU
680 prefill_possible_map(); 681 prefill_possible_map();
681 #endif 682 #endif
682 } 683 }
683 684
684 /* 685 /*
685 * Calculate the max. cache line size. 686 * Calculate the max. cache line size.
686 * 687 *
687 * In addition, the minimum of the i-cache stride sizes is calculated for 688 * In addition, the minimum of the i-cache stride sizes is calculated for
688 * "flush_icache_range()". 689 * "flush_icache_range()".
689 */ 690 */
690 static void __cpuinit 691 static void __cpuinit
691 get_max_cacheline_size (void) 692 get_max_cacheline_size (void)
692 { 693 {
693 unsigned long line_size, max = 1; 694 unsigned long line_size, max = 1;
694 unsigned int cache_size = 0; 695 unsigned int cache_size = 0;
695 u64 l, levels, unique_caches; 696 u64 l, levels, unique_caches;
696 pal_cache_config_info_t cci; 697 pal_cache_config_info_t cci;
697 s64 status; 698 s64 status;
698 699
699 status = ia64_pal_cache_summary(&levels, &unique_caches); 700 status = ia64_pal_cache_summary(&levels, &unique_caches);
700 if (status != 0) { 701 if (status != 0) {
701 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", 702 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
702 __FUNCTION__, status); 703 __FUNCTION__, status);
703 max = SMP_CACHE_BYTES; 704 max = SMP_CACHE_BYTES;
704 /* Safest setup for "flush_icache_range()" */ 705 /* Safest setup for "flush_icache_range()" */
705 ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT; 706 ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
706 goto out; 707 goto out;
707 } 708 }
708 709
709 for (l = 0; l < levels; ++l) { 710 for (l = 0; l < levels; ++l) {
710 status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2, 711 status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
711 &cci); 712 &cci);
712 if (status != 0) { 713 if (status != 0) {
713 printk(KERN_ERR 714 printk(KERN_ERR
714 "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n", 715 "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
715 __FUNCTION__, l, status); 716 __FUNCTION__, l, status);
716 max = SMP_CACHE_BYTES; 717 max = SMP_CACHE_BYTES;
717 /* The safest setup for "flush_icache_range()" */ 718 /* The safest setup for "flush_icache_range()" */
718 cci.pcci_stride = I_CACHE_STRIDE_SHIFT; 719 cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
719 cci.pcci_unified = 1; 720 cci.pcci_unified = 1;
720 } 721 }
721 line_size = 1 << cci.pcci_line_size; 722 line_size = 1 << cci.pcci_line_size;
722 if (line_size > max) 723 if (line_size > max)
723 max = line_size; 724 max = line_size;
724 if (cache_size < cci.pcci_cache_size) 725 if (cache_size < cci.pcci_cache_size)
725 cache_size = cci.pcci_cache_size; 726 cache_size = cci.pcci_cache_size;
726 if (!cci.pcci_unified) { 727 if (!cci.pcci_unified) {
727 status = ia64_pal_cache_config_info(l, 728 status = ia64_pal_cache_config_info(l,
728 /* cache_type (instruction)= */ 1, 729 /* cache_type (instruction)= */ 1,
729 &cci); 730 &cci);
730 if (status != 0) { 731 if (status != 0) {
731 printk(KERN_ERR 732 printk(KERN_ERR
732 "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n", 733 "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
733 __FUNCTION__, l, status); 734 __FUNCTION__, l, status);
734 /* The safest setup for "flush_icache_range()" */ 735 /* The safest setup for "flush_icache_range()" */
735 cci.pcci_stride = I_CACHE_STRIDE_SHIFT; 736 cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
736 } 737 }
737 } 738 }
738 if (cci.pcci_stride < ia64_i_cache_stride_shift) 739 if (cci.pcci_stride < ia64_i_cache_stride_shift)
739 ia64_i_cache_stride_shift = cci.pcci_stride; 740 ia64_i_cache_stride_shift = cci.pcci_stride;
740 } 741 }
741 out: 742 out:
742 #ifdef CONFIG_SMP 743 #ifdef CONFIG_SMP
743 max_cache_size = max(max_cache_size, cache_size); 744 max_cache_size = max(max_cache_size, cache_size);
744 #endif 745 #endif
745 if (max > ia64_max_cacheline_size) 746 if (max > ia64_max_cacheline_size)
746 ia64_max_cacheline_size = max; 747 ia64_max_cacheline_size = max;
747 } 748 }
748 749
749 /* 750 /*
750 * cpu_init() initializes state that is per-CPU. This function acts 751 * cpu_init() initializes state that is per-CPU. This function acts
751 * as a 'CPU state barrier', nothing should get across. 752 * as a 'CPU state barrier', nothing should get across.
752 */ 753 */
753 void __cpuinit 754 void __cpuinit
754 cpu_init (void) 755 cpu_init (void)
755 { 756 {
756 extern void __cpuinit ia64_mmu_init (void *); 757 extern void __cpuinit ia64_mmu_init (void *);
757 unsigned long num_phys_stacked; 758 unsigned long num_phys_stacked;
758 pal_vm_info_2_u_t vmi; 759 pal_vm_info_2_u_t vmi;
759 unsigned int max_ctx; 760 unsigned int max_ctx;
760 struct cpuinfo_ia64 *cpu_info; 761 struct cpuinfo_ia64 *cpu_info;
761 void *cpu_data; 762 void *cpu_data;
762 763
763 cpu_data = per_cpu_init(); 764 cpu_data = per_cpu_init();
764 765
765 /* 766 /*
766 * We set ar.k3 so that assembly code in MCA handler can compute 767 * We set ar.k3 so that assembly code in MCA handler can compute
767 * physical addresses of per cpu variables with a simple: 768 * physical addresses of per cpu variables with a simple:
768 * phys = ar.k3 + &per_cpu_var 769 * phys = ar.k3 + &per_cpu_var
769 */ 770 */
770 ia64_set_kr(IA64_KR_PER_CPU_DATA, 771 ia64_set_kr(IA64_KR_PER_CPU_DATA,
771 ia64_tpa(cpu_data) - (long) __per_cpu_start); 772 ia64_tpa(cpu_data) - (long) __per_cpu_start);
772 773
773 get_max_cacheline_size(); 774 get_max_cacheline_size();
774 775
775 /* 776 /*
776 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called 777 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
777 * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it 778 * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it
778 * depends on the data returned by identify_cpu(). We break the dependency by 779 * depends on the data returned by identify_cpu(). We break the dependency by
779 * accessing cpu_data() through the canonical per-CPU address. 780 * accessing cpu_data() through the canonical per-CPU address.
780 */ 781 */
781 cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start); 782 cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
782 identify_cpu(cpu_info); 783 identify_cpu(cpu_info);
783 784
784 #ifdef CONFIG_MCKINLEY 785 #ifdef CONFIG_MCKINLEY
785 { 786 {
786 # define FEATURE_SET 16 787 # define FEATURE_SET 16
787 struct ia64_pal_retval iprv; 788 struct ia64_pal_retval iprv;
788 789
789 if (cpu_info->family == 0x1f) { 790 if (cpu_info->family == 0x1f) {
790 PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0); 791 PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
791 if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80)) 792 if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
792 PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, 793 PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
793 (iprv.v1 | 0x80), FEATURE_SET, 0); 794 (iprv.v1 | 0x80), FEATURE_SET, 0);
794 } 795 }
795 } 796 }
796 #endif 797 #endif
797 798
798 /* Clear the stack memory reserved for pt_regs: */ 799 /* Clear the stack memory reserved for pt_regs: */
799 memset(task_pt_regs(current), 0, sizeof(struct pt_regs)); 800 memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
800 801
801 ia64_set_kr(IA64_KR_FPU_OWNER, 0); 802 ia64_set_kr(IA64_KR_FPU_OWNER, 0);
802 803
803 /* 804 /*
804 * Initialize the page-table base register to a global 805 * Initialize the page-table base register to a global
805 * directory with all zeroes. This ensures that we can handle 806 * directory with all zeroes. This ensures that we can handle
806 * TLB misses to user address-space even before we have created the 807 * TLB misses to user address-space even before we have created the
807 * first user address-space. This may happen, e.g., due to 808 * first user address-space. This may happen, e.g., due to
808 * aggressive use of lfetch.fault. 809 * aggressive use of lfetch.fault.
809 */ 810 */
810 ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page))); 811 ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
811 812
812 /* 813 /*
813 * Initialize default control register to defer speculative faults except 814 * Initialize default control register to defer speculative faults except
814 * for those arising from TLB misses, which are not deferred. The 815 * for those arising from TLB misses, which are not deferred. The
815 * kernel MUST NOT depend on a particular setting of these bits (in other words, 816 * kernel MUST NOT depend on a particular setting of these bits (in other words,
816 * the kernel must have recovery code for all speculative accesses). Turn on 817 * the kernel must have recovery code for all speculative accesses). Turn on
817 * dcr.lc as per recommendation by the architecture team. Most IA-32 apps 818 * dcr.lc as per recommendation by the architecture team. Most IA-32 apps
818 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll 819 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
819 * be fine). 820 * be fine).
820 */ 821 */
821 ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR 822 ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
822 | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); 823 | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
823 atomic_inc(&init_mm.mm_count); 824 atomic_inc(&init_mm.mm_count);
824 current->active_mm = &init_mm; 825 current->active_mm = &init_mm;
825 if (current->mm) 826 if (current->mm)
826 BUG(); 827 BUG();
827 828
828 ia64_mmu_init(ia64_imva(cpu_data)); 829 ia64_mmu_init(ia64_imva(cpu_data));
829 ia64_mca_cpu_init(ia64_imva(cpu_data)); 830 ia64_mca_cpu_init(ia64_imva(cpu_data));
830 831
831 #ifdef CONFIG_IA32_SUPPORT 832 #ifdef CONFIG_IA32_SUPPORT
832 ia32_cpu_init(); 833 ia32_cpu_init();
833 #endif 834 #endif
834 835
835 /* Clear ITC to eliminate sched_clock() overflows in human time. */ 836 /* Clear ITC to eliminate sched_clock() overflows in human time. */
836 ia64_set_itc(0); 837 ia64_set_itc(0);
837 838
838 /* disable all local interrupt sources: */ 839 /* disable all local interrupt sources: */
839 ia64_set_itv(1 << 16); 840 ia64_set_itv(1 << 16);
840 ia64_set_lrr0(1 << 16); 841 ia64_set_lrr0(1 << 16);
841 ia64_set_lrr1(1 << 16); 842 ia64_set_lrr1(1 << 16);
842 ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); 843 ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
843 ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); 844 ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
844 845
845 /* clear TPR & XTP to enable all interrupt classes: */ 846 /* clear TPR & XTP to enable all interrupt classes: */
846 ia64_setreg(_IA64_REG_CR_TPR, 0); 847 ia64_setreg(_IA64_REG_CR_TPR, 0);
847 #ifdef CONFIG_SMP 848 #ifdef CONFIG_SMP
848 normal_xtp(); 849 normal_xtp();
849 #endif 850 #endif
850 851
851 /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ 852 /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
852 if (ia64_pal_vm_summary(NULL, &vmi) == 0) 853 if (ia64_pal_vm_summary(NULL, &vmi) == 0)
853 max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; 854 max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
854 else { 855 else {
855 printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); 856 printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
856 max_ctx = (1U << 15) - 1; /* use architected minimum */ 857 max_ctx = (1U << 15) - 1; /* use architected minimum */
857 } 858 }
858 while (max_ctx < ia64_ctx.max_ctx) { 859 while (max_ctx < ia64_ctx.max_ctx) {
859 unsigned int old = ia64_ctx.max_ctx; 860 unsigned int old = ia64_ctx.max_ctx;
860 if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old) 861 if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
861 break; 862 break;
862 } 863 }
863 864
864 if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) { 865 if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
865 printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical " 866 printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
866 "stacked regs\n"); 867 "stacked regs\n");
867 num_phys_stacked = 96; 868 num_phys_stacked = 96;
868 } 869 }
869 /* size of physical stacked register partition plus 8 bytes: */ 870 /* size of physical stacked register partition plus 8 bytes: */
870 __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8; 871 __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
871 platform_cpu_init(); 872 platform_cpu_init();
872 pm_idle = default_idle; 873 pm_idle = default_idle;
873 } 874 }
874 875
875 /* 876 /*
876 * On SMP systems, when the scheduler does migration-cost autodetection, 877 * On SMP systems, when the scheduler does migration-cost autodetection,
877 * it needs a way to flush as much of the CPU's caches as possible. 878 * it needs a way to flush as much of the CPU's caches as possible.
878 */ 879 */
879 void sched_cacheflush(void) 880 void sched_cacheflush(void)
880 { 881 {
881 ia64_sal_cache_flush(3); 882 ia64_sal_cache_flush(3);
882 } 883 }
883 884
884 void __init 885 void __init
885 check_bugs (void) 886 check_bugs (void)
886 { 887 {
887 ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, 888 ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
888 (unsigned long) __end___mckinley_e9_bundles); 889 (unsigned long) __end___mckinley_e9_bundles);
889 } 890 }
891
892 static int __init run_dmi_scan(void)
893 {
894 dmi_scan_machine();
895 return 0;
896 }
897 core_initcall(run_dmi_scan);
890 898
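With the scan hooked in above, any later initcall can query the parsed tables through the generic <linux/dmi.h> API. A minimal sketch of such a consumer (illustrative only, not part of this commit; the function name and message are made up):

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical consumer: device_initcall()s run at level 6, well after
 * the core_initcall (level 1) above has scanned the tables, so DMI
 * lookups are safe here on ia64 as well. */
static int __init dmi_consumer_example(void)
{
	const char *vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	const char *product = dmi_get_system_info(DMI_PRODUCT_NAME);

	if (vendor && product)
		printk(KERN_INFO "DMI: %s %s\n", vendor, product);
	return 0;
}
device_initcall(dmi_consumer_example);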
include/asm-ia64/dmi.h
File was created 1 #ifndef _ASM_DMI_H
2 #define _ASM_DMI_H 1
3
4 #include <asm/io.h>
5
6 #endif
7
include/asm-ia64/io.h
1 #ifndef _ASM_IA64_IO_H 1 #ifndef _ASM_IA64_IO_H
2 #define _ASM_IA64_IO_H 2 #define _ASM_IA64_IO_H
3 3
4 /* 4 /*
5 * This file contains the definitions for the emulated IO instructions 5 * This file contains the definitions for the emulated IO instructions
6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same 6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" 7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
8 * versions of the single-IO instructions (inb_p/inw_p/..). 8 * versions of the single-IO instructions (inb_p/inw_p/..).
9 * 9 *
10 * This file is not meant to be obfuscating: it's just complicated to 10 * This file is not meant to be obfuscating: it's just complicated to
11 * (a) handle it all in a way that makes gcc able to optimize it as 11 * (a) handle it all in a way that makes gcc able to optimize it as
12 * well as possible and (b) trying to avoid writing the same thing 12 * well as possible and (b) trying to avoid writing the same thing
13 * over and over again with slight variations and possibly making a 13 * over and over again with slight variations and possibly making a
14 * mistake somewhere. 14 * mistake somewhere.
15 * 15 *
16 * Copyright (C) 1998-2003 Hewlett-Packard Co 16 * Copyright (C) 1998-2003 Hewlett-Packard Co
17 * David Mosberger-Tang <davidm@hpl.hp.com> 17 * David Mosberger-Tang <davidm@hpl.hp.com>
18 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> 18 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
19 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> 19 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
20 */ 20 */
21 21
22 /* We don't use IO slowdowns on the ia64, but.. */ 22 /* We don't use IO slowdowns on the ia64, but.. */
23 #define __SLOW_DOWN_IO do { } while (0) 23 #define __SLOW_DOWN_IO do { } while (0)
24 #define SLOW_DOWN_IO do { } while (0) 24 #define SLOW_DOWN_IO do { } while (0)
25 25
26 #define __IA64_UNCACHED_OFFSET RGN_BASE(RGN_UNCACHED) 26 #define __IA64_UNCACHED_OFFSET RGN_BASE(RGN_UNCACHED)
27 27
28 /* 28 /*
29 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but 29 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
30 * large machines may have multiple other I/O spaces so we can't place any a priori limit 30 * large machines may have multiple other I/O spaces so we can't place any a priori limit
31 * on IO_SPACE_LIMIT. These additional spaces are described in ACPI. 31 * on IO_SPACE_LIMIT. These additional spaces are described in ACPI.
32 */ 32 */
33 #define IO_SPACE_LIMIT 0xffffffffffffffffUL 33 #define IO_SPACE_LIMIT 0xffffffffffffffffUL
34 34
35 #define MAX_IO_SPACES_BITS 4 35 #define MAX_IO_SPACES_BITS 4
36 #define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS) 36 #define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS)
37 #define IO_SPACE_BITS 24 37 #define IO_SPACE_BITS 24
38 #define IO_SPACE_SIZE (1UL << IO_SPACE_BITS) 38 #define IO_SPACE_SIZE (1UL << IO_SPACE_BITS)
39 39
40 #define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS) 40 #define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS)
41 #define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS) 41 #define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS)
42 #define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1)) 42 #define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1))
43 43
44 #define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff)) 44 #define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff))
45 45
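/*
 * Worked example (annotation, not in the source): for a sparse space,
 *   IO_SPACE_SPARSE_ENCODING(0x3f8) = ((0x3f8 >> 2) << 12) | (0x3f8 & 0xfff)
 *                                   = (0xfe << 12) | 0x3f8
 *                                   = 0xfe3f8
 * so the 64K legacy ports are spread across the MMIO window rather than
 * packed into its first 64K bytes.
 */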
46 struct io_space { 46 struct io_space {
47 unsigned long mmio_base; /* base in MMIO space */ 47 unsigned long mmio_base; /* base in MMIO space */
48 int sparse; 48 int sparse;
49 }; 49 };
50 50
51 extern struct io_space io_space[]; 51 extern struct io_space io_space[];
52 extern unsigned int num_io_spaces; 52 extern unsigned int num_io_spaces;
53 53
54 # ifdef __KERNEL__ 54 # ifdef __KERNEL__
55 55
56 /* 56 /*
57 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie: 57 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
58 * 0xCxxxxxxxxxxxxxxx MMIO cookie (return from ioremap) 58 * 0xCxxxxxxxxxxxxxxx MMIO cookie (return from ioremap)
59 * 0x000000001SPPPPPP PIO cookie (S=space number, P..P=port) 59 * 0x000000001SPPPPPP PIO cookie (S=space number, P..P=port)
60 * 60 *
61 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch 61 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
62 * code that uses bare port numbers without the prerequisite pci_iomap(). 62 * code that uses bare port numbers without the prerequisite pci_iomap().
63 */ 63 */
64 #define PIO_OFFSET (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS)) 64 #define PIO_OFFSET (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
65 #define PIO_MASK (PIO_OFFSET - 1) 65 #define PIO_MASK (PIO_OFFSET - 1)
66 #define PIO_RESERVED __IA64_UNCACHED_OFFSET 66 #define PIO_RESERVED __IA64_UNCACHED_OFFSET
67 #define HAVE_ARCH_PIO_SIZE 67 #define HAVE_ARCH_PIO_SIZE
68 68
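/*
 * Worked example (annotation, not in the source): PIO_OFFSET is
 * 1UL << (4 + 24) = 0x10000000, so port 0x100 in I/O space 1 becomes
 *   PIO_OFFSET | IO_SPACE_BASE(1) | 0x100 = 0x10000000 | 0x1000000 | 0x100
 *                                         = 0x11000100
 * matching the 0x000000001SPPPPPP layout documented above.
 */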
69 #include <asm/intrinsics.h> 69 #include <asm/intrinsics.h>
70 #include <asm/machvec.h> 70 #include <asm/machvec.h>
71 #include <asm/page.h> 71 #include <asm/page.h>
72 #include <asm/system.h> 72 #include <asm/system.h>
73 #include <asm-generic/iomap.h> 73 #include <asm-generic/iomap.h>
74 74
75 /* 75 /*
76 * Change virtual addresses to physical addresses and vice versa. 76 * Change virtual addresses to physical addresses and vice versa.
77 */ 77 */
78 static inline unsigned long 78 static inline unsigned long
79 virt_to_phys (volatile void *address) 79 virt_to_phys (volatile void *address)
80 { 80 {
81 return (unsigned long) address - PAGE_OFFSET; 81 return (unsigned long) address - PAGE_OFFSET;
82 } 82 }
83 83
84 static inline void* 84 static inline void*
85 phys_to_virt (unsigned long address) 85 phys_to_virt (unsigned long address)
86 { 86 {
87 return (void *) (address + PAGE_OFFSET); 87 return (void *) (address + PAGE_OFFSET);
88 } 88 }
89 89
90 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE 90 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
91 extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */ 91 extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
92 extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count); 92 extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count);
93 93
94 /* 94 /*
95 * The following two macros are deprecated and scheduled for removal. 95 * The following two macros are deprecated and scheduled for removal.
96 * Please use the PCI-DMA interface defined in <asm/pci.h> instead. 96 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
97 */ 97 */
98 #define bus_to_virt phys_to_virt 98 #define bus_to_virt phys_to_virt
99 #define virt_to_bus virt_to_phys 99 #define virt_to_bus virt_to_phys
100 #define page_to_bus page_to_phys 100 #define page_to_bus page_to_phys
101 101
102 # endif /* __KERNEL__ */ 102 # endif /* __KERNEL__ */
103 103
104 /* 104 /*
105 * Memory fence w/accept. This should never be used in code that is 105 * Memory fence w/accept. This should never be used in code that is
106 * not IA-64 specific. 106 * not IA-64 specific.
107 */ 107 */
108 #define __ia64_mf_a() ia64_mfa() 108 #define __ia64_mf_a() ia64_mfa()
109 109
110 /** 110 /**
111 * ___ia64_mmiowb - I/O write barrier 111 * ___ia64_mmiowb - I/O write barrier
112 * 112 *
113 * Ensure ordering of I/O space writes. This will make sure that writes 113 * Ensure ordering of I/O space writes. This will make sure that writes
114 * following the barrier will arrive after all previous writes. For most 114 * following the barrier will arrive after all previous writes. For most
115 * ia64 platforms, this is a simple 'mf.a' instruction. 115 * ia64 platforms, this is a simple 'mf.a' instruction.
116 * 116 *
117 * See Documentation/DocBook/deviceiobook.tmpl for more information. 117 * See Documentation/DocBook/deviceiobook.tmpl for more information.
118 */ 118 */
119 static inline void ___ia64_mmiowb(void) 119 static inline void ___ia64_mmiowb(void)
120 { 120 {
121 ia64_mfa(); 121 ia64_mfa();
122 } 122 }
123 123
124 static inline void* 124 static inline void*
125 __ia64_mk_io_addr (unsigned long port) 125 __ia64_mk_io_addr (unsigned long port)
126 { 126 {
127 struct io_space *space; 127 struct io_space *space;
128 unsigned long offset; 128 unsigned long offset;
129 129
130 space = &io_space[IO_SPACE_NR(port)]; 130 space = &io_space[IO_SPACE_NR(port)];
131 port = IO_SPACE_PORT(port); 131 port = IO_SPACE_PORT(port);
132 if (space->sparse) 132 if (space->sparse)
133 offset = IO_SPACE_SPARSE_ENCODING(port); 133 offset = IO_SPACE_SPARSE_ENCODING(port);
134 else 134 else
135 offset = port; 135 offset = port;
136 136
137 return (void *) (space->mmio_base | offset); 137 return (void *) (space->mmio_base | offset);
138 } 138 }

#define __ia64_inb	___ia64_inb
#define __ia64_inw	___ia64_inw
#define __ia64_inl	___ia64_inl
#define __ia64_outb	___ia64_outb
#define __ia64_outw	___ia64_outw
#define __ia64_outl	___ia64_outl
#define __ia64_readb	___ia64_readb
#define __ia64_readw	___ia64_readw
#define __ia64_readl	___ia64_readl
#define __ia64_readq	___ia64_readq
#define __ia64_readb_relaxed	___ia64_readb
#define __ia64_readw_relaxed	___ia64_readw
#define __ia64_readl_relaxed	___ia64_readl
#define __ia64_readq_relaxed	___ia64_readq
#define __ia64_writeb	___ia64_writeb
#define __ia64_writew	___ia64_writew
#define __ia64_writel	___ia64_writel
#define __ia64_writeq	___ia64_writeq
#define __ia64_mmiowb	___ia64_mmiowb

/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncacheable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them around
 * during optimization, which is why we use "volatile" pointers.
 */

static inline unsigned int
___ia64_inb (unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inw (unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inl (unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *dp = dst;

	while (count--)
		*dp++ = platform_inb(port);
}

static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned short *dp = dst;

	while (count--)
		*dp++ = platform_inw(port);
}

static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int *dp = dst;

	while (count--)
		*dp++ = platform_inl(port);
}

static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
	const unsigned char *sp = src;

	while (count--)
		platform_outb(*sp++, port);
}

static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
	const unsigned short *sp = src;

	while (count--)
		platform_outw(*sp++, port);
}

static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
	const unsigned int *sp = src;

	while (count--)
		platform_outl(*sp++, port);
}
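
/*
 * Editor's sketch (not part of this header): the string forms move a whole
 * block per call.  E.g. draining a 512-byte sector from an IDE data port
 * ('data_port' is hypothetical):
 */
static inline void example_read_sector(unsigned long data_port, u16 *buf)
{
	__insw(data_port, buf, 256);	/* 256 16-bit words == 512 bytes */
}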

/*
 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
 * specification regarding legacy I/O support.  Thus, we have to make these operations
 * platform-dependent...
 */
#define __inb		platform_inb
#define __inw		platform_inw
#define __inl		platform_inl
#define __outb		platform_outb
#define __outw		platform_outw
#define __outl		platform_outl
#define __mmiowb	platform_mmiowb

#define inb(p)		__inb(p)
#define inw(p)		__inw(p)
#define inl(p)		__inl(p)
#define insb(p,d,c)	__insb(p,d,c)
#define insw(p,d,c)	__insw(p,d,c)
#define insl(p,d,c)	__insl(p,d,c)
#define outb(v,p)	__outb(v,p)
#define outw(v,p)	__outw(v,p)
#define outl(v,p)	__outl(v,p)
#define outsb(p,s,c)	__outsb(p,s,c)
#define outsw(p,s,c)	__outsw(p,s,c)
#define outsl(p,s,c)	__outsl(p,s,c)
#define mmiowb()	__mmiowb()
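
/*
 * Editor's sketch (not part of this header): with the platform vectors in
 * place, drivers use the familiar x86-style accessors directly.  The port
 * shown is the standard PC keyboard-controller status port, purely as an
 * illustration.
 */
static inline int example_kbd_output_ready(void)
{
	return inb(0x64) & 0x01;	/* bit 0: output buffer full */
}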

/*
 * The addresses passed to these functions are already ioremap()ed.
 *
 * We need these to be machine vectors since some platforms don't provide
 * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
 * a good idea).  Writes are OK, though, for all existing ia64 platforms (and
 * hopefully it'll stay that way).
 */
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}

static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}

static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}

static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *) addr;
}

static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = val;
}

static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = val;
}

static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = val;
}

static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *) addr = val;
}

#define __readb		platform_readb
#define __readw		platform_readw
#define __readl		platform_readl
#define __readq		platform_readq
#define __readb_relaxed	platform_readb_relaxed
#define __readw_relaxed	platform_readw_relaxed
#define __readl_relaxed	platform_readl_relaxed
#define __readq_relaxed	platform_readq_relaxed

#define readb(a)	__readb((a))
#define readw(a)	__readw((a))
#define readl(a)	__readl((a))
#define readq(a)	__readq((a))
#define readb_relaxed(a)	__readb_relaxed((a))
#define readw_relaxed(a)	__readw_relaxed((a))
#define readl_relaxed(a)	__readl_relaxed((a))
#define readq_relaxed(a)	__readq_relaxed((a))
#define __raw_readb	readb
#define __raw_readw	readw
#define __raw_readl	readl
#define __raw_readq	readq
#define __raw_readb_relaxed	readb_relaxed
#define __raw_readw_relaxed	readw_relaxed
#define __raw_readl_relaxed	readl_relaxed
#define __raw_readq_relaxed	readq_relaxed
#define writeb(v,a)	__writeb((v), (a))
#define writew(v,a)	__writew((v), (a))
#define writel(v,a)	__writel((v), (a))
#define writeq(v,a)	__writeq((v), (a))
#define __raw_writeb	writeb
#define __raw_writew	writew
#define __raw_writel	writel
#define __raw_writeq	writeq
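
/*
 * Editor's sketch (not part of this header): the readX()/writeX() accessors
 * take an address that has already been ioremap()ed (see below).  The
 * register offset and bit are hypothetical.
 */
static inline u32 example_enable_device(void __iomem *regs)
{
	u32 ctrl = readl(regs + 0x10);		/* fetch a control register */
	writel(ctrl | 0x1, regs + 0x10);	/* set its enable bit */
	return readl(regs + 0x10);		/* read back to flush the posted write */
}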

#ifndef inb_p
# define inb_p		inb
#endif
#ifndef inw_p
# define inw_p		inw
#endif
#ifndef inl_p
# define inl_p		inl
#endif

#ifndef outb_p
# define outb_p		outb
#endif
#ifndef outw_p
# define outw_p		outw
#endif
#ifndef outl_p
# define outl_p		outl
#endif

/*
 * An "address" in IO memory space is not clearly either an integer or a pointer.  We will
 * accept both, thus the casts.
 *
 * On IA-64, we access the physical I/O memory space through the uncached kernel region.
 */
static inline void __iomem *
ioremap (unsigned long offset, unsigned long size)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
}
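
/*
 * Worked example (editor's addition): __IA64_UNCACHED_OFFSET is the base of
 * the identity-mapped uncached kernel region (region 6, i.e.
 * 0xc000000000000000), so ioremap(0x80000000, size) simply yields
 * 0xc000000080000000.  No page tables are set up, which is also why
 * iounmap() below has nothing to undo.
 */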

static inline void
iounmap (volatile void __iomem *addr)
{
}

#define ioremap_nocache(o,s)	ioremap(o,s)
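
/*
 * Editor's sketch (not part of this header): typical driver usage of the
 * helpers above; the physical base, size, and register layout are
 * hypothetical.
 */
static inline u32 example_read_device_id(unsigned long phys_base)
{
	void __iomem *regs = ioremap(phys_base, 0x1000);
	u32 id = readl(regs);	/* ID register assumed at offset 0 */

	iounmap(regs);
	return id;
}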

/* Use normal IO mappings for DMI */
#define dmi_ioremap ioremap
#define dmi_iounmap(x,l) iounmap(x)
#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)

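/*
 * Editor's sketch (not part of this header): these three macros are all the
 * generic i386 dmi_scan.c needs from the architecture -- a way to map the
 * firmware's SMBIOS table, and an atomic allocator (dmi_alloc() is a plain
 * kmalloc() here) for copying strings out of it before unmapping.  Roughly:
 */
static inline void example_dmi_map(unsigned long base, int len)
{
	void __iomem *buf = dmi_ioremap(base, len);	/* plain ioremap on ia64 */

	if (buf == NULL)
		return;
	/* ... walk the DMI table, dmi_alloc()ing any strings to keep ... */
	dmi_iounmap(buf, len);				/* expands to iounmap(buf) */
}
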
# ifdef __KERNEL__

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);
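
/*
 * Editor's sketch (not part of this header): bulk copies to or from a
 * mapped region go through these helpers rather than plain memcpy().
 * The FIFO pointer and size are hypothetical.
 */
static inline void example_drain_fifo(void *dst, const volatile void __iomem *fifo)
{
	memcpy_fromio(dst, fifo, 64);	/* copy 64 bytes out of device memory */
}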

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)
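/*
 * (Editor's note, an assumption worth stating: the three macros above are
 * no-ops because Linux/ia64 expects platform DMA to be cache-coherent, so
 * there is no CPU cache maintenance to do around DMA transfers.)
 */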

# endif /* __KERNEL__ */

/*
 * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
 * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
 * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
 * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
 * over BIO-level virtual merging.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#if 1
#define BIO_VMERGE_BOUNDARY	0
#else
/*
 * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
 * replaced by dma_merge_mask() or something of that sort.  Note: the only way
 * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
 * expanded into:
 *
 *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_merge_mask)
 *
 * which is precisely what we want.
 */
#define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
#endif

#endif /* _ASM_IA64_IO_H */
